main_dpdk.cpp revision 558ce764
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2016 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
85#define RX_CHECK_MIX_SAMPLE_RATE 8
86#define RX_CHECK_MIX_SAMPLE_RATE_1G 2
87
88
89#define SOCKET0         0
90
91#define MAX_PKT_BURST   32
92
93#define BP_MAX_CORES 32
94#define BP_MAX_TX_QUEUE 16
95#define BP_MASTER_AND_LATENCY 2
96
97#define RTE_TEST_RX_DESC_DEFAULT 64
98#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)
99
100#define RTE_TEST_RX_DESC_VM_DEFAULT 512
101#define RTE_TEST_TX_DESC_VM_DEFAULT 512
102
103typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
104struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
105extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
106void reorder_dpdk_ports();
107
108#define RTE_TEST_TX_DESC_DEFAULT 512
109#define RTE_TEST_RX_DESC_DROP    0
110
111static int max_stat_hw_id_seen = 0;
112static int max_stat_hw_id_seen_payload = 0;
113
114static inline int get_vm_one_queue_enable(){
115    return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
116}
117
118static inline int get_is_rx_thread_enabled() {
119    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
120}
121
122struct port_cfg_t;
123
124#define MAX_DPDK_ARGS 40
125static CPlatformYamlInfo global_platform_cfg_info;
126static int global_dpdk_args_num ;
127static char * global_dpdk_args[MAX_DPDK_ARGS];
128static char global_cores_str[100];
129static char global_prefix_str[100];
130static char global_loglevel_str[20];
131static char global_master_id_str[10];
132
/* Abstract base class collecting the per-NIC-driver specialization points.
 * Every supported DPDK poll-mode driver (ixgbe, igb, i40e, enic, virtual
 * devices) gets a concrete subclass that overrides configuration, RX-filter
 * programming and statistics collection. Instances are created via the
 * CTRexExtendedDriverDb registry below. */
class CTRexExtendedDriverBase {
public:

    /* by default NIC driver adds CRC */
    virtual bool has_crc_added() {
        return true;
    }

    /* Minimum supported sampling rate for rx-check mode on this NIC. */
    virtual int get_min_sample_rate(void)=0;
    /* Fill in per-port DPDK configuration (queues, offloads, ...). */
    virtual void update_configuration(port_cfg_t * cfg)=0;
    /* Adjust flow-director related fields of the port configuration. */
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;

    virtual bool is_hardware_filter_is_supported(){
        return(false);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    /* Add/remove a per-flow-stat HW rule; default: not supported (-1). */
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return -1;};
    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    /* true when the NIC can count flow-stat packets in hardware. */
    virtual bool hw_rx_stat_supported(){return false;}
    /* Read HW RX flow-stat counters; default: not supported (-1). */
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual int get_stat_counters_num() {return 0;}
    virtual int get_rx_stat_capabilities() {return 0;}
    /* Verify NIC firmware version; default: nothing to check. */
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    /* Switch the port in/out of "receive all packets to rx queue" mode. */
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;
};
172
173
/* Driver specialization for Intel 1G NICs (igb / e1000 family). */
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        /* flow control supported (2nd arg false = not virtual), link change allowed */
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* Factory hook used by the driver registry. */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
220
/* Driver specialization for virtual NICs (em, vmxnet3, virtio). Forces the
 * single RX / single TX queue mode, since these devices have no HW filters
 * or drop queues. */
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1GVm(){
        /* we are working in mode that we have 1 queue for rx and one queue for tx*/
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        /* 2nd arg true = virtual interface */
        return new DpdkTRexPortAttr(port_id, true, true);
    }

    /* Virtual drivers hand packets up without appending an Ethernet CRC. */
    virtual bool has_crc_added() {
        return false;
    }

    /* Factory hook used by the driver registry. */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1GVm() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){

    }

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);

    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    /* Nothing to do: virtual queue already receives everything. */
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
271
272
/* Driver specialization for Intel 10G NICs (ixgbe / 82599 family). */
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* Factory hook used by the driver registry. */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    /* "Receive all" mode is not implemented for this NIC. */
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return -ENOTSUP;}
};
313
/* Driver specialization for Intel 40G NICs (i40e / XL710 family).
 * Unlike the 10G path, flow-stat counters are read from hardware. */
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
public:
    CTRexExtendedDriverBase40G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* Factory hook used by the driver registry. */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    /* Flow-stat packet counting is done by the NIC itself. */
    virtual bool hw_rx_stat_supported(){return true;}
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    /* Low-level helpers for programming flow-director rules. */
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

private:
    /* Statically assumed number of interfaces per physical card (see ctor). */
    uint8_t m_if_per_card;
};
369
/* Driver specialization for Cisco VIC (enic). Reuses the 40G logic but
 * reports no hardware-filter support and skips firmware verification. */
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase40G {
public:
    CTRexExtendedDriverBaseVIC(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* Factory hook used by the driver registry. */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }

    virtual bool is_hardware_filter_is_supported(){
        return (false);
    }

    /* No firmware version check for VIC. */
    virtual int verify_fw_ver(int i) {return 0;}

    virtual void update_configuration(port_cfg_t * cfg);
};
391
392
393
394typedef CTRexExtendedDriverBase * (*create_object_t) (void);
395
396
/* One registry entry: DPDK driver name mapped to its factory function. */
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;   // DPDK PMD name, e.g. "rte_ixgbe_pmd"
    create_object_t     m_constructor;   // factory creating the matching driver object
};
402
/* Singleton registry mapping a DPDK driver name to the CTRexExtendedDriverBase
 * specialization that handles it. The active driver is selected once via
 * set_driver_name() after the DPDK EAL has probed the NICs. */
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    /* true when 'name' matches one of the registered PMD names. */
    bool is_driver_exists(std::string name);



    /* Select and instantiate the driver object for 'name'.
     * Asserts if the name was never registered. */
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    /* Return the active driver; must only be called after set_driver_name(). */
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    /* Lazy-constructed singleton accessor. */
    static CTRexExtendedDriverDb * Ins();

private:
    CTRexExtendedDriverBase * create_driver(std::string name);

    /* Register every supported PMD. New NIC support is added here. */
    CTRexExtendedDriverDb(){
        register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);

        /* virtual devices */
        register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);




        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;      // singleton instance
    bool        m_driver_was_set;              // guards premature get_drv() use
    std::string m_driver_name;                 // active PMD name
    CTRexExtendedDriverBase * m_drv;           // active driver object
    std::vector <CTRexExtendedDriverRec*>     m_list;   // all registered drivers

};
465
466CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
467
468
469void CTRexExtendedDriverDb::register_driver(std::string name,
470                                            create_object_t func){
471    CTRexExtendedDriverRec * rec;
472    rec = new CTRexExtendedDriverRec();
473    rec->m_driver_name=name;
474    rec->m_constructor=func;
475    m_list.push_back(rec);
476}
477
478
479bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
480    int i;
481    for (i=0; i<(int)m_list.size(); i++) {
482        if (m_list[i]->m_driver_name == name) {
483            return (true);
484        }
485    }
486    return (false);
487}
488
489
490CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
491    int i;
492    for (i=0; i<(int)m_list.size(); i++) {
493        if (m_list[i]->m_driver_name == name) {
494            return ( m_list[i]->m_constructor() );
495        }
496    }
497    return( (CTRexExtendedDriverBase *)0);
498}
499
500
501
502CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
503    if (!m_ins) {
504        m_ins = new CTRexExtendedDriverDb();
505    }
506    return (m_ins);
507}
508
509static CTRexExtendedDriverBase *  get_ex_drv(){
510
511    return ( CTRexExtendedDriverDb::Ins()->get_drv());
512}
513
514static inline int get_min_sample_rate(void){
515    return ( get_ex_drv()->get_min_sample_rate());
516}
517
518// cores =0==1,1*2,2,3,4,5,6
519// An enum for all the option types
enum { OPT_HELP,
       /* run modes */
       OPT_MODE_BATCH,            // -f <yaml>: stateful mode
       OPT_MODE_INTERACTIVE,      // -i: stateless mode
       OPT_NODE_DUMP,
       OPT_DUMP_INTERFACES,
       OPT_UT,
       /* core / port topology */
       OPT_CORES,
       OPT_SINGLE_CORE,
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       /* traffic shaping */
       OPT_RATE_MULT,
       OPT_DURATION,
       OPT_PLATFORM_FACTOR,
       OPT_PUB_DISABLE,
       OPT_LIMT_NUM_OF_PORTS,
       OPT_PLAT_CFG_FILE,
       OPT_MBUF_FACTOR,
       /* latency / rx-check */
       OPT_LATENCY,
       OPT_NO_CLEAN_FLOW_CLOSE,
       OPT_LATENCY_MASK,
       OPT_ONLY_LATENCY,
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC,
       OPT_PCAP,
       OPT_RX_CHECK,
       OPT_IO_MODE,
       /* protocol behaviour */
       OPT_IPV6,
       OPT_LEARN,                 // deprecated; kept for backward compat
       OPT_LEARN_MODE,
       OPT_LEARN_VERIFY,
       OPT_L_PKT_MODE,
       OPT_NO_FLOW_CONTROL,
       OPT_VLAN,
       OPT_RX_CHECK_HOPS,
       OPT_CLIENT_CFG_FILE,
       /* runtime / process control */
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
};
566
567/* these are the argument types:
568   SO_NONE --    no argument needed
569   SO_REQ_SEP -- single required argument
570   SO_MULTI --   multiple arguments needed
571*/
/* Mapping of command-line switches to option IDs for CSimpleOpt.
 * Keep in sync with the enum above and with usage(). Some options have
 * two spellings (e.g. --client_cfg / --client-cfg) for compatibility. */
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE   },
        { OPT_HELP,                   "-h",                SO_NONE   },
        { OPT_HELP,                   "--help",            SO_NONE   },
        { OPT_UT,                     "--ut",              SO_NONE   },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP},
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE   },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP},
        { OPT_SINGLE_CORE,            "-s",                SO_NONE  },
        { OPT_FLIP_CLIENT_SERVER,"--flip",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,"-e",SO_NONE  },
        { OPT_NO_CLEAN_FLOW_CLOSE,"--nc",SO_NONE  },
        { OPT_LIMT_NUM_OF_PORTS,"--limit-ports", SO_REQ_SEP },
        { OPT_CORES     , "-c",         SO_REQ_SEP },
        { OPT_NODE_DUMP , "-v",         SO_REQ_SEP },
        { OPT_DUMP_INTERFACES , "--dump-interfaces",         SO_MULTI },
        { OPT_LATENCY , "-l",         SO_REQ_SEP },
        { OPT_DURATION     , "-d",  SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR     , "-pm",  SO_REQ_SEP },
        { OPT_PUB_DISABLE     , "-pubd",  SO_NONE },
        { OPT_RATE_MULT     , "-m",  SO_REQ_SEP },
        { OPT_LATENCY_MASK     , "--lm",  SO_REQ_SEP },
        { OPT_ONLY_LATENCY, "--lo",  SO_NONE  },
        { OPT_LATENCY_PREVIEW ,       "-k",   SO_REQ_SEP   },
        { OPT_WAIT_BEFORE_TRAFFIC ,   "-w",   SO_REQ_SEP   },
        { OPT_PCAP,       "--pcap",       SO_NONE   },
        { OPT_RX_CHECK,   "--rx-check",  SO_REQ_SEP },
        { OPT_IO_MODE,   "--iom",  SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS, "--hops", SO_REQ_SEP },
        { OPT_IPV6,       "--ipv6",       SO_NONE   },
        { OPT_LEARN, "--learn",       SO_NONE   },
        { OPT_LEARN_MODE, "--learn-mode",       SO_REQ_SEP   },
        { OPT_LEARN_VERIFY, "--learn-verify",       SO_NONE   },
        { OPT_L_PKT_MODE, "--l-pkt-mode",       SO_REQ_SEP   },
        { OPT_NO_FLOW_CONTROL, "--no-flow-control-change",       SO_NONE   },
        { OPT_VLAN,       "--vlan",       SO_NONE   },
        { OPT_CLIENT_CFG_FILE, "--client_cfg", SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE, "--client-cfg", SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT ,"--no-key", SO_NONE   },
        { OPT_VIRT_ONE_TX_RX_QUEUE, "--vm-sim", SO_NONE },
        { OPT_PREFIX, "--prefix", SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT, "--send-debug-pkt", SO_REQ_SEP },
        { OPT_MBUF_FACTOR     , "--mbuf-factor",  SO_REQ_SEP },
        { OPT_NO_WATCHDOG ,     "--no-watchdog",  SO_NONE  },
        { OPT_ALLOW_COREDUMP ,  "--allow-coredump",  SO_NONE  },
        { OPT_CHECKSUM_OFFLOAD, "--checksum-offload", SO_NONE },
        { OPT_CLOSE, "--close-at-end", SO_NONE },
        { OPT_ARP_REF_PER, "--arp-refresh-period", SO_REQ_SEP },
        SO_END_OF_OPTIONS
    };
624
/* Print the command-line help text (modes, options, license, build info)
 * to stdout. Always returns 0; the caller exits after displaying it.
 * Keep the option descriptions here in sync with parser_options[]. */
static int usage(){

    printf(" Usage: t-rex-64 [mode] <options>\n\n");
    printf(" mode is one of:\n");
    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
    printf("   -i        : Run TRex in 'stateless' mode\n");
    printf("\n");

    printf(" Available options are:\n");
    printf(" --allow-coredump           : Allow creation of core dump \n");
    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
    printf("                               This it temporary option. Will be removed in the future \n");
    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
    printf(" --ipv6                     : Work in ipv6 mode \n");
    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
    printf("    Rate of zero means no latency check \n");
    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
    printf(" --learn-mode [1-3]         : Work in NAT environments, learn the dynamic NAT translation and ALG \n");
    printf("      1    Use TCP ACK in first SYN to pass NAT translation information. Will work only for TCP streams. Initial SYN packet must be first packet in stream \n");
    printf("      2    Add special IP option to pass NAT translation information. Will not work on certain firewalls if they drop packets with IP options \n");
    printf("      3    Like 1, but without support for sequence number randomization in server->clien direction. Performance (flow/second) better than 1 \n");
    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
    printf(" --lm                       : Hex mask of cores that should send traffic \n");
    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
    printf(" --lo                       : Only run latency test \n");
    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
    printf("      0 (default)    send SCTP packets  \n");
    printf("      1              Send ICMP request packets  \n");
    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
    printf(" --mbuf-factor              : Factor for packet memory \n");
    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
    printf(" --no-watchdog              : Disable watchdog \n");
    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
    printf(" -pubd                      : Disable monitors publishers \n");
    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
    printf(" -s                         : Single core. Run only one data path core. For debug \n");
    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
    printf(" --vm-sim                   : Simulate vm with driver of one input queue and one output queue \n");
    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
    printf("\n");
    printf(" Examples: ");
    printf(" basic trex run for 20 sec and multiplier of 10 \n");
    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
    printf("\n\n");
    /* license / attribution block */
    printf(" Copyright (c) 2015-2016 Cisco Systems, Inc.    \n");
    printf("                                                                  \n");
    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
    printf(" you may not use this file except in compliance with the License. \n");
    printf(" You may obtain a copy of the License at                          \n");
    printf("                                                                  \n");
    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
    printf("                                                                  \n");
    printf(" Unless required by applicable law or agreed to in writing, software \n");
    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
    printf(" See the License for the specific language governing permissions and      \n");
    printf(" limitations under the License.                                           \n");
    printf(" \n");
    printf(" Open Source Components / Libraries \n");
    printf(" DPDK       (BSD)       \n");
    printf(" YAML-CPP   (BSD)       \n");
    printf(" JSONCPP    (MIT)       \n");
    printf(" \n");
    printf(" Open Source Binaries \n");
    printf(" ZMQ        (LGPL v3plus) \n");
    printf(" \n");
    /* build / version information generated into version.h */
    printf(" Version : %s   \n",VERSION_BUILD_NUM);
    printf(" DPDK version : %s   \n",rte_version());
    printf(" User    : %s   \n",VERSION_USER);
    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
    printf(" Uuid    : %s    \n",VERSION_UIID);
    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
    return (0);
}
722
723
724int gtest_main(int argc, char **argv) ;
725
/* Report a command-line parsing error and terminate the process.
 * Does not return. */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n"
              << "*** " << msg << "\n\n";
    exit(-1);
}
730
/* Parse the TRex command line into *po.
 *
 * argc/argv   - the (possibly pre-copied) command line; the parser may mutate argv.
 * po          - option container that is filled in as options are recognized.
 * first_time  - true on the first parsing pass; only then are --dump-interfaces
 *               arguments collected and the platform YAML configuration loaded.
 *
 * Returns 0 on success, -1 when usage was printed (help / bad option / too many
 * cores). Hard validation errors do not return: parse_err() and exit() terminate
 * the process. */
static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
    CSimpleOpt args(argc, argv, parser_options);

    bool latency_was_set=false;
    (void)latency_was_set;
    char ** rgpszArg = NULL;

    int a=0;            // verbosity level captured by --v (OPT_NODE_DUMP)
    int node_dump=0;    // set when --v was given; enables dump/verbose paths below

    po->preview.setFileWrite(true);
    po->preview.setRealTime(true);
    uint32_t tmp_data;  // scratch for sscanf of numeric options before narrowing casts

    po->m_run_mode = CParserOption::RUN_MODE_INVALID;

    while ( args.Next() ){
        if (args.LastError() == SO_SUCCESS) {
            switch (args.OptionId()) {

            case OPT_UT :
                parse_err("Supported only in simulation");
                break;

            case OPT_HELP:
                usage();
                return -1;

            /* batch (stateful) mode; mutually exclusive with the other run modes */
            case OPT_MODE_BATCH:
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode");
                }
                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
                po->cfg_file = args.OptionArg();
                break;

            /* interactive (stateless) mode */
            case OPT_MODE_INTERACTIVE:
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode");
                }
                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
                break;

            case OPT_NO_KEYBOARD_INPUT  :
                po->preview.set_no_keyboard(true);
                break;

            case OPT_CLIENT_CFG_FILE :
                po->client_cfg_file = args.OptionArg();
                break;

            case OPT_PLAT_CFG_FILE :
                po->platform_cfg_file = args.OptionArg();
                break;

            case OPT_SINGLE_CORE :
                po->preview.setSingleCore(true);
                break;

            case OPT_IPV6:
                po->preview.set_ipv6_mode_enable(true);
                break;


            case OPT_LEARN :
                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
                break;

            case OPT_LEARN_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
                    exit(-1);
                }
                po->m_learn_mode = (uint8_t)tmp_data;
                break;

            case OPT_LEARN_VERIFY :
                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
                if (po->m_learn_mode == 0) {
                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
                }
                po->preview.set_learn_and_verify_mode_enable(true);
                break;

            case OPT_L_PKT_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
                    exit(-1);
                }
                po->m_l_pkt_mode=(uint8_t)tmp_data;
                break;

            case OPT_NO_FLOW_CONTROL:
                po->preview.set_disable_flow_control_setting(true);
                break;
            case OPT_VLAN:
                // VLAN mode is only honored in stateless mode
                if ( get_is_stateless() ) {
                    po->preview.set_vlan_mode_enable(true);
                }
                break;
            case OPT_LIMT_NUM_OF_PORTS :
                po->m_expected_portd =atoi(args.OptionArg());
                break;
            case  OPT_CORES  :
                po->preview.setCores(atoi(args.OptionArg()));
                break;
            case OPT_FLIP_CLIENT_SERVER :
                po->preview.setClientServerFlip(true);
                break;
            case OPT_NO_CLEAN_FLOW_CLOSE :
                po->preview.setNoCleanFlowClose(true);
                break;
            case OPT_FLOW_FLIP_CLIENT_SERVER :
                po->preview.setClientServerFlowFlip(true);
                break;
            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
                po->preview.setClientServerFlowFlipAddr(true);
                break;
            case OPT_NODE_DUMP:
                a=atoi(args.OptionArg());
                node_dump=1;
                po->preview.setFileWrite(false);
                break;
            case OPT_DUMP_INTERFACES:
                // interface list is only collected on the first parsing pass
                if (first_time) {
                    rgpszArg = args.MultiArg(1);
                    while (rgpszArg != NULL) {
                        po->dump_interfaces.push_back(rgpszArg[0]);
                        rgpszArg = args.MultiArg(1);
                    }
                }
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode");
                }
                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
                break;
            case OPT_MBUF_FACTOR:
                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
                break;
            case OPT_RATE_MULT :
                sscanf(args.OptionArg(),"%f", &po->m_factor);
                break;
            case OPT_DURATION :
                sscanf(args.OptionArg(),"%f", &po->m_duration);
                break;
            case OPT_PUB_DISABLE:
                po->preview.set_zmq_publish_enable(false);
                break;
            case OPT_PLATFORM_FACTOR:
                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
                break;
            case OPT_LATENCY :
                latency_was_set=true;
                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
                break;
            case OPT_LATENCY_MASK :
                // note: parsed as hex
                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
                break;
            case OPT_ONLY_LATENCY :
                po->preview.setOnlyLatency(true);
                break;
            case OPT_NO_WATCHDOG :
                po->preview.setWDDisable(true);
                break;
            case OPT_ALLOW_COREDUMP :
                po->preview.setCoreDumpEnable(true);
                break;
            case  OPT_LATENCY_PREVIEW :
                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
                break;
            case  OPT_WAIT_BEFORE_TRAFFIC :
                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
                break;
            case OPT_PCAP:
                po->preview.set_pcap_mode_enable(true);
                break;
            case OPT_RX_CHECK :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_rx_check_sample=(uint16_t)tmp_data;
                po->preview.set_rx_check_enable(true);
                break;
            case OPT_RX_CHECK_HOPS :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_rx_check_hops = (uint16_t)tmp_data;
                break;
            case OPT_IO_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_io_mode=(uint16_t)tmp_data;
                break;

            case OPT_VIRT_ONE_TX_RX_QUEUE:
                po->preview.set_vm_one_queue_enable(true);
                break;

            case OPT_PREFIX:
                po->prefix = args.OptionArg();
                break;

            case OPT_SEND_DEBUG_PKT:
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_debug_pkt_proto = (uint8_t)tmp_data;
                break;

            case OPT_CHECKSUM_OFFLOAD:
                po->preview.setChecksumOffloadEnable(true);
                break;

            case OPT_CLOSE:
                po->preview.setCloseEnable(true);
                break;
            case  OPT_ARP_REF_PER:
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_arp_ref_per=(uint16_t)tmp_data;
                break;

            default:
                usage();
                return -1;
                break;
            } // End of switch
        }// End of IF
        else {
            usage();
            return -1;
        }
    } // End of while


    /* ---- cross-option validation and derived settings ---- */

    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
        parse_err("Please provide single run mode (e.g. batch or interactive)");
    }

    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
        parse_err("--learn mode is not supported with --ipv6, beacuse there is not such thing NAT66 ( ipv6-ipv6) \n" \
                  "if you think it is important,open a defect \n");
    }

    /* any of these features requires the RX (latency) core to be running */
    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
        || (CGlobalInfo::m_options.m_arp_ref_per != 0)) {
        po->set_rx_enabled();
    }

    if ( node_dump ){
        po->preview.setVMode(a);
    }

    /* if we have a platform factor we need to divide by it so we can still work with a normalized yaml profile */
    po->m_factor = po->m_factor/po->m_platform_factor;

    uint32_t cores=po->preview.getCores();
    if ( cores > ((BP_MAX_CORES)/2-1) ) {
        printf(" ERROR maximum supported cores are : %d \n",((BP_MAX_CORES)/2-1));
        return -1;
    }


    if ( first_time ){
        /* only first time read the configuration file */
        if ( po->platform_cfg_file.length() >0  ) {
            if ( node_dump ){
                printf("Loading platform configuration file from %s \n",po->platform_cfg_file.c_str());
            }
            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
            if ( node_dump ){
                global_platform_cfg_info.Dump(stdout);
            }
        }else{
            /* no explicit platform file: fall back to the well-known default path */
            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
                printf("found configuration file at /etc/trex_cfg.yaml \n");
                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
                if ( node_dump ){
                    global_platform_cfg_info.Dump(stdout);
                }
            }
        }
    }

    /* stateless (interactive) mode rejects stateful-only features */
    if ( get_is_stateless() ) {
        if ( po->m_duration ) {
            parse_err("Duration is not supported with interactive mode ");
        }

        if ( po->preview.get_is_rx_check_enable() ) {
            parse_err("Rx check is not supported with interactive mode ");
        }

        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
            parse_err("Latency check is not supported with interactive mode ");
        }

        if ( po->preview.getSingleCore() ){
            parse_err("Single core is not supported with interactive mode ");
        }

    }
    else {
        /* stateful default: run for one hour unless -d was given */
        if ( !po->m_duration ) {
            po->m_duration = 3600.0;
        }
    }
    return 0;
}
1033
1034static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1035    // copy, as arg parser sometimes changes the argv
1036    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1037    for(int i=0; i<argc; i++) {
1038        argv_copy[i] = strdup(argv[i]);
1039    }
1040    int ret = parse_options(argc, argv_copy, po, first_time);
1041
1042    // free
1043    for(int i=0; i<argc; i++) {
1044        free(argv_copy[i]);
1045    }
1046    free(argv_copy);
1047    return ret;
1048}
1049
1050int main_test(int argc , char * argv[]);
1051
1052
1053#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
1054#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
1055#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
1056
1057/*
1058 * These default values are optimized for use with the Intel(R) 82599 10 GbE
1059 * Controller and the DPDK ixgbe PMD. Consider using other values for other
1060 * network controllers and/or network drivers.
1061 */
1062#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
1063#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
1064#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
1065
1066#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
1067#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1068
1069
/* Per-port DPDK configuration bundle: device, RX, TX and drop-queue settings.
 * The constructor fills in TRex's defaults; the update_* methods patch the
 * configuration for the specific NIC driver / run mode. */
struct port_cfg_t {
public:
    port_cfg_t(){
        memset(&m_port_conf,0,sizeof(m_port_conf));
        memset(&m_rx_conf,0,sizeof(m_rx_conf));
        memset(&m_tx_conf,0,sizeof(m_tx_conf));
        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));

        /* RX descriptor thresholds (see RX_*THRESH defaults above) */
        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
        m_rx_conf.rx_free_thresh =32;

        /* alternate RX queue config with rx_drop_en set: packets are dropped
           in hardware when no descriptors are available */
        m_rx_drop_conf.rx_thresh.pthresh = 0;
        m_rx_drop_conf.rx_thresh.hthresh = 0;
        m_rx_drop_conf.rx_thresh.wthresh = 0;
        m_rx_drop_conf.rx_free_thresh =32;
        m_rx_drop_conf.rx_drop_en=1;

        /* TX descriptor thresholds (tuned for 82599/ixgbe; see TX_*THRESH) */
        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;

        /* accept jumbo frames up to 9K + header slack, strip CRC in hardware */
        m_port_conf.rxmode.jumbo_frame=1;
        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
        m_port_conf.rxmode.hw_strip_crc=1;
    }



    /* let the driver-specific handler adjust this configuration */
    inline void update_var(void){
        get_ex_drv()->update_configuration(this);
    }

    /* let the driver-specific handler set up flow-director (FDIR) config */
    inline void update_global_config_fdir(void){
        get_ex_drv()->update_global_config_fdir(this);
    }

    /* enable FDIR */
    inline void update_global_config_fdir_10g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT_MAC_VLAN;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
        /* Offset of flexbytes field in RX packets (in 16-bit word units). */
        /* Note: divide by 2 to convert byte offset to word offset */
        if (get_is_stateless()) {
            m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        } else {
            /* stateful: offset differs between IPv6 and IPv4 headers */
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
            } else {
                m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
            }

            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        }
        m_port_conf.fdir_conf.drop_queue=1;
    }

    /* 40G NICs use perfect-match FDIR without the flexbytes tweaks */
    inline void update_global_config_fdir_40g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
    }

    struct rte_eth_conf     m_port_conf;      /* device-level configuration */
    struct rte_eth_rxconf   m_rx_conf;        /* normal RX queue configuration */
    struct rte_eth_rxconf   m_rx_drop_conf;   /* RX queue configuration with hw drop enabled */
    struct rte_eth_txconf   m_tx_conf;        /* TX queue configuration */
};
1147
1148
1149/* this object is per core / per port / per queue
1150   each core will have 2 ports to send to
1151
1152
1153   port0                                port1
1154
1155   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1156
1157*/
1158
1159
/* Pairs a NIC register offset with a printable name; used by
   dump_stats_extended() to dump hardware counter registers. */
typedef struct cnt_name_ {
    uint32_t offset;  /* register offset to read via pci_reg_read() */
    char * name;      /* human-readable register name */
}cnt_name_t ;

/* Build a cnt_name_t entry from a register macro, stringifying the macro name. */
#define MY_REG(a) {a,(char *)#a}
1166
1167void CPhyEthIFStats::Clear() {
1168    ipackets = 0;
1169    ibytes = 0;
1170    f_ipackets = 0;
1171    f_ibytes = 0;
1172    opackets = 0;
1173    obytes = 0;
1174    ierrors = 0;
1175    oerrors = 0;
1176    imcasts = 0;
1177    rx_nombuf = 0;
1178    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1179    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1180    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1181}
1182
1183// dump all counters (even ones that equal 0)
1184void CPhyEthIFStats::DumpAll(FILE *fd) {
1185#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1186#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1187    DP_A4(opackets);
1188    DP_A4(obytes);
1189    DP_A4(ipackets);
1190    DP_A4(ibytes);
1191    DP_A(ierrors);
1192    DP_A(oerrors);
1193}
1194
// dump all non zero counters
// Prints each counter via the DP_A macro (defined in DumpAll above);
// counters whose value is 0 are skipped.
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1208
/* Dump the "ignored" traffic counters (service traffic such as ARP that is
   excluded from the user-visible stats). Uses the DP_A4 macro defined in
   CPhyEthIFStats::DumpAll, which prints counters even when they are zero. */
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1217
1218// Clear the RX queue of an interface, dropping all packets
1219void CPhyEthIF::flush_rx_queue(void){
1220
1221    rte_mbuf_t * rx_pkts[32];
1222    int j=0;
1223    uint16_t cnt=0;
1224
1225    while (true) {
1226        j++;
1227        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1228        if ( cnt ) {
1229            int i;
1230            for (i=0; i<(int)cnt;i++) {
1231                rte_mbuf_t * m=rx_pkts[i];
1232                /*printf("rx--\n");
1233                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1234                rte_pktmbuf_free(m);
1235            }
1236        }
1237        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1238            break;
1239        }
1240    }
1241    if (cnt>0) {
1242        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1243    }
1244}
1245
1246
1247void CPhyEthIF::dump_stats_extended(FILE *fd){
1248
1249    cnt_name_t reg[]={
1250        MY_REG(IXGBE_GPTC), /* total packet */
1251        MY_REG(IXGBE_GOTCL), /* total bytes */
1252        MY_REG(IXGBE_GOTCH),
1253
1254        MY_REG(IXGBE_GPRC),
1255        MY_REG(IXGBE_GORCL),
1256        MY_REG(IXGBE_GORCH),
1257
1258
1259
1260        MY_REG(IXGBE_RXNFGPC),
1261        MY_REG(IXGBE_RXNFGBCL),
1262        MY_REG(IXGBE_RXNFGBCH),
1263        MY_REG(IXGBE_RXDGPC  ),
1264        MY_REG(IXGBE_RXDGBCL ),
1265        MY_REG(IXGBE_RXDGBCH  ),
1266        MY_REG(IXGBE_RXDDGPC ),
1267        MY_REG(IXGBE_RXDDGBCL ),
1268        MY_REG(IXGBE_RXDDGBCH  ),
1269        MY_REG(IXGBE_RXLPBKGPC ),
1270        MY_REG(IXGBE_RXLPBKGBCL),
1271        MY_REG(IXGBE_RXLPBKGBCH ),
1272        MY_REG(IXGBE_RXDLPBKGPC ),
1273        MY_REG(IXGBE_RXDLPBKGBCL),
1274        MY_REG(IXGBE_RXDLPBKGBCH ),
1275        MY_REG(IXGBE_TXDGPC      ),
1276        MY_REG(IXGBE_TXDGBCL     ),
1277        MY_REG(IXGBE_TXDGBCH     ),
1278        MY_REG(IXGBE_FDIRUSTAT ),
1279        MY_REG(IXGBE_FDIRFSTAT ),
1280        MY_REG(IXGBE_FDIRMATCH ),
1281        MY_REG(IXGBE_FDIRMISS )
1282
1283    };
1284    fprintf (fd," extended counters \n");
1285    int i;
1286    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1287        cnt_name_t *lp=&reg[i];
1288        uint32_t c=pci_reg_read(lp->offset);
1289        // xl710 bug. Counter values are -559038737 when they should be 0
1290        if (c && c != -559038737 ) {
1291            fprintf (fd," %s  : %d \n",lp->name,c);
1292        }
1293    }
1294}
1295
/* Return the RX-statistics capability flags reported by this port's
   driver-specific handler. */
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1299
1300
1301
/* Configure the DPDK device with the requested number of RX/TX queues and
 * the given device configuration, then cache the device info.
 *
 * Terminates the process (rte_exit) if configuration fails, or if checksum
 * offload was requested but the device cannot offload UDP/TCP checksums. */
void CPhyEthIF::configure(uint16_t nb_rx_queue,
                          uint16_t nb_tx_queue,
                          const struct rte_eth_conf *eth_conf){
    int ret;
    ret = rte_eth_dev_configure(m_port_id,
                                nb_rx_queue,
                                nb_tx_queue,
                                eth_conf);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot configure device: "
                 "err=%d, port=%u\n",
                 ret, m_port_id);

    /* get device info */
    rte_eth_dev_info_get(m_port_id, &m_dev_info);

    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
        /* check if the device supports TCP and UDP checksum offloading */
        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
                     "port=%u\n",
                     m_port_id);
        }
        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
                     "port=%u\n",
                     m_port_id);
        }
    }
}
1333
1334
1335/*
1336
1337  rx-queue 0 - default- all traffic not goint to queue 1
1338  will be drop as queue is disable
1339
1340
1341  rx-queue 1 - Latency measurement packets will go here
1342
1343  pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
1344
1345*/
1346
1347void CPhyEthIF::configure_rx_duplicate_rules(){
1348
1349    if ( get_is_rx_filter_enable() ){
1350
1351        if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
1352            printf(" ERROR this feature is not supported with current hardware \n");
1353            exit(1);
1354        }
1355        get_ex_drv()->configure_rx_filter_rules(this);
1356    }
1357}
1358
1359
1360void CPhyEthIF::stop_rx_drop_queue() {
1361    // In debug mode, we want to see all packets. Don't want to disable any queue.
1362    if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1363        return;
1364    }
1365    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1366        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1367            printf(" ERROR latency feature is not supported with current hardware  \n");
1368            exit(1);
1369        }
1370    }
1371    get_ex_drv()->stop_queue(this, MAIN_DPDK_DATA_Q);
1372}
1373
1374
1375void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1376                               uint16_t nb_rx_desc,
1377                               unsigned int socket_id,
1378                               const struct rte_eth_rxconf *rx_conf,
1379                               struct rte_mempool *mb_pool){
1380
1381    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1382                                     nb_rx_desc,
1383                                     socket_id,
1384                                     rx_conf,
1385                                     mb_pool);
1386    if (ret < 0)
1387        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1388                 "err=%d, port=%u\n",
1389                 ret, m_port_id);
1390}
1391
1392
1393
1394void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1395                               uint16_t nb_tx_desc,
1396                               unsigned int socket_id,
1397                               const struct rte_eth_txconf *tx_conf){
1398
1399    int ret = rte_eth_tx_queue_setup( m_port_id,
1400                                      tx_queue_id,
1401                                      nb_tx_desc,
1402                                      socket_id,
1403                                      tx_conf);
1404    if (ret < 0)
1405        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1406                 "err=%d, port=%u queue=%u\n",
1407                 ret, m_port_id, tx_queue_id);
1408
1409}
1410
1411void CPhyEthIF::stop(){
1412    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1413        rte_eth_dev_stop(m_port_id);
1414        rte_eth_dev_close(m_port_id);
1415    }
1416}
1417
1418void CPhyEthIF::start(){
1419
1420    get_ex_drv()->clear_extended_stats(this);
1421
1422    int ret;
1423
1424    m_bw_tx.reset();
1425    m_bw_rx.reset();
1426
1427    m_stats.Clear();
1428    int i;
1429    for (i=0;i<10; i++ ) {
1430        ret = rte_eth_dev_start(m_port_id);
1431        if (ret==0) {
1432            return;
1433        }
1434        delay(1000);
1435    }
1436    if (ret < 0)
1437        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1438                 "err=%d, port=%u\n",
1439                 ret, m_port_id);
1440
1441}
1442
1443// Disabling flow control on interface
1444void CPhyEthIF::disable_flow_control(){
1445    int ret;
1446    // see trex-64 issue with loopback on the same NIC
1447    struct rte_eth_fc_conf fc_conf;
1448    memset(&fc_conf,0,sizeof(fc_conf));
1449    fc_conf.mode=RTE_FC_NONE;
1450    fc_conf.autoneg=1;
1451    fc_conf.pause_time=100;
1452    int i;
1453    for (i=0; i<5; i++) {
1454        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1455        if (ret==0) {
1456            break;
1457        }
1458        delay(1000);
1459    }
1460    if (ret < 0)
1461        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1462                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1463                 ret, m_port_id);
1464}
1465
1466/*
1467Get user frienly devices description from saved env. var
1468Changes certain attributes based on description
1469*/
/* Resolve this port's PCI address, look up a description saved in an
 * environment variable named "pci<addr>" (':' and '.' replaced by '_'),
 * and disable capability flags for NICs known to misreport them. */
void DpdkTRexPortAttr::update_description(){
    struct rte_pci_addr pci_addr;
    char pci[16];
    char * envvar;
    std::string pci_envvar_name;
    pci_addr = rte_eth_devices[m_port_id].pci_dev->addr;
    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
    intf_info_st.pci_addr = pci;
    /* env var names cannot contain ':' or '.', so normalize them to '_' */
    pci_envvar_name = "pci" + intf_info_st.pci_addr;
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
    envvar = std::getenv(pci_envvar_name.c_str());
    if (envvar) {
        intf_info_st.description = envvar;
    } else {
        intf_info_st.description = "Unknown";
    }
    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
        flag_is_link_change_supported = false;
    }
    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
        flag_is_fc_change_supported = false;
        flag_is_led_change_supported = false;
    }
    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
    }
}
1498
1499int DpdkTRexPortAttr::set_led(bool on){
1500    if (on) {
1501        return rte_eth_led_on(m_port_id);
1502    }else{
1503        return rte_eth_led_off(m_port_id);
1504    }
1505}
1506
1507int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1508    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1509    if (ret) {
1510        mode = -1;
1511        return ret;
1512    }
1513    mode = (int) fc_conf_tmp.mode;
1514    return 0;
1515}
1516
1517int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1518    if (!flag_is_fc_change_supported) {
1519        return -ENOTSUP;
1520    }
1521    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1522    if (ret) {
1523        return ret;
1524    }
1525    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1526    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1527}
1528
/* Reset the port's extended (driver-specific) statistics counters. */
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1532
/* Fill xstats_values with the port's extended statistics, indexed by each
 * entry's DPDK-assigned id. Returns 0 on success or the negative DPDK
 * error code. */
int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
    /* first call with NULL queries the number of xstats entries */
    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
    if (size < 0) {
        return size;
    }
    xstats_values_tmp.resize(size);
    xstats_values.resize(size);
    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
    if (size < 0) {
        return size;
    }
    /* re-index by the entry's id so values line up with get_xstats_names() */
    for (int i=0; i<size; i++) {
        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
    }
    return 0;
}
1549
/* Fill xstats_names with the names of the port's extended statistics.
 * Returns 0 on success or the negative DPDK error code. */
int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
    /* first call with NULL queries the number of xstats entries */
    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
    if (size < 0) {
        return size;
    }
    xstats_names_tmp.resize(size);
    xstats_names.resize(size);
    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
    if (size < 0) {
        return size;
    }
    for (int i=0; i<size; i++) {
        xstats_names[i] = xstats_names_tmp[i].name;
    }
    return 0;
}
1566
1567void DpdkTRexPortAttr::dump_link(FILE *fd){
1568    fprintf(fd,"port : %d \n",(int)m_port_id);
1569    fprintf(fd,"------------\n");
1570
1571    fprintf(fd,"link         : ");
1572    if (m_link.link_status) {
1573        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1574                (unsigned) m_link.link_speed,
1575                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1576                ("full-duplex") : ("half-duplex\n"));
1577    } else {
1578        fprintf(fd," Link Down\n");
1579    }
1580    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1581}
1582
/* Refresh the cached rte_eth_dev_info for this port. */
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1586
1587void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1588    uint32_t speed_capa = dev_info.speed_capa;
1589    if (speed_capa & ETH_LINK_SPEED_1G)
1590        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1591    if (speed_capa & ETH_LINK_SPEED_10G)
1592        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1593    if (speed_capa & ETH_LINK_SPEED_40G)
1594        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1595    if (speed_capa & ETH_LINK_SPEED_100G)
1596        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1597}
1598
/* Refresh the cached link state; rte_eth_link_get may block while the
   link is being (re)negotiated. */
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1602
1603bool DpdkTRexPortAttr::update_link_status_nowait(){
1604    rte_eth_link new_link;
1605    bool changed = false;
1606    rte_eth_link_get_nowait(m_port_id, &new_link);
1607
1608    /* if the link got down - update the dest atribute to move to unresolved */
1609    if (new_link.link_status != m_link.link_status) {
1610        get_dest().on_link_down();
1611        changed = true;
1612    }
1613
1614    /* other changes */
1615    if (new_link.link_speed != m_link.link_speed ||
1616                new_link.link_duplex != m_link.link_duplex ||
1617                    new_link.link_autoneg != m_link.link_autoneg) {
1618        changed = true;
1619    }
1620
1621    m_link = new_link;
1622    return changed;
1623}
1624
1625int DpdkTRexPortAttr::add_mac(char * mac){
1626    struct ether_addr mac_addr;
1627    for (int i=0; i<6;i++) {
1628        mac_addr.addr_bytes[i] =mac[i];
1629    }
1630    return rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
1631}
1632
1633int DpdkTRexPortAttr::set_promiscuous(bool enable){
1634    if (enable) {
1635        rte_eth_promiscuous_enable(m_port_id);
1636    }else{
1637        rte_eth_promiscuous_disable(m_port_id);
1638    }
1639    return 0;
1640}
1641
1642int DpdkTRexPortAttr::set_link_up(bool up){
1643    if (up) {
1644        return rte_eth_dev_set_link_up(m_port_id);
1645    }else{
1646        return rte_eth_dev_set_link_down(m_port_id);
1647    }
1648}
1649
1650bool DpdkTRexPortAttr::get_promiscuous(){
1651    int ret=rte_eth_promiscuous_get(m_port_id);
1652    if (ret<0) {
1653        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1654                 "err=%d, port=%u\n",
1655                 ret, m_port_id);
1656
1657    }
1658    return ( ret?true:false);
1659}
1660
1661
/* Read the port's HW MAC address into *mac_addr. */
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1665
/* Delegate the flow-director stats dump to the driver-specific handler. */
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1669
/*
 * Dump the non-zero ixgbe HW counters to 'fd'.
 * DP_A1(f)  prints scalar counter hs->f only when it is non-zero.
 * DP_A2(f,m) prints each non-zero element of array counter hs->f[0..m-1].
 * Commented-out entries were deemed uninteresting but kept for reference.
 */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1756
/*
 * Snapshot the current port counters as a baseline so that traffic sent
 * before the test proper (e.g. pre-test ARP resolution) is excluded from
 * the reported statistics.
 */
void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
    // reading m_stats, so drivers saving prev in m_stats will be updated.
    // Actually, we want m_stats to be cleared
    get_ex_drv()->get_extended_stats(this, &m_stats);

    // remember the pre-test totals, then zero the live counters
    m_ignore_stats.ipackets = m_stats.ipackets;
    m_ignore_stats.ibytes = m_stats.ibytes;
    m_ignore_stats.opackets = m_stats.opackets;
    m_ignore_stats.obytes = m_stats.obytes;
    m_stats.ipackets = 0;
    m_stats.opackets = 0;
    m_stats.ibytes = 0;
    m_stats.obytes = 0;

    // ARP counters come from the pre-test phase bookkeeping
    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;

    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
        m_ignore_stats.dump(stdout);
    }
}
1779
/* Refresh and dump this port's counters to 'fd'. */
void CPhyEthIF::dump_stats(FILE *fd){

    update_counters();

    fprintf(fd,"port : %d \n",(int)m_port_id);
    fprintf(fd,"------------\n");
    m_stats.DumpAll(fd);
    //m_stats.Dump(fd);
    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);   // NOTE(review): goes to stdout, not fd - confirm intentional
    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
}
1791
/* Reset both the HW counters and the SW counter mirror for this port. */
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
1796
1797class CCorePerPort  {
1798public:
1799    CCorePerPort (){
1800        m_tx_queue_id=0;
1801        m_len=0;
1802        int i;
1803        for (i=0; i<MAX_PKT_BURST; i++) {
1804            m_table[i]=0;
1805        }
1806        m_port=0;
1807    }
1808    uint8_t                 m_tx_queue_id;
1809    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
1810    uint16_t                m_len;
1811    rte_mbuf_t *            m_table[MAX_PKT_BURST];
1812    CPhyEthIF  *            m_port;
1813};
1814
1815
1816#define MAX_MBUF_CACHE 100
1817
1818
1819/* per core/gbe queue port for trasmitt */
1820class CCoreEthIF : public CVirtualIF {
1821public:
1822    enum {
1823     INVALID_Q_ID = 255
1824    };
1825
1826public:
1827
1828    CCoreEthIF(){
1829        m_mbuf_cache=0;
1830    }
1831
1832    bool Create(uint8_t             core_id,
1833                uint8_t            tx_client_queue_id,
1834                CPhyEthIF  *        tx_client_port,
1835                uint8_t            tx_server_queue_id,
1836                CPhyEthIF  *        tx_server_port,
1837                uint8_t             tx_q_id_lat);
1838    void Delete();
1839
1840    virtual int open_file(std::string file_name){
1841        return (0);
1842    }
1843
1844    virtual int close_file(void){
1845        return (flush_tx_queue());
1846    }
1847    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
1848                                                       , CCorePerPort *  lp_port
1849                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
1850    virtual int send_node(CGenNode * node);
1851    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
1852    virtual int flush_tx_queue(void);
1853    __attribute__ ((noinline)) void handle_rx_queue();
1854    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
1855
1856    void apply_client_cfg(const ClientCfg *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
1857
1858    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
1859
1860    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
1861
1862    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
1863    void GetCoreCounters(CVirtualIFPerSideStats *stats);
1864    void DumpCoreStats(FILE *fd);
1865    void DumpIfStats(FILE *fd);
1866    static void DumpIfCfgHeader(FILE *fd);
1867    void DumpIfCfg(FILE *fd);
1868
1869    socket_id_t get_socket_id(){
1870        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
1871    }
1872
1873    const CCorePerPort * get_ports() {
1874        return m_ports;
1875    }
1876
1877protected:
1878
1879    int send_burst(CCorePerPort * lp_port,
1880                   uint16_t len,
1881                   CVirtualIFPerSideStats  * lp_stats);
1882    int send_pkt(CCorePerPort * lp_port,
1883                 rte_mbuf_t *m,
1884                 CVirtualIFPerSideStats  * lp_stats);
1885    int send_pkt_lat(CCorePerPort * lp_port,
1886                 rte_mbuf_t *m,
1887                 CVirtualIFPerSideStats  * lp_stats);
1888
1889    void add_vlan(rte_mbuf_t *m, uint16_t vlan_id);
1890
1891protected:
1892    uint8_t      m_core_id;
1893    uint16_t     m_mbuf_cache;
1894    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
1895    CNodeRing *  m_ring_to_rx;
1896
1897} __rte_cache_aligned; ;
1898
/* Stateless-mode DP interface: overrides the TX entry points of CCoreEthIF
   to add flow-stat bookkeeping and PCAP replay support. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
protected:
    /* dispatch for nodes marked slow-path (currently only PCAP replay) */
    int handle_slow_path_node(CGenNode *node);
    int send_pcap_node(CGenNodePCAP *pcap_node);
};
1908
1909bool CCoreEthIF::Create(uint8_t             core_id,
1910                        uint8_t             tx_client_queue_id,
1911                        CPhyEthIF  *        tx_client_port,
1912                        uint8_t             tx_server_queue_id,
1913                        CPhyEthIF  *        tx_server_port,
1914                        uint8_t tx_q_id_lat ) {
1915    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
1916    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
1917    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
1918    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
1919    m_ports[SERVER_SIDE].m_port        = tx_server_port;
1920    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
1921    m_core_id = core_id;
1922
1923    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
1924    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
1925    assert( m_ring_to_rx);
1926    return (true);
1927}
1928
1929// This function is only relevant if we are in VM. In this case, we only have one rx queue. Can't have
1930// rules to drop queue 0 packets, and pass queue 1 packets to RX core, like in other cases.
1931// We receive all packets in the same core that transmitted, and handle them to RX core.
1932void CCoreEthIF::handle_rx_queue(void) {
1933    if ( likely( ! get_vm_one_queue_enable() ) ) {
1934        return;
1935    }
1936
1937    pkt_dir_t dir;
1938    bool is_rx = get_is_rx_thread_enabled();
1939    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
1940        CCorePerPort * lp_port=&m_ports[dir];
1941        CPhyEthIF * lp=lp_port->m_port;
1942
1943        rte_mbuf_t * rx_pkts[32];
1944        int j=0;
1945
1946        while (true) {
1947            j++;
1948            uint16_t cnt =lp->rx_burst(0,rx_pkts,32);
1949            if ( cnt ) {
1950                int i;
1951                for (i=0; i<(int)cnt;i++) {
1952                    rte_mbuf_t * m=rx_pkts[i];
1953                    if ( is_rx ){
1954                        if (!process_rx_pkt(dir,m)){
1955                            rte_pktmbuf_free(m);
1956                        }
1957                    }else{
1958                        rte_pktmbuf_free(m);
1959                    }
1960                }
1961            }
1962            if ((cnt<5) || j>10 ) {
1963                break;
1964            }
1965        }
1966    }
1967}
1968
1969int CCoreEthIF::flush_tx_queue(void){
1970    /* flush both sides */
1971    pkt_dir_t dir;
1972    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
1973        CCorePerPort * lp_port = &m_ports[dir];
1974        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
1975        if ( likely(lp_port->m_len > 0) ) {
1976            send_burst(lp_port, lp_port->m_len, lp_stats);
1977            lp_port->m_len = 0;
1978        }
1979    }
1980
1981    handle_rx_queue();
1982
1983    return 0;
1984}
1985
1986void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
1987    stats->Clear();
1988    pkt_dir_t   dir ;
1989    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
1990        stats->Add(&m_stats[dir]);
1991    }
1992}
1993
1994void CCoreEthIF::DumpCoreStats(FILE *fd){
1995    fprintf (fd,"------------------------ \n");
1996    fprintf (fd," per core stats core id : %d  \n",m_core_id);
1997    fprintf (fd,"------------------------ \n");
1998
1999    CVirtualIFPerSideStats stats;
2000    GetCoreCounters(&stats);
2001    stats.Dump(stdout);
2002}
2003
/* Print the column header matching CCoreEthIF::DumpIfCfg() rows. */
void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
    fprintf (fd," ------------------------------------------\n");
}
2008
/* Print one row of this core's port/queue assignment (see DumpIfCfgHeader). */
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2018
2019
/* Dump per-direction (client/server) interface counters for this core. */
void CCoreEthIF::DumpIfStats(FILE *fd){

    fprintf (fd,"------------------------ \n");
    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
    fprintf (fd,"------------------------ \n");

    const char * t[]={"client","server"};
    pkt_dir_t   dir ;
    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
        CCorePerPort * lp=&m_ports[dir];
        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
        fprintf (fd," ---------------------------- \n");
        lpstats->Dump(fd);
    }
}
2036
2037#define DELAY_IF_NEEDED
2038
/*
 * Push 'len' mbufs from lp_port->m_table to the HW TX queue.
 * With DELAY_IF_NEEDED (the compiled-in default) a full queue is retried
 * indefinitely with a 1us busy-wait per attempt, counting m_tx_queue_full;
 * otherwise the unsent tail is freed and counted in m_tx_drop.
 * Always returns 0.
 */
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    /* retry the unsent remainder until everything is accepted */
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2067
2068
2069int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2070                         rte_mbuf_t      *m,
2071                         CVirtualIFPerSideStats  * lp_stats
2072                         ){
2073
2074    uint16_t len = lp_port->m_len;
2075    lp_port->m_table[len]=m;
2076    len++;
2077    /* enough pkts to be sent */
2078    if (unlikely(len == MAX_PKT_BURST)) {
2079        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2080        len = 0;
2081    }
2082    lp_port->m_len = len;
2083
2084    return (0);
2085}
2086
/*
 * Send a single latency packet on the dedicated latency TX queue,
 * bypassing the burst table. Returns the number of packets sent (1 on
 * success; 0 only in the drop-on-full build).
 */
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    /* retry until the HW accepts the packet, 1us backoff per attempt */
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    /* drop path: free the packet and account it */
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2111
2112void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2113                              rte_mbuf_t      *m){
2114    CCorePerPort *  lp_port=&m_ports[dir];
2115    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2116    send_pkt(lp_port,m,lp_stats);
2117    /* flush */
2118    send_burst(lp_port,lp_port->m_len,lp_stats);
2119    lp_port->m_len = 0;
2120}
2121
/*
 * Account and transmit a packet belonging to a flow-stat rule.
 * hw_id <  MAX_FLOW_STATS : ip-id rule - plain per-flow accounting, normal TX.
 * hw_id >= MAX_FLOW_STATS : latency (payload) rule - a latency header is
 *   filled in (seq/hw_id/flow_seq/magic/timestamp) and the packet goes out
 *   on the dedicated latency queue via send_pkt_lat().
 * Always returns 0.
 */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // fsp_head is set to point into the (possibly cloned) mbuf
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        // deliberately corrupt seq numbers to exercise the error counters
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // timestamp as late as possible for accurate latency measurement
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2175
2176int CCoreEthIFStateless::send_node(CGenNode * no) {
2177    /* if a node is marked as slow path - single IF to redirect it to slow path */
2178    if (no->get_is_slow_path()) {
2179        return handle_slow_path_node(no);
2180    }
2181
2182    CGenNodeStateless * node_sl=(CGenNodeStateless *) no;
2183
2184    /* check that we have mbuf  */
2185    rte_mbuf_t *    m;
2186
2187    pkt_dir_t dir=(pkt_dir_t)node_sl->get_mbuf_cache_dir();
2188    CCorePerPort *  lp_port=&m_ports[dir];
2189    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2190    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2191        m=node_sl->cache_mbuf_array_get_cur();
2192        rte_pktmbuf_refcnt_update(m,1);
2193    }else{
2194        m=node_sl->get_cache_mbuf();
2195
2196        if (m) {
2197            /* cache case */
2198            rte_pktmbuf_refcnt_update(m,1);
2199        }else{
2200            m=node_sl->alloc_node_with_vm();
2201            assert(m);
2202        }
2203    }
2204
2205    if (unlikely(node_sl->is_stat_needed())) {
2206        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2207            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2208            // assert here just to make sure.
2209            assert(1);
2210        }
2211        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2212    } else {
2213        send_pkt(lp_port,m,lp_stats);
2214    }
2215
2216    return (0);
2217};
2218
2219int CCoreEthIFStateless::send_pcap_node(CGenNodePCAP *pcap_node) {
2220    rte_mbuf_t *m = pcap_node->get_pkt();
2221    if (!m) {
2222        return (-1);
2223    }
2224
2225    pkt_dir_t dir = (pkt_dir_t)pcap_node->get_mbuf_dir();
2226    CCorePerPort *lp_port=&m_ports[dir];
2227    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2228
2229    send_pkt(lp_port, m, lp_stats);
2230
2231    return (0);
2232}
2233
2234/**
2235 * slow path code goes here
2236 *
2237 */
2238int CCoreEthIFStateless::handle_slow_path_node(CGenNode * no) {
2239
2240    if (no->m_type == CGenNode::PCAP_PKT) {
2241        return send_pcap_node((CGenNodePCAP *)no);
2242    }
2243
2244    return (-1);
2245}
2246
2247void CCoreEthIF::apply_client_cfg(const ClientCfg *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2248
2249    assert(cfg);
2250
2251    /* take the right direction config */
2252    const ClientCfgDir &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2253
2254    /* dst mac */
2255    if (cfg_dir.has_dst_mac_addr()) {
2256        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2257    }
2258
2259    /* src mac */
2260    if (cfg_dir.has_src_mac_addr()) {
2261        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2262    }
2263
2264    /* VLAN */
2265    if (cfg_dir.has_vlan()) {
2266        add_vlan(m, cfg_dir.get_vlan());
2267    }
2268}
2269
2270
/* Mark the mbuf for HW VLAN tag insertion with the given tag id.
   NOTE(review): ol_flags is overwritten (=, not |=) - any previously set
   offload flags are discarded; confirm all callers pass fresh mbufs. */
void CCoreEthIF::add_vlan(rte_mbuf_t *m, uint16_t vlan_id) {
    m->ol_flags = PKT_TX_VLAN_PKT;
    m->l2_len   = 14;
    m->vlan_tci = vlan_id;
}
2276
2277/**
2278 * slow path features goes here (avoid multiple IFs)
2279 *
2280 */
2281void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2282
2283
2284    /* MAC ovverride */
2285    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2286        /* client side */
2287        if ( node->is_initiator_pkt() ) {
2288            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2289        }
2290    }
2291
2292    /* flag is faster than checking the node pointer (another cacheline) */
2293    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2294        apply_client_cfg(node->m_client_cfg, m, dir, p);
2295    }
2296
2297}
2298
2299int CCoreEthIF::send_node(CGenNode * node) {
2300
2301    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2302        pkt_dir_t       dir;
2303        rte_mbuf_t *    m=node->get_cache_mbuf();
2304        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2305        CCorePerPort *  lp_port=&m_ports[dir];
2306        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2307        rte_pktmbuf_refcnt_update(m,1);
2308        send_pkt(lp_port,m,lp_stats);
2309        return (0);
2310    }
2311
2312
2313    CFlowPktInfo *  lp=node->m_pkt_info;
2314    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2315
2316    pkt_dir_t       dir;
2317    bool            single_port;
2318
2319    dir         = node->cur_interface_dir();
2320    single_port = node->get_is_all_flow_from_same_dir() ;
2321
2322
2323    if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2324        /* which vlan to choose 0 or 1*/
2325        uint8_t vlan_port = (node->m_src_ip &1);
2326        uint16_t vlan_id  = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2327
2328        if (likely( vlan_id >0 ) ) {
2329            dir = dir ^ vlan_port;
2330        }else{
2331            /* both from the same dir but with VLAN0 */
2332            vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2333            dir = dir ^ 0;
2334        }
2335
2336        add_vlan(m, vlan_id);
2337    }
2338
2339    CCorePerPort *lp_port = &m_ports[dir];
2340    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2341
2342    if (unlikely(m==0)) {
2343        lp_stats->m_tx_alloc_error++;
2344        return(0);
2345    }
2346
2347    /* update mac addr dest/src 12 bytes */
2348    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2349    uint8_t p_id = lp_port->m_port->get_port_id();
2350
2351    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2352
2353     /* when slowpath features are on */
2354    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2355        handle_slowpath_features(node, m, p, dir);
2356    }
2357
2358
2359    if ( unlikely( node->is_rx_check_enabled() ) ) {
2360        lp_stats->m_tx_rx_check_pkt++;
2361        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2362        lp_stats->m_template.inc_template( node->get_template_id( ));
2363    }else{
2364        // cache only if it is not sample as this is more complex mbuf struct
2365        if ( unlikely( node->can_cache_mbuf() ) ) {
2366            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2367                m_mbuf_cache++;
2368                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2369                    /* limit the number of object to cache */
2370                    node->set_mbuf_cache_dir( dir);
2371                    node->set_cache_mbuf(m);
2372                    rte_pktmbuf_refcnt_update(m,1);
2373                }
2374            }
2375        }
2376    }
2377
2378    /*printf("send packet -- \n");
2379      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2380
2381    /* send the packet */
2382    send_pkt(lp_port,m,lp_stats);
2383    return (0);
2384}
2385
2386
2387int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2388    assert(p);
2389    assert(dir<2);
2390
2391    CCorePerPort *  lp_port=&m_ports[dir];
2392    uint8_t p_id=lp_port->m_port->get_port_id();
2393    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2394    return (0);
2395}
2396
2397pkt_dir_t
2398CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2399
2400    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2401        if (m_ports[dir].m_port->get_port_id() == port_id) {
2402            return dir;
2403        }
2404    }
2405
2406    return (CS_INVALID);
2407}
2408
/* Latency port backed by real HW TX/RX queues on a physical interface. */
class CLatencyHWPort : public CPortLatencyHWBase {
public:
    void Create(CPhyEthIF  * p,
                uint8_t tx_queue,
                uint8_t rx_queue){
        m_port=p;
        m_tx_queue_id=tx_queue;
        m_rx_queue_id=rx_queue;
    }

    /* Send one latency packet; tags it with VLAN0 when vlan mode is on.
       Returns 0 on success, -1 (after freeing m) when the queue is full. */
    virtual int tx(rte_mbuf_t * m){
        rte_mbuf_t * tx_pkts[2];
        tx_pkts[0]=m;
        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
            /* vlan mode is the default */
            /* set the vlan */
            m->ol_flags = PKT_TX_VLAN_PKT;
            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
            m->l2_len   =14;
        }
        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
        if ( res == 0 ) {
            rte_pktmbuf_free(m);
            //printf(" queue is full for latency packet !!\n");
            return (-1);

        }
#if 0
        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
        utl_DumpBuffer(stdout,p1,pkt_size1,0);
#endif

        return (0);
    }

    /* Receive a single packet, or NULL when the RX queue is empty. */
    virtual rte_mbuf_t * rx(){
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
        if (cnt) {
            return (rx_pkts[0]);
        }else{
            return (0);
        }
    }

    /* Receive up to nb_pkts packets; returns the count actually received. */
    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts){
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
        return (cnt);
    }


private:
    CPhyEthIF  * m_port;          /* backing physical interface */
    uint8_t      m_tx_queue_id ;  /* HW queue used for latency TX */
    uint8_t      m_rx_queue_id;   /* HW queue used for latency RX */
};
2467
2468
/* Latency port for VM mode: instead of touching HW queues, TX packets are
   wrapped in a node and enqueued on a ring toward the DP thread; RX is a
   no-op here (packets arrive via the DP core's shared queue). */
class CLatencyVmPort : public CPortLatencyHWBase {
public:
    void Create(uint8_t port_index,CNodeRing * ring,
                CLatencyManager * mgr){
        m_dir        = (port_index%2);
        m_ring_to_dp = ring;
        m_mgr        = mgr;
    }

    /* Hand the packet to the DP thread via the message ring.
       Returns 0 on success, -1 when node allocation or enqueue fails. */
    virtual int tx(rte_mbuf_t * m){
        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
            /* vlan mode is the default */
            /* set the vlan */
            m->ol_flags = PKT_TX_VLAN_PKT;
            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
            m->l2_len   =14;
        }

        /* allocate node */
        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if ( node ) {
            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
            node->m_dir      = m_dir;
            node->m_pkt      = m;
            node->m_latency_offset = m_mgr->get_latency_header_offset();

            if ( m_ring_to_dp->Enqueue((CGenNode*)node) ==0 ){
                return (0);
            }
        }
        return (-1);
    }

    /* no direct RX in VM mode */
    virtual rte_mbuf_t * rx(){
        return (0);
    }

    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts){
        return (0);
    }


private:
    uint8_t                          m_dir;          /* 0=client, 1=server side */
    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
    CLatencyManager *                m_mgr;          /* owning latency manager */
};
2517
2518
2519
/* Aggregated per-port counters as published to the stats consumers. */
class CPerPortStats {
public:
    uint64_t opackets;   /* TX packets */
    uint64_t obytes;     /* TX bytes */
    uint64_t ipackets;   /* RX packets */
    uint64_t ibytes;     /* RX bytes */
    uint64_t ierrors;    /* RX errors */
    uint64_t oerrors;    /* TX errors */
    /* per flow-stat rule TX counters (current and previous snapshot) */
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;
    float     m_total_tx_pps;

    float     m_total_rx_bps;
    float     m_total_rx_pps;

    float     m_cpu_util;
};
2539
/* Snapshot of global + per-port statistics, with text and JSON dumpers. */
class CGlobalStats {
public:
    enum DumpFormat {
        dmpSTANDARD,   /* verbose multi-line dump */
        dmpTABLE       /* compact tabular dump */
    };

    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    uint64_t  m_total_alloc_error;
    uint64_t  m_total_queue_full;
    uint64_t  m_total_queue_drop;

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    /* NAT-learning related counters */
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;

    float     m_socket_util;

    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;
    float m_cpu_util_raw;
    float m_rx_cpu_util;
    float m_bw_per_core;
    uint8_t m_threads;

    uint32_t      m_num_of_ports;
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    void dump_json(std::string & json, bool baseline);
private:
    /* helpers that render one "name":value JSON fragment */
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2603
2604std::string CGlobalStats::get_field(const char *name, float &f){
2605    char buff[200];
2606    if(f <= -10.0 or f >= 10.0)
2607        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2608    else
2609        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2610    return (std::string(buff));
2611}
2612
2613std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2614    char buff[200];
2615    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2616    return (std::string(buff));
2617}
2618
2619std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2620    char buff[200];
2621    if(f <= -10.0 or f >= 10.0)
2622        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2623    else
2624        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2625    return (std::string(buff));
2626}
2627
2628std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2629    char buff[200];
2630    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2631    return (std::string(buff));
2632}
2633
2634
2635void CGlobalStats::dump_json(std::string & json, bool baseline){
2636    /* refactor this to JSON */
2637
2638    json="{\"name\":\"trex-global\",\"type\":0,";
2639    if (baseline) {
2640        json += "\"baseline\": true,";
2641    }
2642
2643    json +="\"data\":{";
2644
2645    char ts_buff[200];
2646    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
2647    json+= std::string(ts_buff);
2648
2649#define GET_FIELD(f) get_field(#f, f)
2650#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)
2651
2652    json+=GET_FIELD(m_cpu_util);
2653    json+=GET_FIELD(m_cpu_util_raw);
2654    json+=GET_FIELD(m_bw_per_core);
2655    json+=GET_FIELD(m_rx_cpu_util);
2656    json+=GET_FIELD(m_platform_factor);
2657    json+=GET_FIELD(m_tx_bps);
2658    json+=GET_FIELD(m_rx_bps);
2659    json+=GET_FIELD(m_tx_pps);
2660    json+=GET_FIELD(m_rx_pps);
2661    json+=GET_FIELD(m_tx_cps);
2662    json+=GET_FIELD(m_tx_expected_cps);
2663    json+=GET_FIELD(m_tx_expected_pps);
2664    json+=GET_FIELD(m_tx_expected_bps);
2665    json+=GET_FIELD(m_total_alloc_error);
2666    json+=GET_FIELD(m_total_queue_full);
2667    json+=GET_FIELD(m_total_queue_drop);
2668    json+=GET_FIELD(m_rx_drop_bps);
2669    json+=GET_FIELD(m_active_flows);
2670    json+=GET_FIELD(m_open_flows);
2671
2672    json+=GET_FIELD(m_total_tx_pkts);
2673    json+=GET_FIELD(m_total_rx_pkts);
2674    json+=GET_FIELD(m_total_tx_bytes);
2675    json+=GET_FIELD(m_total_rx_bytes);
2676
2677    json+=GET_FIELD(m_total_clients);
2678    json+=GET_FIELD(m_total_servers);
2679    json+=GET_FIELD(m_active_sockets);
2680    json+=GET_FIELD(m_socket_util);
2681
2682    json+=GET_FIELD(m_total_nat_time_out);
2683    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
2684    json+=GET_FIELD(m_total_nat_no_fid );
2685    json+=GET_FIELD(m_total_nat_active );
2686    json+=GET_FIELD(m_total_nat_syn_wait);
2687    json+=GET_FIELD(m_total_nat_open   );
2688    json+=GET_FIELD(m_total_nat_learn_error);
2689
2690    int i;
2691    for (i=0; i<(int)m_num_of_ports; i++) {
2692        CPerPortStats * lp=&m_port[i];
2693        json+=GET_FIELD_PORT(i,opackets) ;
2694        json+=GET_FIELD_PORT(i,obytes)   ;
2695        json+=GET_FIELD_PORT(i,ipackets) ;
2696        json+=GET_FIELD_PORT(i,ibytes)   ;
2697        json+=GET_FIELD_PORT(i,ierrors)  ;
2698        json+=GET_FIELD_PORT(i,oerrors)  ;
2699        json+=GET_FIELD_PORT(i,m_total_tx_bps);
2700        json+=GET_FIELD_PORT(i,m_total_tx_pps);
2701        json+=GET_FIELD_PORT(i,m_total_rx_bps);
2702        json+=GET_FIELD_PORT(i,m_total_rx_pps);
2703        json+=GET_FIELD_PORT(i,m_cpu_util);
2704    }
2705    json+=m_template.dump_as_json("template");
2706    json+="\"unknown\":0}}"  ;
2707}
2708
/**
 * Print a human-readable one-screen summary of the global counters to 'fd'.
 * When a NAT learn mode is active, NAT counters are appended on the same
 * lines as the TX/RX totals so the table stays compact.
 */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    /* TX line; in NAT learn mode the timeout counter shares the line */
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    /* expected (configured) load vs the measured load printed above */
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    /* error counters are printed only when non-zero to reduce noise */
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
2793
2794
2795void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
2796    int i;
2797    int port_to_show=m_num_of_ports;
2798    if (port_to_show>4) {
2799        port_to_show=4;
2800        fprintf (fd," per port - limited to 4   \n");
2801    }
2802
2803
2804    if ( mode== dmpSTANDARD ){
2805        fprintf (fd," --------------- \n");
2806        for (i=0; i<(int)port_to_show; i++) {
2807            CPerPortStats * lp=&m_port[i];
2808            fprintf(fd,"port : %d \n",(int)i);
2809            fprintf(fd,"------------\n");
2810#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2811#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2812            GS_DP_A4(opackets);
2813            GS_DP_A4(obytes);
2814            GS_DP_A4(ipackets);
2815            GS_DP_A4(ibytes);
2816            GS_DP_A(ierrors);
2817            GS_DP_A(oerrors);
2818            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2819        }
2820    }else{
2821        fprintf(fd," %10s ","ports");
2822        for (i=0; i<(int)port_to_show; i++) {
2823            fprintf(fd,"| %15d ",i);
2824        }
2825        fprintf(fd,"\n");
2826        fprintf(fd," -----------------------------------------------------------------------------------------\n");
2827        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
2828        };
2829        for (i=0; i<7; i++) {
2830            fprintf(fd," %10s ",names[i].c_str());
2831            int j=0;
2832            for (j=0; j<port_to_show;j++) {
2833                CPerPortStats * lp=&m_port[j];
2834                uint64_t cnt;
2835                switch (i) {
2836                case 0:
2837                    cnt=lp->opackets;
2838                    fprintf(fd,"| %15lu ",cnt);
2839
2840                    break;
2841                case 1:
2842                    cnt=lp->obytes;
2843                    fprintf(fd,"| %15lu ",cnt);
2844
2845                    break;
2846                case 2:
2847                    cnt=lp->ipackets;
2848                    fprintf(fd,"| %15lu ",cnt);
2849
2850                    break;
2851                case 3:
2852                    cnt=lp->ibytes;
2853                    fprintf(fd,"| %15lu ",cnt);
2854
2855                    break;
2856                case 4:
2857                    cnt=lp->ierrors;
2858                    fprintf(fd,"| %15lu ",cnt);
2859
2860                    break;
2861                case 5:
2862                    cnt=lp->oerrors;
2863                    fprintf(fd,"| %15lu ",cnt);
2864
2865                    break;
2866                case 6:
2867                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2868                    break;
2869                default:
2870                    cnt=0xffffff;
2871                }
2872            } /* ports */
2873            fprintf(fd, "\n");
2874        }/* fields*/
2875    }
2876
2877
2878}
2879
/**
 * The main TRex application object: owns the physical ports, the DP core
 * interfaces, the RX cores (stateful and stateless), the stats aggregation
 * and the ZMQ publisher.
 */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;


    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    bool Create();
    void Delete();
    int  ixgbe_prob_init();
    int  cores_prob_init();
    int  queues_prob_init();
    int  ixgbe_start();
    int  ixgbe_rx_queue_flush();
    void ixgbe_configure_mg();
    void rx_sl_configure();
    bool is_all_links_are_up(bool dump=false);
    void pre_test();

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     * (first cause wins - subsequent calls are ignored)
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    /* id of the core reserved for the RX thread (the last core),
       or -1 when the RX thread is disabled */
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    /* number of cores available for TX (DP) work */
    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port;
    uint32_t    m_cores_to_dual_ports; /* number of ports that will handle dual ports */
    uint16_t    m_latency_tx_queue_id; /* TX queue used for latency measurement packets */
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];    /* points into _sf or _sl per the run mode */
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock; // serializes control-plane access

    TrexMonitor         m_monitor;

    shutdown_rc_e       m_mark_for_shutdown; // pending shutdown cause (SHUTDOWN_NONE when none)

public:
    TrexStateless       *m_trex_stateless;

};
3061
// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
// Also records a per-port stats baseline so traffic generated here is excluded from test counters.
void CGlobalTRex::pre_test() {
    CPretest pretest(m_max_ports);
    bool resolve_needed = false;
    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};

    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        CPhyEthIF *pif = &m_ports[port_id];
        // Configure port to send all packets to software
        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
        // an all-zero dest MAC means it was not supplied in the config - resolve it via ARP
        if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
            resolve_needed = true;
        } else {
            resolve_needed = false;
        }
        // an all-zero src MAC means it was not supplied - take the port's HW address
        if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
            rte_eth_macaddr_get(port_id,
                                (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
            CGlobalInfo::m_options.m_ip_cfg[port_id].set_grat_arp_needed(true);
        }  else {
            // If we got src MAC from config file, do not send gratuitous ARP for it (for compatibility with old behaviour)
            CGlobalInfo::m_options.m_ip_cfg[port_id].set_grat_arp_needed(false);
        }
        pretest.set_port_params(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id]
                                , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
                                , resolve_needed);
    }

    pretest.send_grat_arp_all();
    bool ret;
    int count = 0;
    // retry resolution up to 10 times; per-port failure is reported below
    do {
        ret = pretest.resolve_all();
        count++;
    } while ((ret != true) && (count < 10));

    if ( CGlobalInfo::m_options.preview.getVMode() > 0) {
        pretest.dump(stdout);
    }
    uint8_t mac[ETHER_ADDR_LEN];
    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
            // we don't have dest MAC. Get it from what we resolved.
            uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
            if (! pretest.get_mac(port_id, ip, mac)) {
                fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
                        , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);
                exit(1);
            }
            memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);

            // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
            if (pretest.is_loopback(port_id))
                CGlobalInfo::m_options.m_ip_cfg[port_id].set_grat_arp_needed(false);
        }

        // update statistics baseline, so we can ignore what happened in pre test phase
        CPhyEthIF *pif = &m_ports[port_id];
        CPreTestStats pre_stats = pretest.get_stats(port_id);
        pif->set_ignore_stats_base(pre_stats);

        // Configure port back to normal mode. Only relevant packets handled by software.
        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, false);


        /* set resolved IPv4 */
        uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
        const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;
        if (dg) {
            m_ports[port_id].get_port_attr()->get_dest().set_dest_ipv4(dg, dst_mac);
        } else {
            m_ports[port_id].get_port_attr()->get_dest().set_dest_mac(dst_mac);
        }


    }
}
3139
3140/**
3141 * check for a single core
3142 *
3143 * @author imarom (19-Nov-15)
3144 *
3145 * @param thread_id
3146 */
3147void
3148CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3149
3150    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3151
3152    /* fast path check */
3153    if ( likely ( ring->isEmpty() ) ) {
3154        return;
3155    }
3156
3157    while ( true ) {
3158        CGenNode * node = NULL;
3159        if (ring->Dequeue(node) != 0) {
3160            break;
3161        }
3162        assert(node);
3163
3164        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3165        msg->handle();
3166        delete msg;
3167    }
3168
3169}
3170
3171/**
3172 * check for messages that arrived from DP to CP
3173 *
3174 */
3175void
3176CGlobalTRex::check_for_dp_messages() {
3177
3178    /* for all the cores - check for a new message */
3179    for (int i = 0; i < get_cores_tx(); i++) {
3180        check_for_dp_message_from_core(i);
3181    }
3182}
3183
3184bool CGlobalTRex::is_all_links_are_up(bool dump){
3185    bool all_link_are=true;
3186    int i;
3187    for (i=0; i<m_max_ports; i++) {
3188        CPhyEthIF * _if=&m_ports[i];
3189        _if->get_port_attr()->update_link_status();
3190        if ( dump ){
3191            _if->dump_stats(stdout);
3192        }
3193        if ( _if->get_port_attr()->is_link_up() == false){
3194            all_link_are=false;
3195            break;
3196        }
3197    }
3198    return (all_link_are);
3199}
3200
3201void CGlobalTRex::try_stop_all_cores(){
3202
3203    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3204    TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3205    send_message_all_dp(dp_msg);
3206    if (get_is_stateless()) {
3207        send_message_to_rx(rx_msg);
3208    }
3209    delete dp_msg;
3210    // no need to delete rx_msg. Deleted by receiver
3211    bool all_core_finished = false;
3212    int i;
3213    for (i=0; i<20; i++) {
3214        if ( is_all_cores_finished() ){
3215            all_core_finished =true;
3216            break;
3217        }
3218        delay(100);
3219    }
3220    if ( all_core_finished ){
3221        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3222        printf(" All cores stopped !! \n");
3223    }else{
3224        printf(" ERROR one of the DP core is stucked !\n");
3225    }
3226}
3227
3228
3229int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3230
3231    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3232    int i;
3233
3234    for (i=0; i<max_threads; i++) {
3235        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3236        ring->Enqueue((CGenNode*)msg->clone());
3237    }
3238    return (0);
3239}
3240
3241int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3242    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3243    ring->Enqueue((CGenNode *) msg);
3244
3245    return (0);
3246}
3247
3248
3249int  CGlobalTRex::ixgbe_rx_queue_flush(){
3250    int i;
3251    for (i=0; i<m_max_ports; i++) {
3252        CPhyEthIF * _if=&m_ports[i];
3253        _if->flush_rx_queue();
3254    }
3255    return (0);
3256}
3257
3258
3259void CGlobalTRex::ixgbe_configure_mg(void) {
3260    int i;
3261    CLatencyManagerCfg mg_cfg;
3262    mg_cfg.m_max_ports = m_max_ports;
3263
3264    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;
3265
3266    if ( latency_rate ) {
3267        mg_cfg.m_cps = (double)latency_rate ;
3268    } else {
3269        // If RX core needed, we need something to make the scheduler running.
3270        // If nothing configured, send 1 CPS latency measurement packets.
3271        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
3272            mg_cfg.m_cps = 1.0;
3273        } else {
3274            mg_cfg.m_cps = 0;
3275        }
3276    }
3277
3278    if ( get_vm_one_queue_enable() ) {
3279        /* vm mode, indirect queues  */
3280        for (i=0; i<m_max_ports; i++) {
3281
3282            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
3283
3284            uint8_t thread_id = (i>>1);
3285
3286            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3287            m_latency_vm_vports[i].Create((uint8_t)i,r,&m_mg);
3288
3289            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
3290        }
3291
3292    }else{
3293        for (i=0; i<m_max_ports; i++) {
3294            CPhyEthIF * _if=&m_ports[i];
3295            _if->dump_stats(stdout);
3296            m_latency_vports[i].Create(_if,m_latency_tx_queue_id,1);
3297
3298            mg_cfg.m_ports[i] =&m_latency_vports[i];
3299        }
3300    }
3301
3302
3303    m_mg.Create(&mg_cfg);
3304    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
3305}
3306
3307// init m_rx_sl object for stateless rx core
3308void CGlobalTRex::rx_sl_configure(void) {
3309    CRxSlCfg rx_sl_cfg;
3310    int i;
3311
3312    rx_sl_cfg.m_max_ports = m_max_ports;
3313
3314    if ( get_vm_one_queue_enable() ) {
3315        /* vm mode, indirect queues  */
3316        for (i=0; i < m_max_ports; i++) {
3317            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3318            uint8_t thread_id = (i >> 1);
3319            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3320            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg);
3321            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3322        }
3323    } else {
3324        for (i = 0; i < m_max_ports; i++) {
3325            CPhyEthIF * _if = &m_ports[i];
3326            m_latency_vports[i].Create(_if, m_latency_tx_queue_id, 1);
3327            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3328        }
3329    }
3330
3331    m_rx_sl.create(rx_sl_cfg);
3332}
3333
/**
 * Bring up all DPDK ports: configure RX/TX queues (single-queue layout in
 * VM mode, multi-queue layout on bare metal), start each port, verify all
 * links are up, flush stale RX packets, configure the RX core (stateful or
 * stateless) and wire one CCoreEthIF per DP core.
 * Returns 0; fatal errors terminate via rte_exit().
 */
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {

        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        /* last TX queue if for latency check */
        if ( get_vm_one_queue_enable() ) {
            /* one tx one rx */

            /* VMXNET3 does claim to support 16K but somehow does not work */
            /* reduce to 2000 */
            m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000;

            _if->configure(1,
                           1,
                           &m_port_cfg.m_port_conf);

            /* will not be used */
            m_latency_tx_queue_id= m_cores_to_dual_ports;

            socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
            assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


            /* single RX queue 0, mbufs from the port's NUMA socket pool */
            _if->set_rx_queue(0);
            _if->rx_queue_setup(0,
                                RTE_TEST_RX_DESC_VM_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);

            int qid;
            for ( qid=0; qid<(m_max_queues_per_port); qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_VM_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);

            }

        }else{
            /* bare metal: 2 RX queues, one TX queue per dual-port core plus
               one extra TX queue for latency packets */
            _if->configure(2,
                           m_cores_to_dual_ports+1,
                           &m_port_cfg.m_port_conf);

            /* the latency queue for latency measurement packets */
            m_latency_tx_queue_id= m_cores_to_dual_ports;

            socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
            assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


            /* drop queue */
            _if->rx_queue_setup(0,
                                RTE_TEST_RX_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


            /* set the filter queue */
            _if->set_rx_queue(1);
            /* latency measurement ring is 1 */
            _if->rx_queue_setup(1,
                                RTE_TEST_RX_LATENCY_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);

            int qid;
            for ( qid=0; qid<(m_max_queues_per_port+1); qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);

            }

        }


        _if->stats_clear();

        _if->start();
        _if->configure_rx_duplicate_rules();

        /* flow control is disabled unless the user asked to keep it or the
           driver cannot change it */
        if ( ! get_vm_one_queue_enable()  && ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now
            dump_links_status(stdout);
            rte_exit(EXIT_FAILURE, " "
                     " one of the link is down \n");
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    /* stateful mode uses the latency manager as its RX core; stateless mode
       uses the dedicated stateless RX core object */
    if (! get_is_stateless()) {
        ixgbe_configure_mg();
    } else {
        rx_sl_configure();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if ( get_vm_one_queue_enable() ) {
        lat_q_id = 0;
    } else {
        lat_q_id = get_cores_tx() / get_base_num_cores();
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        /* each DP core drives a pair of ports (even + odd) */
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3502
3503static void trex_termination_handler(int signum);
3504
3505void CGlobalTRex::register_signals() {
3506    struct sigaction action;
3507
3508    /* handler */
3509    action.sa_handler = trex_termination_handler;
3510
3511    /* blocked signals during handling */
3512    sigemptyset(&action.sa_mask);
3513    sigaddset(&action.sa_mask, SIGINT);
3514    sigaddset(&action.sa_mask, SIGTERM);
3515
3516    /* no flags */
3517    action.sa_flags = 0;
3518
3519    /* register */
3520    sigaction(SIGINT,  &action, NULL);
3521    sigaction(SIGTERM, &action, NULL);
3522}
3523
3524bool CGlobalTRex::Create(){
3525    CFlowsYamlInfo     pre_yaml_info;
3526
3527    register_signals();
3528
3529    m_stats_cnt =0;
3530    if (!get_is_stateless()) {
3531        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
3532    }
3533
3534    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
3535                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
3536        return (false);
3537    }
3538
3539    if ( pre_yaml_info.m_vlan_info.m_enable ){
3540        CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
3541    }
3542    /* End update pre flags */
3543
3544    ixgbe_prob_init();
3545    cores_prob_init();
3546    queues_prob_init();
3547
3548    /* allocate rings */
3549    assert( CMsgIns::Ins()->Create(get_cores_tx()) );
3550
3551    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
3552        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
3553        assert(0);
3554    }
3555
3556    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
3557        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
3558        assert(0);
3559    }
3560
3561    /* allocate the memory */
3562
3563    uint32_t rx_mbuf = 0 ;
3564
3565    if ( get_vm_one_queue_enable() ) {
3566        rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
3567    }else{
3568        rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
3569    }
3570
3571    CGlobalInfo::init_pools(rx_mbuf);
3572    ixgbe_start();
3573    dump_config(stdout);
3574
3575    /* start stateless */
3576    if (get_is_stateless()) {
3577
3578        TrexStatelessCfg cfg;
3579
3580        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
3581                                             global_platform_cfg_info.m_zmq_rpc_port,
3582                                             &m_cp_lock);
3583
3584        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
3585        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
3586        cfg.m_rpc_server_verbose = false;
3587        cfg.m_platform_api       = new TrexDpdkPlatformApi();
3588        cfg.m_publisher          = &m_zmq_publisher;
3589
3590        m_trex_stateless = new TrexStateless(cfg);
3591    }
3592
3593    return (true);
3594
3595}
/* Tear down the ZMQ publisher.
   NOTE(review): objects allocated in Create() (m_trex_stateless, the
   TrexDpdkPlatformApi it owns) are not released here — presumably freed
   at process exit; verify if Delete() is ever called mid-run. */
void CGlobalTRex::Delete(){
    m_zmq_publisher.Delete();
}
3599
3600
3601
/* Probe the Ethernet ports found by the DPDK EAL and validate them
   against the user configuration: even port count, within limits,
   all ports driven by the same (supported) driver, acceptable
   firmware version.  Fatal mismatches abort via rte_exit()/exit().
   Returns 0 on success. */
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    printf(" Number of ports found: %d \n",m_max_ports);

    /* ports are always used as dual-port pairs (client/server side) */
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports %d should be even, mask the one port in the configuration file  \n, ",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum ports supported are %d, use the configuration file to set the expected number of ports   \n",TREX_MAX_PORTS);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports you expected more %d,use the configuration file to set the expected number of ports   \n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    /* query port 0 as the representative device */
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
    }



    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
        printf(" Error: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
               ,dev_info.driver_name);
        exit(1);
    }

    /* all ports must use the same driver as port 0 — mixed NIC types
       are not supported */
    int i;
    struct rte_eth_dev_info dev_info1;

    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if ( get_vm_one_queue_enable() ) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf(" ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue \n");
            exit(1);
        }
    }
    return (0);
}
3692
3693int  CGlobalTRex::cores_prob_init(){
3694    m_max_cores = rte_lcore_count();
3695    assert(m_max_cores>0);
3696    return (0);
3697}
3698
/* Derive the TX-queue layout from the core count: one TX queue per
   core serving a dual-port pair.  Aborts via rte_exit() when the
   topology cannot be satisfied.  Returns 0 on success. */
int  CGlobalTRex::queues_prob_init(){

    /* need at least one master core plus one DP core */
    if (m_max_cores < 2) {
        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
    }

    /* every dual-port pair needs its own TX core */
    assert((m_max_ports>>1) <= get_cores_tx() );

    m_cores_mul = CGlobalInfo::m_options.preview.getCores();

    m_cores_to_dual_ports  = m_cores_mul;

    /* core 0 - control
       -core 1 - port 0/1
       -core 2 - port 2/3
       -core 3 - port 0/1
       -core 4 - port 2/3

       m_cores_to_dual_ports = 2;
    */

    /* number of queue - 1 per core for dual ports*/
    m_max_queues_per_port  = m_cores_to_dual_ports;

    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
        rte_exit(EXIT_FAILURE,
                 "maximum number of queue should be maximum %d  \n",BP_MAX_TX_QUEUE);
    }

    assert(m_max_queues_per_port>0);
    return (0);
}
3731
3732
/* Print the probed port/core/queue topology to fd. */
void CGlobalTRex::dump_config(FILE *fd){
    fprintf(fd," number of ports         : %u \n",m_max_ports);
    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
}
3738
3739
3740void CGlobalTRex::dump_links_status(FILE *fd){
3741    for (int i=0; i<m_max_ports; i++) {
3742        m_ports[i].get_port_attr()->update_link_status_nowait();
3743        m_ports[i].get_port_attr()->dump_link(fd);
3744    }
3745}
3746
3747
/* Print the end-of-run summary: HW port counters vs software (per-core)
   TX counters, ARP traffic, and latency figures when enabled. */
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    /* software-side counters, summed over all DP cores */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* hardware-side counters, summed over all ports */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency packets are sent by the RX core, not the DP cores */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* rx > tx means no drops; warn if rx exceeds tx by more than 1%
       (likely duplicated packets or external traffic) */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
3818
3819
3820void CGlobalTRex::update_stats(){
3821
3822    int i;
3823    for (i=0; i<m_max_ports; i++) {
3824        CPhyEthIF * _if=&m_ports[i];
3825        _if->update_counters();
3826    }
3827    uint64_t total_open_flows=0;
3828
3829
3830    CFlowGenListPerThread   * lpt;
3831    for (i=0; i<get_cores_tx(); i++) {
3832        lpt = m_fl.m_threads_info[i];
3833        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
3834    }
3835    m_last_total_cps = m_cps.add(total_open_flows);
3836
3837}
3838
3839tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
3840    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
3841}
3842
// read stats. Return read value, and clear.
// Gathers the per-core TX counters for flow-stat rule `index` on `port`
// into the global accumulator, returns the delta since the previous read,
// and snapshots the accumulator so the next read starts from zero.
// NOTE(review): when is_lat is true, `index - MAX_FLOW_STATS` indexes
// m_lat_data — this assumes latency rules always have index >= MAX_FLOW_STATS;
// confirm with callers.
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    /* rebuild the accumulator from scratch out of the per-core counters */
    m_stats.m_port[port].m_tx_per_flow[index].clear();

    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        port0 = lpt->getDualPortId() * 2;
        /* each DP thread serves a dual-port pair (port0, port0+1);
           m_stats[port - port0] selects this thread's side of the pair */
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
3869
/* Aggregate a full snapshot into `stats`: per-port HW counters and
   rates, per-core software counters (alloc errors, queue fulls/drops,
   templates, per-flow TX), NAT bookkeeping, socket utilization, and
   derived totals (bps/pps/cps scaled by the platform factor). */
void CGlobalTRex::get_stats(CGlobalStats & stats){

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port hardware counters and rates */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* clear per-flow accumulators before re-summing them from the
           per-core counters in the loop below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* per-core software counters */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        /* m_stats[0]/m_stats[1] are the two sides of this core's dual port */
        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        /* NOTE(review): syn_wait = add_flow_id - wait_ack_state; confirm
           wait_ack_state counts flows that progressed past SYN */
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        uint8_t port0 = lpt->getDualPortId() *2;
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* NOTE(review): drops below 10% of tx rate are reported as zero —
       presumably to filter measurement noise; confirm the threshold */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* avoid divide-by-zero on an idle system */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4045
4046float
4047CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4048    CPhyEthIF * _if = &m_ports[port_id];
4049
4050    float    tmp = 0;
4051    uint8_t  cnt = 0;
4052    for (const auto &p : _if->get_core_list()) {
4053        uint8_t core_id = p.first;
4054        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4055        if (lp->is_port_active(port_id)) {
4056            tmp += lp->m_cpu_cp_u.GetVal();
4057            cnt++;
4058        }
4059    }
4060
4061    return ( (cnt > 0) ? (tmp / cnt) : 0);
4062
4063}
4064
4065bool CGlobalTRex::sanity_check(){
4066
4067    CFlowGenListPerThread   * lpt;
4068    uint32_t errors=0;
4069    int i;
4070    for (i=0; i<get_cores_tx(); i++) {
4071        lpt = m_fl.m_threads_info[i];
4072        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4073    }
4074
4075    if ( errors ) {
4076        printf(" ERRORs sockets allocation errors! \n");
4077        printf(" you should allocate more clients in the pool \n");
4078        return(true);
4079    }
4080    return ( false);
4081}
4082
4083
4084/* dump the template info */
4085void CGlobalTRex::dump_template_info(std::string & json){
4086    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4087    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4088
4089    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4090    int i;
4091    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4092        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4093        json+="\""+ r->m_name+"\"";
4094        json+=",";
4095    }
4096    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4097    json+="]}" ;
4098}
4099
/* Refresh counters and dump them to fd.  In interactive table mode the
   per-port and global sections honor the user's keyboard-toggled view
   settings; any other format (i.e. at exit) always dumps everything. */
void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){

    update_stats();
    get_stats(m_stats);

    if (format==CGlobalStats::dmpTABLE) {
        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
            /* per-port section */
            switch (m_io_modes.m_pp_mode ){
            case CTrexGlobalIoMode::ppDISABLE:
                fprintf(fd,"\n+Per port stats disabled \n");
                break;
            case CTrexGlobalIoMode::ppTABLE:
                fprintf(fd,"\n-Per port stats table \n");
                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
                break;
            case CTrexGlobalIoMode::ppSTANDARD:
                fprintf(fd,"\n-Per port stats - standard\n");
                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
                break;
            };

            /* aggregated (all-ports) section */
            switch (m_io_modes.m_ap_mode ){
            case   CTrexGlobalIoMode::apDISABLE:
                fprintf(fd,"\n+Global stats disabled \n");
                break;
            case   CTrexGlobalIoMode::apENABLE:
                fprintf(fd,"\n-Global stats enabled \n");
                m_stats.DumpAllPorts(fd);
                break;
            };
        }
    }else{
        /* at exit, always dump in standard mode for scripts */
        m_stats.Dump(fd,format);
        m_stats.DumpAllPorts(fd);
    }

}
4138
/* Publish the periodic async telemetry over ZMQ: global stats, the
   generator state, and mode-specific extras (template info, rx-check,
   latency, stateless flow stats).
   @param sync_now  refresh counters before publishing
   @param baseline  mark the published samples as a baseline snapshot */
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);


    /* template info is only meaningful for stateful (YAML-driven) runs */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4184
/* Publish a barrier event with the given key so async subscribers can
   synchronize with the publisher stream. */
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    m_zmq_publisher.publish_barrier(key);
}
4189
4190void
4191CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4192    Json::Value data;
4193    data["port_id"] = port_id;
4194    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4195
4196    _attr->to_json(data["attr"]);
4197
4198    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4199}
4200
/* Slow-path tick of the master loop: poll link state, process keyboard
   input, run sanity checks, redraw the interactive console view, and
   publish async telemetry.  May mark the process for shutdown. */
void
CGlobalTRex::handle_slow_path() {
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard handling; handle_io_modes() returns true on quit request */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* \033[2J / \033[2H: ANSI clear-screen + cursor-home for the live view */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        /* clear the screen once when the view is first disabled */
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        /* memory pool dump is verbose — only every 4th tick */
        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* latency / rx-check views (stateful mode only) */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            switch (m_io_modes.m_l_mode) {
            case CTrexGlobalIoMode::lDISABLE:
                fprintf(stdout,"\n+Latency stats disabled \n");
                break;
            case CTrexGlobalIoMode::lENABLE:
                fprintf(stdout,"\n-Latency stats enabled \n");
                m_mg.DumpShort(stdout);
                break;
            case CTrexGlobalIoMode::lENABLE_Extended:
                fprintf(stdout,"\n-Latency stats extended \n");
                m_mg.Dump(stdout);
                break;
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4314
4315
4316void
4317CGlobalTRex::handle_fast_path() {
4318    /* check from messages from DP */
4319    check_for_dp_messages();
4320
4321    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4322    for (int i = 0; i < 1000; i++) {
4323        m_fl.UpdateFast();
4324
4325        if (get_is_stateless()) {
4326            m_rx_sl.update_cpu_util();
4327        }else{
4328            m_mg.update_fast();
4329        }
4330
4331        rte_pause();
4332    }
4333
4334
4335    if ( is_all_cores_finished() ) {
4336        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4337    }
4338}
4339
4340
4341/**
4342 * shutdown sequence
4343 *
4344 */
4345void CGlobalTRex::shutdown() {
4346    std::stringstream ss;
4347    ss << " *** TRex is shutting down - cause: '";
4348
4349    switch (m_mark_for_shutdown) {
4350
4351    case SHUTDOWN_TEST_ENDED:
4352        ss << "test has ended'";
4353        break;
4354
4355    case SHUTDOWN_CTRL_C:
4356        ss << "CTRL + C detected'";
4357        break;
4358
4359    case SHUTDOWN_SIGINT:
4360        ss << "received signal SIGINT'";
4361        break;
4362
4363    case SHUTDOWN_SIGTERM:
4364        ss << "received signal SIGTERM'";
4365        break;
4366
4367    case SHUTDOWN_RPC_REQ:
4368        ss << "server received RPC 'shutdown' request'";
4369        break;
4370
4371    default:
4372        assert(0);
4373    }
4374
4375    /* report */
4376    std::cout << ss.str() << "\n";
4377
4378    /* first stop the WD */
4379    TrexWatchDog::getInstance().stop();
4380
4381    /* stateless shutdown */
4382    if (get_is_stateless()) {
4383        m_trex_stateless->shutdown();
4384    }
4385
4386    if (!is_all_cores_finished()) {
4387        try_stop_all_cores();
4388    }
4389
4390    m_mg.stop();
4391
4392    delay(1000);
4393
4394    /* shutdown drivers */
4395    for (int i = 0; i < m_max_ports; i++) {
4396        m_ports[i].stop();
4397    }
4398    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4399        /* we should stop latency and exit to stop agents */
4400        exit(-1);
4401    }
4402}
4403
4404
/**
 * Master (control plane) main loop.
 *
 * Alternates between a fast path (every FASTPATH_DELAY_MS) and a slow
 * path (every SLOWPATH_DELAY_MS), holding m_cp_lock while doing work and
 * releasing it during the sleep so other control-plane users can run.
 *
 * @return 0 after a shutdown request terminates the loop
 */
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    /* milliseconds elapsed since the slow path last ran */
    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    /* register this loop with the watchdog (the '2' is presumably a
       timeout in seconds - TODO confirm against the monitor API) */
    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }


        /* sleep without holding the CP lock so RPC handlers can run */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.tickle();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4454
4455
4456
4457int CGlobalTRex::run_in_rx_core(void){
4458
4459    rte_thread_setname(pthread_self(), "TRex RX");
4460
4461    if (get_is_stateless()) {
4462        m_sl_rx_running = true;
4463        m_rx_sl.start();
4464        m_sl_rx_running = false;
4465    } else {
4466        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4467            m_sl_rx_running = false;
4468            m_mg.start(0, true);
4469        }
4470    }
4471
4472    return (0);
4473}
4474
4475int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
4476    std::stringstream ss;
4477
4478    ss << "Trex DP core " << int(virt_core_id);
4479    rte_thread_setname(pthread_self(), ss.str().c_str());
4480
4481    CPreviewMode *lp=&CGlobalInfo::m_options.preview;
4482    if ( lp->getSingleCore() &&
4483         (virt_core_id==2 ) &&
4484         (lp-> getCores() ==1) ){
4485        printf(" bypass this core \n");
4486        m_signal[virt_core_id]=1;
4487        return (0);
4488    }
4489
4490
4491    assert(m_fl_was_init);
4492    CFlowGenListPerThread   * lpt;
4493
4494    lpt = m_fl.m_threads_info[virt_core_id-1];
4495
4496    /* register a watchdog handle on current core */
4497    lpt->m_monitor.create(ss.str(), 1);
4498    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);
4499
4500    if (get_is_stateless()) {
4501        lpt->start_stateless_daemon(*lp);
4502    }else{
4503        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
4504    }
4505
4506    /* done - remove this from the watchdog (we might wait on join for a long time) */
4507    lpt->m_monitor.disable();
4508
4509    m_signal[virt_core_id]=1;
4510    return (0);
4511}
4512
4513
/**
 * Final report printed by the master when a stateful run ends:
 * per-interface stats, per-core generator stats, an optional latency
 * report, and the post-test summary. Frees the flow-gen list.
 *
 * @return 0
 */
int CGlobalTRex::stop_master(){

    /* let in-flight traffic settle before sampling the final counters */
    delay(1000);
    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");
    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    fprintf(stdout," ==================\n");
    fprintf(stdout," \n\n");

    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");

    CFlowGenListPerThread   * lpt;
    uint64_t total_tx_rx_check=0;

    int i;
    for (i=0; i<get_cores_tx(); i++) {
        /* NOTE(review): lpt is assigned but not used in this first loop */
        lpt = m_fl.m_threads_info[i];
        CCoreEthIF * erf_vif = m_cores_vif[i+1];

        erf_vif->DumpCoreStats(stdout);
        erf_vif->DumpIfStats(stdout);
        /* sum rx-check packets sent on both sides for the verification dump below */
        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
    }

    fprintf(stdout," ==================\n");
    fprintf(stdout," generators \n");
    fprintf(stdout," ==================\n");
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        lpt->m_node_gen.DumpHist(stdout);
        lpt->DumpStats(stdout);
    }
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf(stdout," ==================\n");
        fprintf(stdout," latency \n");
        fprintf(stdout," ==================\n");
        m_mg.DumpShort(stdout);
        m_mg.Dump(stdout);
        m_mg.DumpShortRxCheck(stdout);
        m_mg.DumpRxCheck(stdout);
        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
    }

    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    dump_post_test_stats(stdout);
    m_fl.Delete();

    return (0);
}
4567
4568bool CGlobalTRex::is_all_cores_finished() {
4569    int i;
4570    for (i=0; i<get_cores_tx(); i++) {
4571        if ( m_signal[i+1]==0){
4572            return false;
4573        }
4574    }
4575    if (m_sl_rx_running)
4576        return false;
4577
4578    return true;
4579}
4580
4581
4582int CGlobalTRex::start_master_stateless(){
4583    int i;
4584    for (i=0; i<BP_MAX_CORES; i++) {
4585        m_signal[i]=0;
4586    }
4587    m_fl.Create();
4588    m_expected_pps = 0;
4589    m_expected_cps = 0;
4590    m_expected_bps = 0;
4591
4592    m_fl.generate_p_thread_info(get_cores_tx());
4593    CFlowGenListPerThread   * lpt;
4594
4595    for (i=0; i<get_cores_tx(); i++) {
4596        lpt = m_fl.m_threads_info[i];
4597        CVirtualIF * erf_vif = m_cores_vif[i+1];
4598        lpt->set_vif(erf_vif);
4599        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
4600    }
4601    m_fl_was_init=true;
4602
4603    return (0);
4604}
4605
/**
 * Stateful-mode initialization of the DP flow generators.
 *
 * Loads the traffic YAML (and the optional client config file), verifies
 * the options, computes the expected traffic rates and binds every DP
 * thread to its virtual interface. Exits the process on config errors.
 *
 * @return 0 on success
 */
int CGlobalTRex::start_master_statefull() {
    int i;
    /* clear the per-core completion flags */
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected rates derived from the loaded profile */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* hand the latency manager the client/server IP ranges of the profile */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode: dump the profile CSV and pad the screen */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    /* bind each DP thread to its virtual interface and NUMA socket */
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
4673
4674
4675////////////////////////////////////////////
4676static CGlobalTRex g_trex;
4677
/* Refresh this port's counters from the driver and update the rate
   estimators. Packets injected by the RX core itself (including ARP -
   see m_tx_arp below) are subtracted from TX so they do not pollute
   the test traffic stats; they are accumulated in m_ignore_stats. */
void CPhyEthIF::update_counters() {
    get_ex_drv()->get_extended_stats(this, &m_stats);
    CRXCoreIgnoreStat ign_stats;
    /* the 'true' presumably clears the ignore counters after reading - TODO confirm */
    g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
    m_stats.obytes -= ign_stats.get_tx_bytes();
    m_stats.opackets -= ign_stats.get_tx_pkts();
    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();

    /* feed the adjusted totals into the moving-rate estimators */
    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
}
4693
4694bool CPhyEthIF::Create(uint8_t portid) {
4695    m_port_id      = portid;
4696    m_last_rx_rate = 0.0;
4697    m_last_tx_rate = 0.0;
4698    m_last_tx_pps  = 0.0;
4699    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
4700
4701
4702    uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[m_port_id].get_ip();
4703    if (src_ipv4) {
4704        m_port_attr->set_src_ipv4(src_ipv4);
4705    }
4706
4707    /* for now set as unresolved IPv4 destination */
4708    uint32_t dest_ipv4 = CGlobalInfo::m_options.m_ip_cfg[m_port_id].get_def_gw();
4709    if (dest_ipv4) {
4710        m_port_attr->get_dest().set_dest_ipv4(dest_ipv4);
4711    }
4712
4713    return true;
4714}
4715
4716const std::vector<std::pair<uint8_t, uint8_t>> &
4717CPhyEthIF::get_core_list() {
4718
4719    /* lazy find */
4720    if (m_core_id_list.size() == 0) {
4721
4722        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
4723
4724            /* iterate over all the directions*/
4725            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
4726                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
4727                    m_core_id_list.push_back(std::make_pair(core_id, dir));
4728                }
4729            }
4730        }
4731    }
4732
4733    return m_core_id_list;
4734
4735}
4736
4737int CPhyEthIF::reset_hw_flow_stats() {
4738    if (get_ex_drv()->hw_rx_stat_supported()) {
4739        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
4740    } else {
4741        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
4742    }
4743    return 0;
4744}
4745
// get/reset flow director counters
// return 0 if OK. -1 if operation not supported.
// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
// min, max - minimum, maximum counters range to get
// reset - If true, need to reset counter value after reading
// RX counters come either from NIC hardware (flow director) or, when the
// NIC cannot count per flow, from the software RX core. TX counters
// always come from the DP cores via g_trex.
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    if (hw_rx_stat_supported) {
        /* read deltas since the previous poll; the prev arrays are the
           driver's bookkeeping for computing those deltas */
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        /* software path: the RX core fills rx_stats directly */
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* also clear the hardware counter of this single rule */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* fold the deltas into the absolute counters, then report them */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
4798
4799int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
4800    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
4801    for (int i = min; i <= max; i++) {
4802        if ( reset ) {
4803            if (tx_stats != NULL) {
4804                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
4805            }
4806        } else {
4807            if (tx_stats != NULL) {
4808                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
4809            }
4810        }
4811    }
4812
4813    return 0;
4814}
4815
// If needed, send packets to rx core for processing.
// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
// Returns true if the mbuf was handed to the RX core (caller must NOT free it),
// false if the caller keeps ownership of the mbuf.
bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir, rte_mbuf_t * m) {
    CFlowStatParser parser;
    uint32_t ip_id;

    /* unparsable packets are never forwarded */
    if (parser.parse(rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m)) != 0) {
        return false;
    }
    bool send=false;

    // e1000 on ESXI hands us the packet with the ethernet FCS
    if (parser.get_pkt_size() < rte_pktmbuf_pkt_len(m)) {
        rte_pktmbuf_trim(m, rte_pktmbuf_pkt_len(m) - parser.get_pkt_size());
    }

    if ( get_is_stateless() ) {
        // In stateless RX, we only care about flow stat packets
        if ((parser.get_ip_id(ip_id) == 0) && ((ip_id & 0xff00) == IP_ID_RESERVE_BASE)) {
            send = true;
        }
    } else {
        CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
        /* NOTE(review): '&' is a bitwise AND of two bools here - works,
           but '&&' was probably intended */
        bool is_lateancy_pkt =  c_l_pkt_mode->IsLatencyPkt((IPHeader *)parser.get_l4()) &
            CCPortLatency::IsLatencyPkt(parser.get_l4() + c_l_pkt_mode->l4_header_len());

        if (is_lateancy_pkt) {
            send = true;
        } else {
            /* rx-check marks packets by TTL: forward if the TTL matches the
               expected marked value (or one less, after a router hop) */
            if ( get_is_rx_filter_enable() ) {
                uint8_t max_ttl = 0xff - get_rx_check_hops();
                uint8_t pkt_ttl = parser.get_ttl();
                if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
                    send=true;
                }
            }
        }
    }


    if (send) {
        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if ( node ) {
            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
            node->m_dir      = dir;
            node->m_latency_offset = 0xdead;
            node->m_pkt      = m;
            /* Enqueue()==0 means success (empty branch intentional);
               on failure give the node back and keep mbuf ownership */
            if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
            }else{
                CGlobalInfo::free_node((CGenNode *)node);
                send=false;
            }

#ifdef LATENCY_QUEUE_TRACE_
            printf("rx to cp --\n");
            rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
#endif
        }else{
            /* node pool exhausted - drop the forward, caller keeps the mbuf */
            send=false;
        }
    }
    return (send);
}
4879
/* accessor for the global stateless object owned by g_trex */
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
4883
/* accessor for the global stateless RX core object owned by g_trex */
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
4887
4888static int latency_one_lcore(__attribute__((unused)) void *dummy)
4889{
4890    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
4891    physical_thread_id_t  phy_id =rte_lcore_id();
4892
4893    if ( lpsock->thread_phy_is_rx(phy_id) ) {
4894        g_trex.run_in_rx_core();
4895    }else{
4896
4897        if ( lpsock->thread_phy_is_master( phy_id ) ) {
4898            g_trex.run_in_master();
4899            delay(1);
4900        }else{
4901            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
4902            /* this core has stopped */
4903            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
4904        }
4905    }
4906    return 0;
4907}
4908
4909
4910
4911static int slave_one_lcore(__attribute__((unused)) void *dummy)
4912{
4913    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
4914    physical_thread_id_t  phy_id =rte_lcore_id();
4915
4916    if ( lpsock->thread_phy_is_rx(phy_id) ) {
4917        g_trex.run_in_rx_core();
4918    }else{
4919        if ( lpsock->thread_phy_is_master( phy_id ) ) {
4920            g_trex.run_in_master();
4921            delay(1);
4922        }else{
4923            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
4924        }
4925    }
4926    return 0;
4927}
4928
4929
4930
/**
 * Build a core bit-mask: bit 0 (the master core) is always set, plus
 * 'cores-1' consecutive bits starting at bit (offset+1).
 *
 * Bug fix: the original loop compared a signed index against the
 * unsigned expression (cores-1); with cores==0 that wraps to ~4 billion
 * and the loop runs away. Iterate with unsigned arithmetic instead.
 *
 * @param cores  total number of cores (master included)
 * @param offset first worker core occupies bit (offset+1)
 * @return the resulting 32-bit mask
 */
uint32_t get_cores_mask(uint32_t cores,int offset){
    uint32_t res = 1;                       /* master core: bit 0 */
    uint32_t mask = (1u << (offset + 1));

    for (uint32_t i = 1; i < cores; i++) {  /* runs cores-1 times, 0 when cores==0 */
        res |= mask;
        mask = mask << 1;
    }
    return (res);
}
4943
4944
/* path of the running executable (argv[0]); set once in main() */
static char *g_exe_name;

/* read-only accessor for the executable path */
const char *get_exe_name() {
    return g_exe_name;
}
4949
4950
4951int main(int argc , char * argv[]){
4952    g_exe_name = argv[0];
4953
4954    return ( main_test(argc , argv));
4955}
4956
4957
/**
 * Apply the platform YAML (global_platform_cfg_info) to the global
 * options and memory configuration: thread layout, ZMQ/telnet ports,
 * per-port MAC/IP settings, and a memory multiplier scaled by port
 * bandwidth and port count.
 *
 * @return 0 always
 */
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the per-port MAC/IP info from the file, clamped to TREX_MAX_PORTS */

        int port_size=cg->m_mac_info.size();

        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
        }
    }

    /* mul by interface type */
    float mul=1.0;
    /* 10Gb is the baseline; anything below is rounded up to it */
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    /* scale the mempool sizes by the computed multiplier */
    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5024
5025extern "C" int eal_cpu_detected(unsigned lcore_id);
5026// return mask representing available cores
5027int core_mask_calc() {
5028    uint32_t mask = 0;
5029    int lcore_id;
5030
5031    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5032        if (eal_cpu_detected(lcore_id)) {
5033            mask |= (1 << lcore_id);
5034        }
5035    }
5036
5037    return mask;
5038}
5039
// Return number of set bits in i
uint32_t num_set_bits(uint32_t i)
{
    /* SWAR popcount: fold 2-bit pair counts, then 4-bit nibble counts,
       then sum all bytes via the multiply trick */
    uint32_t v = i;
    v -= (v >> 1) & 0x55555555;
    v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    v = (v + (v >> 4)) & 0x0F0F0F0F;
    return (v * 0x01010101) >> 24;
}
5047
5048// sanity check if the cores we want to use really exist
5049int core_mask_sanity(uint32_t wanted_core_mask) {
5050    uint32_t calc_core_mask = core_mask_calc();
5051    uint32_t wanted_core_num, calc_core_num;
5052
5053    wanted_core_num = num_set_bits(wanted_core_mask);
5054    calc_core_num = num_set_bits(calc_core_mask);
5055
5056    if (calc_core_num == 1) {
5057        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
5058        printf("        If you are running on VM, consider adding more cores if possible\n");
5059        return -1;
5060    }
5061    if (wanted_core_num > calc_core_num) {
5062        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
5063        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
5064               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
5065               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
5066        if (CGlobalInfo::m_options.preview.getCores() > 1)
5067            printf("       Maybe try smaller -c <num>.\n");
5068        printf("       If you are running on VM, consider adding more cores if possible\n");
5069        return -1;
5070    }
5071
5072    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
5073        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
5074        return -1;
5075    }
5076
5077    return 0;
5078}
5079
/**
 * Build the argv-style argument list (global_dpdk_args) that will be
 * passed to rte_eal_init(): core mask, memory channels, log level,
 * master lcore, PCI whitelist and optional file prefix / memory limit.
 *
 * @return 0 on success, -1 on configuration/core-mask errors
 */
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    /* derive the thread layout from the parsed options */
    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    /* verify the requested core mask against the actually available cores */
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    /* argv[0] placeholder, core mask, and 4 memory channels */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    /* EAL log level: quiet by default, scaled with the verbosity mode */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        /* dump-info mode: whitelist only the interfaces requested on the CLI */
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        /* normal run: whitelist the interfaces from the platform config file */
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    /* a prefix enables multi-instance operation: separate hugepage files
       and a bounded memory allocation (-m) per instance */
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    /* verbose: echo the final EAL argument list */
    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5164
5165
5166int sim_load_list_of_cap_files(CParserOption * op){
5167
5168    CFlowGenList fl;
5169    fl.Create();
5170    fl.load_from_yaml(op->cfg_file,1);
5171    if ( op->preview.getVMode() >0 ) {
5172        fl.DumpCsv(stdout);
5173    }
5174    uint32_t start=    os_get_time_msec();
5175
5176    CErfIF erf_vif;
5177
5178    fl.generate_p_thread_info(1);
5179    CFlowGenListPerThread   * lpt;
5180    lpt=fl.m_threads_info[0];
5181    lpt->set_vif(&erf_vif);
5182
5183    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5184        lpt->start_generate_stateful(op->out_file,op->preview);
5185    }
5186
5187    lpt->m_node_gen.DumpHist(stdout);
5188
5189    uint32_t stop=    os_get_time_msec();
5190    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5191    fl.Delete();
5192    return (0);
5193}
5194
/* Print PCI address, MAC and driver name for every DPDK-probed port.
   Used by the --dump-info run mode right after rte_eal_init(). */
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        pci_addr = rte_eth_devices[port_id].pci_dev->addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].pci_dev->driver->name);
    }
}
5212
5213int main_test(int argc , char * argv[]){
5214
5215
5216    utl_termio_init();
5217
5218    int ret;
5219    unsigned lcore_id;
5220    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);
5221
5222    CGlobalInfo::m_options.preview.clean();
5223
5224    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
5225        exit(-1);
5226    }
5227
5228    /* enable core dump if requested */
5229    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
5230        utl_set_coredump_size(-1);
5231    }
5232    else {
5233        utl_set_coredump_size(0);
5234    }
5235
5236
5237    update_global_info_from_platform_file();
5238
5239    /* It is not a mistake. Give the user higher priorty over the configuration file */
5240    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
5241        exit(-1);
5242    }
5243
5244
5245    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
5246        CGlobalInfo::m_options.dump(stdout);
5247        CGlobalInfo::m_memory_cfg.Dump(stdout);
5248    }
5249
5250
5251    if (update_dpdk_args() < 0) {
5252        return -1;
5253    }
5254
5255    CParserOption * po=&CGlobalInfo::m_options;
5256
5257
5258    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
5259        rte_set_log_level(1);
5260
5261    }
5262    uid_t uid;
5263    uid = geteuid ();
5264    if ( uid != 0 ) {
5265        printf("ERROR you must run with superuser priviliges \n");
5266        printf("User id   : %d \n",uid);
5267        printf("try 'sudo' %s \n",argv[0]);
5268        return (-1);
5269    }
5270
5271    /* set affinity to the master core as default */
5272    cpu_set_t mask;
5273    CPU_ZERO(&mask);
5274    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
5275    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
5276
5277    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
5278    if (ret < 0){
5279        printf(" You might need to run ./trex-cfg  once  \n");
5280        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
5281    }
5282    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
5283        dump_interfaces_info();
5284        exit(0);
5285    }
5286    reorder_dpdk_ports();
5287    time_init();
5288
5289    /* check if we are in simulation mode */
5290    if ( CGlobalInfo::m_options.out_file != "" ){
5291        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
5292        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
5293    }
5294
5295    if ( !g_trex.Create() ){
5296        exit(1);
5297    }
5298
5299    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
5300        po->m_rx_check_sample = get_min_sample_rate();
5301        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
5302    }
5303
5304    /* set dump mode */
5305    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);
5306
5307    /* disable WD if needed */
5308    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
5309    TrexWatchDog::getInstance().init(wd_enable);
5310
5311    g_trex.m_sl_rx_running = false;
5312    if ( get_is_stateless() ) {
5313        g_trex.start_master_stateless();
5314
5315    }else{
5316        g_trex.start_master_statefull();
5317    }
5318
5319    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
5320    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
5321        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports);
5322        int ret;
5323
5324        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
5325            // Unit test: toggle many times between receive all and stateless/stateful modes,
5326            // to test resiliency of add/delete fdir filters
5327            printf("Starting receive all/normal mode toggle unit test\n");
5328            for (int i = 0; i < 100; i++) {
5329                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5330                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5331                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
5332                }
5333                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
5334                if (ret != 0) {
5335                    printf("Iteration %d: Receive all mode failed\n", i);
5336                    exit(ret);
5337                }
5338
5339                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5340                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5341                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
5342                }
5343
5344                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
5345                if (ret != 0) {
5346                    printf("Iteration %d: Normal mode failed\n", i);
5347                    exit(ret);
5348                }
5349
5350                printf("Iteration %d OK\n", i);
5351            }
5352            exit(0);
5353        } else {
5354            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
5355                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5356                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5357                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
5358                }
5359            }
5360            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
5361            exit(ret);
5362        }
5363    }
5364
5365    g_trex.pre_test();
5366
5367    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
5368    g_trex.ixgbe_rx_queue_flush();
5369    for (int i = 0; i < g_trex.m_max_ports; i++) {
5370        CPhyEthIF *_if = &g_trex.m_ports[i];
5371        _if->stop_rx_drop_queue();
5372    }
5373
5374    if ( CGlobalInfo::m_options.is_latency_enabled()
5375         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
5376        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
5377            CGlobalInfo::m_options.m_latency_rate;
5378        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
5379        g_trex.m_mg.start(pkts, NULL);
5380        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
5381        printf("Finished \n");
5382        g_trex.m_mg.reset();
5383    }
5384
5385    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
5386        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
5387        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
5388            if (rte_eal_wait_lcore(lcore_id) < 0)
5389                return -1;
5390        }
5391        g_trex.stop_master();
5392
5393        return (0);
5394    }
5395
5396    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
5397        g_trex.run_in_core(1);
5398        g_trex.stop_master();
5399        return (0);
5400    }
5401
5402    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
5403    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
5404        if (rte_eal_wait_lcore(lcore_id) < 0)
5405            return -1;
5406    }
5407
5408    g_trex.stop_master();
5409    g_trex.Delete();
5410    utl_termio_reset();
5411
5412    return (0);
5413}
5414
5415void wait_x_sec(int sec) {
5416    int i;
5417    printf(" wait %d sec ", sec);
5418    fflush(stdout);
5419    for (i=0; i<sec; i++) {
5420        delay(1000);
5421        printf(".");
5422        fflush(stdout);
5423    }
5424    printf("\n");
5425    fflush(stdout);
5426}
5427
5428/*
5429Changes the order of rte_eth_devices array elements
5430to be consistent with our /etc/trex_cfg.yaml
5431*/
5432void reorder_dpdk_ports() {
5433    rte_eth_dev rte_eth_devices_temp[RTE_MAX_ETHPORTS];
5434    uint8_t m_port_map[RTE_MAX_ETHPORTS];
5435    struct rte_pci_addr addr;
5436    uint8_t port_id;
5437
5438    // gather port relation information and save current array to temp
5439    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
5440        memcpy(&rte_eth_devices_temp[i], &rte_eth_devices[i], sizeof rte_eth_devices[i]);
5441        if (eal_parse_pci_BDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0 && eal_parse_pci_DomBDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0) {
5442            printf("Failed mapping TRex port id to DPDK id: %d\n", i);
5443            exit(1);
5444        }
5445        rte_eth_dev_get_port_by_addr(&addr, &port_id);
5446        m_port_map[port_id] = i;
5447        // print the relation in verbose mode
5448        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
5449            printf("TRex cfg port id: %d <-> DPDK port id: %d\n", i, port_id);
5450        }
5451    }
5452
5453    // actual reorder
5454    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
5455        memcpy(&rte_eth_devices[m_port_map[i]], &rte_eth_devices_temp[i], sizeof rte_eth_devices_temp[i]);
5456    }
5457}
5458
5459//////////////////////////////////////////////////////////////////////////////////////////////
5460//////////////////////////////////////////////////////////////////////////////////////////////
5461// driver section
5462//////////////////////////////////////////////////////////////////////////////////////////////
5463int CTRexExtendedDriverBase::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5464    uint8_t port_id=_if->get_rte_port_id();
5465    return (rte_eth_dev_rx_queue_stop(port_id, q_num));
5466}
5467
// Default link-settle wait: block for the user-configured number of seconds.
int CTRexExtendedDriverBase::wait_for_stable_link() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
    return 0;
}
5472
// Default post-link-up settle delay: the user-configured wait time.
void CTRexExtendedDriverBase::wait_after_link_up() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
}
5476
5477CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
5478    CFlowStatParser *parser = new CFlowStatParser();
5479    assert (parser);
5480    return parser;
5481}
5482
// In 1G we need to wait if links became ready too soon.
// The extra settle delay is currently disabled; kept for reference.
void CTRexExtendedDriverBase1G::wait_after_link_up(){
    //wait_x_sec(6 + CGlobalInfo::m_options.m_wait_before_traffic);
}
5487
// 1G links take longer to stabilize: wait 9 extra seconds on top of the
// user-configured delay.
int CTRexExtendedDriverBase1G::wait_for_stable_link(){
    wait_x_sec(9 + CGlobalInfo::m_options.m_wait_before_traffic);
    return(0);
}
5492
// 1G-specific TX ring thresholds; write-back threshold is disabled (0).
void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){

    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = 0;
}
5499
// Intentionally empty for 1G: flow-director-equivalent filtering is done in
// configure_rx_filter_rules() by writing device registers directly.
void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
    // Configuration is done in configure_rx_filter_rules by writing to registers
}
5503
5504#define E1000_RXDCTL_QUEUE_ENABLE	0x02000000
5505// e1000 driver does not support the generic stop/start queue API, so we need to implement ourselves
5506int CTRexExtendedDriverBase1G::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5507    uint32_t reg_val = _if->pci_reg_read( E1000_RXDCTL(q_num));
5508    reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
5509    _if->pci_reg_write( E1000_RXDCTL(q_num), reg_val);
5510    return 0;
5511}
5512
5513int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
5514    if ( get_is_stateless() ) {
5515        return configure_rx_filter_rules_stateless(_if);
5516    } else {
5517        return configure_rx_filter_rules_statefull(_if);
5518    }
5519
5520    return 0;
5521}
5522
/**
 * Statefull-mode RX classification for e1000/i350 (1G), done by direct
 * register writes:
 *  - TTQF rule 0 steers the latency protocol (SCTP when m_l_pkt_mode == 0,
 *    else ICMP) to RX queue 1.
 *  - Flex filters (FHFT) match {TTL, Protocol} for IPv4 or
 *    {NextHdr, HopLimit} for IPv6 -- at offsets shifted by 4 bytes when VLAN
 *    mode is on -- and steer matches to RX queue 1.
 * The expected TTL values are lowered by the configured rx-check hop count.
 * Always returns 0.
 */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
    uint16_t hops = get_rx_check_hops();
    // hops shifted into the high byte: IPv4 rules compare {TTL, Protocol}
    uint16_t v4_hops = (hops << 8)&0xff00;
    uint8_t protocol;

    if (CGlobalInfo::m_options.m_l_pkt_mode == 0) {
        protocol = IPPROTO_SCTP;
    } else {
        protocol = IPPROTO_ICMP;
    }
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
    _if->pci_reg_write( E1000_TTQF(0),   protocol
                        | 0x00008100 /* enable */
                        | 0xE0010000 /* RX queue is 1 */
                        );


    /* 16  :   12 MAC , (2)0x0800,2      | DW0 , DW1
       6 bytes , TTL , PROTO     | DW2=0 , DW3=0x0000FF06
    */
    int i;
    // IPv4: bytes being compared are {TTL, Protocol}
    uint16_t ff_rules_v4[6]={
        (uint16_t)(0xFF06 - v4_hops),
        (uint16_t)(0xFE11 - v4_hops),
        (uint16_t)(0xFF11 - v4_hops),
        (uint16_t)(0xFE06 - v4_hops),
        (uint16_t)(0xFF01 - v4_hops),
        (uint16_t)(0xFE01 - v4_hops),
    }  ;
    // IPv6: bytes being compared are {NextHdr, HopLimit}
    uint16_t ff_rules_v6[2]={
        (uint16_t)(0x3CFF - hops),
        (uint16_t)(0x3CFE - hops),
    }  ;
    uint16_t *ff_rules;
    uint16_t num_rules;
    uint32_t mask=0;      // bitmap of installed rules, written to WUFC below
    int  rule_id;

    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
        ff_rules = &ff_rules_v6[0];
        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
    }else{
        ff_rules = &ff_rules_v4[0];
        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
    }

    clear_rx_filter_rules(_if);

    uint8_t len = 24;
    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
        /* clear rule all */
        for (i=0; i<0xff; i+=4) {
            _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
        }

        if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
            // NOTE(review): 'len += 8' accumulates across loop iterations in
            // VLAN mode (32, 40, 48, ...); looks like it should be a fixed +8
            // offset -- confirm intended behavior before changing.
            len += 8;
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
            }else{
                // IPv4 VLAN: TTL/Protocol offset = 0x1A
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
            }
        }else{
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6: NextHdr/HopLimit offset = 0x14
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
            }else{
                // IPv4: TTL/Protocol offset = 0x16
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
            }
        }

        // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
        _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8)  | len);

        mask |=(1<<rule_id);
    }

    /* enable all rules */
    _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );

    return (0);
}
5616
// Sadly, DPDK has no support for i350 filters, so we need to implement by writing to registers.
/**
 * Stateless-mode RX classification for i350 (1G) via flex filters (FHFT):
 *  rule 0 - IPv4, IP-ID MSB == 0xff (flow-stat marker)
 *  rule 1 - VLAN + IPv4, same IP-ID condition
 *  rule 2 - IPv6, flow-label byte == 0xff
 *  rule 3 - VLAN + IPv6, same flow-label condition
 * All matching packets are steered to RX queue 1; the rule bitmap is enabled
 * through WUFC. Always returns 0.
 */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);

    uint8_t len = 24;      // flex-filter match length in bytes
    uint32_t mask = 0;     // bitmap of installed rules, written to WUFC below
    int rule_id;

    clear_rx_filter_rules(_if);

    rule_id = 0;
    mask |= 0x1 << rule_id;
    // filter for byte 18 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000008);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    // same as 0, but with vlan. type should be vlan. Inside vlan, should be IP with lsb of IP ID equals 0xff
    rule_id = 1;
    mask |= 0x1 << rule_id;
    // filter for byte 22 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x40 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x00000008);
    // Was written together with IP ID filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    rule_id = 2;
    mask |= 0x1 << rule_id;
    // ipv6 flow stat
    // filter for byte 16 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x01); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IPv6.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x0000dd86);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    rule_id = 3;
    mask |= 0x1 << rule_id;
    // same as 2, with vlan. Type is vlan. Inside vlan, IPv6 with flow label second bits 4-11 equals 0xff
    // filter for byte 20 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x10 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x0000dd86);
    // Was written together with flow label filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    /* enable rules */
    _if->pci_reg_write(E1000_WUFC, (mask << 16) | (1 << 14) );

    return (0);
}
5689
5690// clear registers of rules
5691void CTRexExtendedDriverBase1G::clear_rx_filter_rules(CPhyEthIF * _if) {
5692    for (int rule_id = 0 ; rule_id < 8; rule_id++) {
5693        for (int i = 0; i < 0xff; i += 4) {
5694            _if->pci_reg_write( (E1000_FHFT(rule_id) + i) , 0);
5695        }
5696    }
5697}
5698
5699int CTRexExtendedDriverBase1G::set_rcv_all(CPhyEthIF * _if, bool set_on) {
5700    // byte 12 equals 08 - for IPv4 and ARP
5701    //                86 - For IPv6
5702    //                81 - For VLAN
5703    //                88 - For MPLS
5704    uint8_t eth_types[] = {0x08, 0x86, 0x81, 0x88};
5705    uint32_t mask = 0;
5706
5707    clear_rx_filter_rules(_if);
5708
5709    if (set_on) {
5710        for (int rule_id = 0; rule_id < sizeof(eth_types); rule_id++) {
5711            mask |= 0x1 << rule_id;
5712            // Filter for byte 12 of packet
5713            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x000000 | eth_types[rule_id]);
5714            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x10); /* MASK */
5715            // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1, len = 24
5716            _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | 24);
5717        }
5718    } else {
5719        configure_rx_filter_rules(_if);
5720    }
5721
5722    return 0;
5723}
5724
5725void CTRexExtendedDriverBase1G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
5726
5727    stats->ipackets     +=  _if->pci_reg_read(E1000_GPRC) ;
5728
5729    stats->ibytes       +=  (_if->pci_reg_read(E1000_GORCL) );
5730    stats->ibytes       +=  (((uint64_t)_if->pci_reg_read(E1000_GORCH))<<32);
5731
5732
5733    stats->opackets     +=  _if->pci_reg_read(E1000_GPTC);
5734    stats->obytes       +=  _if->pci_reg_read(E1000_GOTCL) ;
5735    stats->obytes       +=  ( (((uint64_t)_if->pci_reg_read(IXGBE_GOTCH))<<32) );
5736
5737    stats->f_ipackets   +=  0;
5738    stats->f_ibytes     += 0;
5739
5740
5741    stats->ierrors      +=  ( _if->pci_reg_read(E1000_RNBC) +
5742                              _if->pci_reg_read(E1000_CRCERRS) +
5743                              _if->pci_reg_read(E1000_ALGNERRC ) +
5744                              _if->pci_reg_read(E1000_SYMERRS ) +
5745                              _if->pci_reg_read(E1000_RXERRC ) +
5746
5747                              _if->pci_reg_read(E1000_ROC)+
5748                              _if->pci_reg_read(E1000_RUC)+
5749                              _if->pci_reg_read(E1000_RJC) +
5750
5751                              _if->pci_reg_read(E1000_XONRXC)+
5752                              _if->pci_reg_read(E1000_XONTXC)+
5753                              _if->pci_reg_read(E1000_XOFFRXC)+
5754                              _if->pci_reg_read(E1000_XOFFTXC)+
5755                              _if->pci_reg_read(E1000_FCRUC)
5756                              );
5757
5758    stats->oerrors      +=  0;
5759    stats->imcasts      =  0;
5760    stats->rx_nombuf    =  0;
5761}
5762
// Intentionally empty: the 1G driver keeps no driver-side state to reset.
void CTRexExtendedDriverBase1G::clear_extended_stats(CPhyEthIF * _if){
}
5765
5766#if 0
5767int CTRexExtendedDriverBase1G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
5768                                            ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
5769    uint32_t port_id = _if->get_port_id();
5770    return g_trex.m_rx_sl.get_rx_stats(port_id, pkts, prev_pkts, bytes, prev_bytes, min, max);
5771}
5772#endif
5773
void CTRexExtendedDriverBase10G::clear_extended_stats(CPhyEthIF * _if){
    // Read RXNFGPC and discard the value -- presumably a clear-on-read
    // counter, so the read resets it; TODO confirm against the 82599 datasheet.
    _if->pci_reg_read(IXGBE_RXNFGPC);
}
5777
// Delegate flow-director (FDIR) global setup to the 10G port-config helper.
void CTRexExtendedDriverBase10G::update_global_config_fdir(port_cfg_t * cfg){
    cfg->update_global_config_fdir_10g();
}
5781
// Standard TX ring thresholds for the 10G (ixgbe) family.
void CTRexExtendedDriverBase10G::update_configuration(port_cfg_t * cfg){
    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
}
5787
5788int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if) {
5789    if ( get_is_stateless() ) {
5790        return configure_rx_filter_rules_stateless(_if);
5791    } else {
5792        return configure_rx_filter_rules_statefull(_if);
5793    }
5794
5795    return 0;
5796}
5797
5798int CTRexExtendedDriverBase10G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
5799    uint8_t port_id = _if->get_rte_port_id();
5800    int  ip_id_lsb;
5801
5802    // 0..MAX_FLOW_STATS-1 is for rules using ip_id.
5803    // MAX_FLOW_STATS rule is for the payload rules. Meaning counter value is in the payload
5804    for (ip_id_lsb = 0; ip_id_lsb <= MAX_FLOW_STATS; ip_id_lsb++ ) {
5805        struct rte_eth_fdir_filter fdir_filter;
5806        int res = 0;
5807
5808        memset(&fdir_filter,0,sizeof(fdir_filter));
5809        fdir_filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
5810        fdir_filter.soft_id = ip_id_lsb; // We can use the ip_id_lsb also as filter soft_id
5811        fdir_filter.input.flow_ext.flexbytes[0] = 0xff;
5812        fdir_filter.input.flow_ext.flexbytes[1] = ip_id_lsb;
5813        fdir_filter.action.rx_queue = 1;
5814        fdir_filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
5815        fdir_filter.action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
5816        res = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &fdir_filter);
5817
5818        if (res != 0) {
5819            rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_filter_ctrl : %d\n",res);
5820        }
5821    }
5822
5823    return 0;
5824}
5825
5826int CTRexExtendedDriverBase10G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
5827    uint8_t port_id=_if->get_rte_port_id();
5828    uint16_t hops = get_rx_check_hops();
5829    uint16_t v4_hops = (hops << 8)&0xff00;
5830
5831    /* enable rule 0 SCTP -> queue 1 for latency  */
5832    /* 1<<21 means that queue 1 is for SCTP */
5833    _if->pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
5834    _if->pci_reg_write(IXGBE_FTQF(0),
5835                       IXGBE_FTQF_PROTOCOL_SCTP|
5836                       (IXGBE_FTQF_PRIORITY_MASK<<IXGBE_FTQF_PRIORITY_SHIFT)|
5837                       ((0x0f)<<IXGBE_FTQF_5TUPLE_MASK_SHIFT)|IXGBE_FTQF_QUEUE_ENABLE);
5838
5839    // IPv4: bytes being compared are {TTL, Protocol}
5840    uint16_t ff_rules_v4[6]={
5841        (uint16_t)(0xFF11 - v4_hops),
5842        (uint16_t)(0xFE11 - v4_hops),
5843        (uint16_t)(0xFF06 - v4_hops),
5844        (uint16_t)(0xFE06 - v4_hops),
5845        (uint16_t)(0xFF01 - v4_hops),
5846        (uint16_t)(0xFE01 - v4_hops),
5847    };
5848    // IPv6: bytes being compared are {NextHdr, HopLimit}
5849    uint16_t ff_rules_v6[6]={
5850        (uint16_t)(0x3CFF - hops),
5851        (uint16_t)(0x3CFE - hops),
5852    };
5853
5854    uint16_t *ff_rules;
5855    uint16_t num_rules;
5856    int  rule_id;
5857
5858    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
5859        ff_rules = &ff_rules_v6[0];
5860        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
5861    }else{
5862        ff_rules = &ff_rules_v4[0];
5863        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
5864    }
5865
5866    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
5867        struct rte_eth_fdir_filter fdir_filter;
5868        uint16_t ff_rule = ff_rules[rule_id];
5869        int res = 0;
5870
5871        memset(&fdir_filter,0,sizeof(fdir_filter));
5872        /* TOS/PROTO */
5873        if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
5874            fdir_filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
5875        }else{
5876            fdir_filter.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
5877        }
5878        fdir_filter.soft_id = rule_id;
5879
5880        fdir_filter.input.flow_ext.flexbytes[0] = (ff_rule >> 8) & 0xff;
5881        fdir_filter.input.flow_ext.flexbytes[1] = ff_rule & 0xff;
5882        fdir_filter.action.rx_queue = 1;
5883        fdir_filter.action.behavior = RTE_ETH_FDIR_ACCEPT;
5884        fdir_filter.action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
5885        res = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_ADD, &fdir_filter);
5886
5887        if (res != 0) {
5888            rte_exit(EXIT_FAILURE, " ERROR rte_eth_dev_filter_ctrl : %d\n",res);
5889        }
5890    }
5891    return (0);
5892}
5893
/**
 * Accumulate 82599 (10G) HW counters into 'stats'.
 * 64-bit byte counters are assembled from low/high register pairs; flow
 * director good packet/byte counters feed f_ipackets/f_ibytes. In statefull
 * mode the per-TC missed-packet counters (MPC) are summed into ierrors.
 * NOTE(review): these registers are presumably clear-on-read -- confirm
 * against the 82599 datasheet before reordering or repeating reads.
 */
void CTRexExtendedDriverBase10G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){

    int i;
    uint64_t t=0;   // sum of per-TC missed-packet counters (statefull only)

    if ( !get_is_stateless() ) {

        for (i=0; i<8;i++) {
            t+=_if->pci_reg_read(IXGBE_MPC(i));
        }
    }

    stats->ipackets     +=  _if->pci_reg_read(IXGBE_GPRC) ;

    stats->ibytes       +=  (_if->pci_reg_read(IXGBE_GORCL) +(((uint64_t)_if->pci_reg_read(IXGBE_GORCH))<<32));



    stats->opackets     +=  _if->pci_reg_read(IXGBE_GPTC);
    stats->obytes       +=  (_if->pci_reg_read(IXGBE_GOTCL) +(((uint64_t)_if->pci_reg_read(IXGBE_GOTCH))<<32));

    // flow-director matched packets / bytes
    stats->f_ipackets   +=  _if->pci_reg_read(IXGBE_RXDGPC);
    stats->f_ibytes     += (_if->pci_reg_read(IXGBE_RXDGBCL) +(((uint64_t)_if->pci_reg_read(IXGBE_RXDGBCH))<<32));


    stats->ierrors      +=  ( _if->pci_reg_read(IXGBE_RLEC) +
                              _if->pci_reg_read(IXGBE_ERRBC) +
                              _if->pci_reg_read(IXGBE_CRCERRS) +
                              _if->pci_reg_read(IXGBE_ILLERRC ) +
                              _if->pci_reg_read(IXGBE_ROC)+
                              _if->pci_reg_read(IXGBE_RUC)+t);

    stats->oerrors      +=  0;
    stats->imcasts      =  0;
    stats->rx_nombuf    =  0;

}
5931
// 10G links settle quickly: wait 1 extra second on top of the configured delay.
int CTRexExtendedDriverBase10G::wait_for_stable_link(){
    wait_x_sec(1 + CGlobalInfo::m_options.m_wait_before_traffic);
    return (0);
}
5936
5937CFlowStatParser *CTRexExtendedDriverBase10G::get_flow_stat_parser() {
5938    CFlowStatParser *parser = new C82599Parser(CGlobalInfo::m_options.preview.get_vlan_mode_enable() ? true:false);
5939    assert (parser);
5940    return parser;
5941}
5942
// The 40G driver uses DPDK's generic stats: reset them through the API.
void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){
    rte_eth_stats_reset(_if->get_port_id());
}
5946
// VIC: standard TX thresholds plus a jumbo-frame RX length cap.
void CTRexExtendedDriverBaseVIC::update_configuration(port_cfg_t * cfg){
    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
    // max RX packet length: 9*1000-10 = 8990 bytes (VIC-specific jumbo cap)
    cfg->m_port_conf.rxmode.max_rx_pkt_len =9*1000-10;
}
5953
// 40G: standard TX thresholds, plus flow-director global setup done here
// (unlike 10G, which does it in update_global_config_fdir).
void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
    cfg->update_global_config_fdir_40g();
}
5960
// What is the type of the rule the respective hw_id counter counts.
struct fdir_hw_id_params_t {
    uint16_t rule_type;   // RTE_ETH_FLOW_* flow type the counter's rule matches
    uint16_t l4_proto;    // L4 protocol the rule filters on (0 = not set)
};

// Per-hw_id record of what each FDIR stat counter is counting (512 counters).
static struct fdir_hw_id_params_t fdir_hw_id_rule_params[512];
5968
5969/* Add rule to send packets with protocol 'type', and ttl 'ttl' to rx queue 1 */
5970// ttl is used in statefull mode, and ip_id in stateless. We configure the driver registers so that only one of them applies.
5971// So, the rule will apply if packet has either the correct ttl or IP ID, depending if we are in statfull or stateless.
5972void CTRexExtendedDriverBase40G::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
5973                                               , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx) {
5974    int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
5975    static int filter_soft_id = 0;
5976
5977    if ( ret != 0 ){
5978        rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_supported "
5979                 "err=%d, port=%u \n",
5980                 ret, port_id);
5981    }
5982
5983    struct rte_eth_fdir_filter filter;
5984
5985    memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
5986
5987#if 0
5988    printf("40g::%s rules: port:%d type:%d ttl:%d ip_id:%x l4:%d q:%d hw index:%d\n"
5989           , (op == RTE_ETH_FILTER_ADD) ?  "add" : "del"
5990           , port_id, type, ttl, ip_id, l4_proto, queue, stat_idx);
5991#endif
5992
5993    filter.action.rx_queue = queue;
5994    filter.action.behavior =RTE_ETH_FDIR_ACCEPT;
5995    filter.action.report_status =RTE_ETH_FDIR_NO_REPORT_STATUS;
5996    filter.action.stat_count_index = stat_idx;
5997    filter.soft_id = filter_soft_id++;
5998    filter.input.flow_type = type;
5999
6000    if (op == RTE_ETH_FILTER_ADD) {
6001        fdir_hw_id_rule_params[stat_idx].rule_type = type;
6002        fdir_hw_id_rule_params[stat_idx].l4_proto = l4_proto;
6003    }
6004
6005    switch (type) {
6006    case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6007    case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6008    case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
6009    case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6010        filter.input.flow.ip4_flow.ttl=ttl;
6011        filter.input.flow.ip4_flow.ip_id = ip_id;
6012        if (l4_proto != 0)
6013            filter.input.flow.ip4_flow.proto = l4_proto;
6014        break;
6015    case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6016    case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6017    case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6018        filter.input.flow.ipv6_flow.hop_limits=ttl;
6019        filter.input.flow.ipv6_flow.flow_label = ip_id;
6020        filter.input.flow.ipv6_flow.