/* main_dpdk.cpp revision 42d71595 */
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2016 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
/* rx-check flow sampling rates: sample 1 of every N packets */
#define RX_CHECK_MIX_SAMPLE_RATE 8
#define RX_CHECK_MIX_SAMPLE_RATE_1G 2


#define SOCKET0         0

/* upper bound of packets pulled in a single rx burst */
#define MAX_PKT_BURST   32

#define BP_MAX_CORES 32
#define BP_MAX_TX_QUEUE 16
/* NOTE(review): presumably the master + latency service cores — confirm against core allocation code */
#define BP_MASTER_AND_LATENCY 2

/* rx/tx descriptor ring sizes (bare-metal defaults) */
#define RTE_TEST_RX_DESC_DEFAULT 64
#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)

/* descriptor ring sizes used in VM (single-queue) mode */
#define RTE_TEST_RX_DESC_VM_DEFAULT 512
#define RTE_TEST_TX_DESC_VM_DEFAULT 512

typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
void reorder_dpdk_ports();

#define RTE_TEST_TX_DESC_DEFAULT 512
#define RTE_TEST_RX_DESC_DROP    0

/* highest flow-stat HW counter ids observed so far
   (NOTE(review): readers/writers are outside this chunk — confirm usage) */
static int max_stat_hw_id_seen = 0;
static int max_stat_hw_id_seen_payload = 0;
113
114static inline int get_vm_one_queue_enable(){
115    return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
116}
117
118static inline int get_is_rx_thread_enabled() {
119    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
120}
121
struct port_cfg_t;

#define MAX_DPDK_ARGS 40
/* platform section parsed from the TRex YAML configuration */
static CPlatformYamlInfo global_platform_cfg_info;
/* argv-style argument vector built for DPDK EAL initialization; the
   string buffers below back individual argv entries */
static int global_dpdk_args_num ;
static char * global_dpdk_args[MAX_DPDK_ARGS];
static char global_cores_str[100];
static char global_prefix_str[100];
static char global_loglevel_str[20];
static char global_master_id_str[10];
132
/* Abstract interface capturing per-PMD (NIC driver) differences TRex cares
   about: filter/FDIR programming, per-flow statistics capabilities, queue
   control and link behavior. One concrete subclass exists per NIC family. */
class CTRexExtendedDriverBase {
public:

    /* by default NIC driver adds CRC */
    virtual bool has_crc_added() {
        return true;
    }

    /* minimal 1/N sampling rate usable with rx-check on this NIC */
    virtual int get_min_sample_rate(void)=0;
    virtual void update_configuration(port_cfg_t * cfg)=0;
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;

    virtual bool is_hardware_filter_is_supported(){
        return(false);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    /* add/remove a HW rule for a flow-stat id; default: unsupported (-1) */
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return -1;};
    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    /* true when per-flow rx counters are maintained by the NIC hardware */
    virtual bool hw_rx_stat_supported(){return false;}
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual int get_stat_counters_num() {return 0;}
    virtual int get_rx_stat_capabilities() {return 0;}
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;

    /* Does this NIC type support automatic packet dropping in case of a link down?
       in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
       this interface is used as a workaround to let TRex work without link in stateless mode, driver that
       does not support that will be failed at init time because it will cause watchdog due to watchdog hang */
    virtual bool drop_packets_incase_of_linkdown() {
        return (false);
    }
};
180
181
/* Intel 1G NICs (registered for "rte_igb_pmd" — see CTRexExtendedDriverDb).
   Supports HW filtering and a HW drop queue. */
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
228
/* Paravirtualized / VM NICs (registered for "rte_em_pmd", "rte_vmxnet3_pmd"
   and "rte_virtio_pmd"). Forces single rx/tx queue mode; the driver does not
   add CRC, and there is no HW drop queue. */
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1GVm(){
        /* we are working in mode that we have 1 queue for rx and one queue for tx*/
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }

    virtual bool has_crc_added() {
        return false;
    }

    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1GVm() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){

    }

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);

    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
279
280
/* Intel 10G NICs (registered for "rte_ixgbe_pmd"). HW filters and a HW drop
   queue are available. Also serves as base class for the 40G and Mellanox
   drivers below. */
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
321
/* Intel 40G NICs (registered for "rte_i40e_pmd"). HW rx flow-stat counters
   are supported; packets are dropped in HW when the link is down. */
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
public:
    CTRexExtendedDriverBase40G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual bool hw_rx_stat_supported(){return true;}
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

    virtual bool drop_packets_incase_of_linkdown() {
        return (true);
    }

private:
    uint8_t m_if_per_card;   // interfaces per physical card (fixed at 4, see ctor)
};
381
/* Cisco VIC NICs (registered for "rte_enic_pmd"). Two interfaces per card;
   per-flow rx stats are collected in software (hw_rx_stat_supported false). */
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
        m_if_per_card=2;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }


    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    void clear_extended_stats(CPhyEthIF * _if);

    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);


    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);


    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual bool hw_rx_stat_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

private:
    uint8_t m_if_per_card;   // interfaces per physical card (fixed at 2, see ctor)
};
446
447
/* Mellanox NICs (registered for "librte_pmd_mlx5"). Inherits the 10G driver
   behavior; flow-control disable is not supported and rx flow stats are
   collected in software. */
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase10G {
public:

    CTRexExtendedDriverBaseMlnx5G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 2;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control using the DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }


    /* factory hook used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    // disabling flow control using DPDK API causes the interface to malfunction
    virtual bool flow_control_disable_supported(){return false;}
    virtual bool hw_rx_stat_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on){
        /* TBD need to support that */
        return (-1);
    }


private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id,
                               uint16_t type, uint8_t ttl,
                               uint16_t ip_id,
                               uint8_t l4_proto,
                               int queue,
                               uint16_t stat_idx);
    /* note: "statfull" spelling kept to match the out-of-class definition */
    virtual int configure_rx_filter_rules_statfull(CPhyEthIF * _if);

private:
    uint8_t m_if_per_card;   // interfaces per physical card (fixed at 2, see ctor)
};
516
517
518
519
520
521typedef CTRexExtendedDriverBase * (*create_object_t) (void);
522
523
/* Registry record binding a DPDK driver name to its factory function. */
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;   // DPDK PMD name, e.g. "rte_ixgbe_pmd"
    create_object_t     m_constructor;   // factory creating the matching driver object
};
529
/* Singleton registry mapping the active DPDK driver name to the matching
   CTRexExtendedDriverBase implementation. set_driver_name() must be called
   before get_drv() may be used (asserts otherwise). */
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    bool is_driver_exists(std::string name);



    /* Select and instantiate the active driver by DPDK name.
       Asserts when the name is not registered. */
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    /* Return the active driver; asserts when called before set_driver_name(). */
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    /* singleton accessor */
    static CTRexExtendedDriverDb * Ins();

private:
    CTRexExtendedDriverBase * create_driver(std::string name);

    /* Private constructor (instances come only from Ins()): registers all
       supported PMDs, physical and virtual. */
    CTRexExtendedDriverDb(){
        register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("librte_pmd_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);


        /* virtual devices */
        register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);




        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;
    bool        m_driver_was_set;
    std::string m_driver_name;
    CTRexExtendedDriverBase * m_drv;
    std::vector <CTRexExtendedDriverRec*>     m_list;   // registered driver records

};
594
595CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
596
597
598void CTRexExtendedDriverDb::register_driver(std::string name,
599                                            create_object_t func){
600    CTRexExtendedDriverRec * rec;
601    rec = new CTRexExtendedDriverRec();
602    rec->m_driver_name=name;
603    rec->m_constructor=func;
604    m_list.push_back(rec);
605}
606
607
608bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
609    int i;
610    for (i=0; i<(int)m_list.size(); i++) {
611        if (m_list[i]->m_driver_name == name) {
612            return (true);
613        }
614    }
615    return (false);
616}
617
618
619CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
620    int i;
621    for (i=0; i<(int)m_list.size(); i++) {
622        if (m_list[i]->m_driver_name == name) {
623            return ( m_list[i]->m_constructor() );
624        }
625    }
626    return( (CTRexExtendedDriverBase *)0);
627}
628
629
630
631CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
632    if (!m_ins) {
633        m_ins = new CTRexExtendedDriverDb();
634    }
635    return (m_ins);
636}
637
638static CTRexExtendedDriverBase *  get_ex_drv(){
639
640    return ( CTRexExtendedDriverDb::Ins()->get_drv());
641}
642
643static inline int get_min_sample_rate(void){
644    return ( get_ex_drv()->get_min_sample_rate());
645}
646
647// cores =0==1,1*2,2,3,4,5,6
648// An enum for all the option types
/* identifiers for all command-line options parsed via parser_options[] */
enum { OPT_HELP,
       OPT_MODE_BATCH,
       OPT_MODE_INTERACTIVE,
       OPT_NODE_DUMP,
       OPT_DUMP_INTERFACES,
       OPT_UT,
       OPT_CORES,
       OPT_SINGLE_CORE,
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       OPT_RATE_MULT,
       OPT_DURATION,
       OPT_PLATFORM_FACTOR,
       OPT_PUB_DISABLE,
       OPT_LIMT_NUM_OF_PORTS,
       OPT_PLAT_CFG_FILE,
       OPT_MBUF_FACTOR,
       OPT_LATENCY,
       OPT_NO_CLEAN_FLOW_CLOSE,
       OPT_LATENCY_MASK,
       OPT_ONLY_LATENCY,
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC,
       OPT_PCAP,
       OPT_RX_CHECK,
       OPT_IO_MODE,
       OPT_IPV6,
       OPT_LEARN,
       OPT_LEARN_MODE,
       OPT_LEARN_VERIFY,
       OPT_L_PKT_MODE,
       OPT_NO_FLOW_CONTROL,
       OPT_VLAN,
       OPT_RX_CHECK_HOPS,
       OPT_CLIENT_CFG_FILE,
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
};
695
696/* these are the argument types:
697   SO_NONE --    no argument needed
698   SO_REQ_SEP -- single required argument
699   SO_MULTI --   multiple arguments needed
700*/
/* command-line switch table consumed by CSimpleOpt; maps each flag string
   to its OPT_* id and argument type */
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE   },
        { OPT_HELP,                   "-h",                SO_NONE   },
        { OPT_HELP,                   "--help",            SO_NONE   },
        { OPT_UT,                     "--ut",              SO_NONE   },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP},
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE   },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP},
        { OPT_SINGLE_CORE,            "-s",                SO_NONE  },
        { OPT_FLIP_CLIENT_SERVER,"--flip",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,"-e",SO_NONE  },
        { OPT_NO_CLEAN_FLOW_CLOSE,"--nc",SO_NONE  },
        { OPT_LIMT_NUM_OF_PORTS,"--limit-ports", SO_REQ_SEP },
        { OPT_CORES     , "-c",         SO_REQ_SEP },
        { OPT_NODE_DUMP , "-v",         SO_REQ_SEP },
        { OPT_DUMP_INTERFACES , "--dump-interfaces",         SO_MULTI },
        { OPT_LATENCY , "-l",         SO_REQ_SEP },
        { OPT_DURATION     , "-d",  SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR     , "-pm",  SO_REQ_SEP },
        { OPT_PUB_DISABLE     , "-pubd",  SO_NONE },
        { OPT_RATE_MULT     , "-m",  SO_REQ_SEP },
        { OPT_LATENCY_MASK     , "--lm",  SO_REQ_SEP },
        { OPT_ONLY_LATENCY, "--lo",  SO_NONE  },
        { OPT_LATENCY_PREVIEW ,       "-k",   SO_REQ_SEP   },
        { OPT_WAIT_BEFORE_TRAFFIC ,   "-w",   SO_REQ_SEP   },
        { OPT_PCAP,       "--pcap",       SO_NONE   },
        { OPT_RX_CHECK,   "--rx-check",  SO_REQ_SEP },
        { OPT_IO_MODE,   "--iom",  SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS, "--hops", SO_REQ_SEP },
        { OPT_IPV6,       "--ipv6",       SO_NONE   },
        { OPT_LEARN, "--learn",       SO_NONE   },
        { OPT_LEARN_MODE, "--learn-mode",       SO_REQ_SEP   },
        { OPT_LEARN_VERIFY, "--learn-verify",       SO_NONE   },
        { OPT_L_PKT_MODE, "--l-pkt-mode",       SO_REQ_SEP   },
        { OPT_NO_FLOW_CONTROL, "--no-flow-control-change",       SO_NONE   },
        { OPT_VLAN,       "--vlan",       SO_NONE   },
        /* both spellings are accepted for the client configuration file */
        { OPT_CLIENT_CFG_FILE, "--client_cfg", SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE, "--client-cfg", SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT ,"--no-key", SO_NONE   },
        { OPT_VIRT_ONE_TX_RX_QUEUE, "--vm-sim", SO_NONE },
        { OPT_PREFIX, "--prefix", SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT, "--send-debug-pkt", SO_REQ_SEP },
        { OPT_MBUF_FACTOR     , "--mbuf-factor",  SO_REQ_SEP },
        { OPT_NO_WATCHDOG ,     "--no-watchdog",  SO_NONE  },
        { OPT_ALLOW_COREDUMP ,  "--allow-coredump",  SO_NONE  },
        { OPT_CHECKSUM_OFFLOAD, "--checksum-offload", SO_NONE },
        { OPT_CLOSE, "--close-at-end", SO_NONE },
        { OPT_ARP_REF_PER, "--arp-refresh-period", SO_REQ_SEP },
        SO_END_OF_OPTIONS
    };
753
754static int usage(){
755
756    printf(" Usage: t-rex-64 [mode] <options>\n\n");
757    printf(" mode is one of:\n");
758    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
759    printf("   -i        : Run TRex in 'stateless' mode\n");
760    printf("\n");
761
762    printf(" Available options are:\n");
763    printf(" --allow-coredump           : Allow creation of core dump \n");
764    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
765    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
766    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
767    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
768    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
769    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
770    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
771    printf("                               This it temporary option. Will be removed in the future \n");
772    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
773    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
774    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
775    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
776    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
777    printf(" --ipv6                     : Work in ipv6 mode \n");
778    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
779    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
780    printf("    Rate of zero means no latency check \n");
781    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
782    printf(" --learn-mode [1-3]         : Work in NAT environments, learn the dynamic NAT translation and ALG \n");
783    printf("      1    Use TCP ACK in first SYN to pass NAT translation information. Will work only for TCP streams. Initial SYN packet must be first packet in stream \n");
784    printf("      2    Add special IP option to pass NAT translation information. Will not work on certain firewalls if they drop packets with IP options \n");
785    printf("      3    Like 1, but without support for sequence number randomization in server->clien direction. Performance (flow/second) better than 1 \n");
786    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
787    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
788    printf(" --lm                       : Hex mask of cores that should send traffic \n");
789    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
790    printf(" --lo                       : Only run latency test \n");
791    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
792    printf("      0 (default)    send SCTP packets  \n");
793    printf("      1              Send ICMP request packets  \n");
794    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
795    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
796    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
797    printf(" --mbuf-factor              : Factor for packet memory \n");
798    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
799    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
800    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
801    printf(" --no-watchdog              : Disable watchdog \n");
802    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
803    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
804    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
805    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
806    printf(" -pubd                      : Disable monitors publishers \n");
807    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
808    printf(" -s                         : Single core. Run only one data path core. For debug \n");
809    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
810    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
811    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
812    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
813    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
814    printf(" --vm-sim                   : Simulate vm with driver of one input queue and one output queue \n");
815    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
816    printf("\n");
817    printf(" Examples: ");
818    printf(" basic trex run for 20 sec and multiplier of 10 \n");
819    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
820    printf("\n\n");
821    printf(" Copyright (c) 2015-2016 Cisco Systems, Inc.    \n");
822    printf("                                                                  \n");
823    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
824    printf(" you may not use this file except in compliance with the License. \n");
825    printf(" You may obtain a copy of the License at                          \n");
826    printf("                                                                  \n");
827    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
828    printf("                                                                  \n");
829    printf(" Unless required by applicable law or agreed to in writing, software \n");
830    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
831    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
832    printf(" See the License for the specific language governing permissions and      \n");
833    printf(" limitations under the License.                                           \n");
834    printf(" \n");
835    printf(" Open Source Components / Libraries \n");
836    printf(" DPDK       (BSD)       \n");
837    printf(" YAML-CPP   (BSD)       \n");
838    printf(" JSONCPP    (MIT)       \n");
839    printf(" \n");
840    printf(" Open Source Binaries \n");
841    printf(" ZMQ        (LGPL v3plus) \n");
842    printf(" \n");
843    printf(" Version : %s   \n",VERSION_BUILD_NUM);
844    printf(" DPDK version : %s   \n",rte_version());
845    printf(" User    : %s   \n",VERSION_USER);
846    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
847    printf(" Uuid    : %s    \n",VERSION_UIID);
848    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
849    return (0);
850}
851
852
853int gtest_main(int argc, char **argv) ;
854
/**
 * Report a fatal command-line parsing error and terminate the process.
 * Never returns.
 */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n*** " << msg << "\n\n";
    exit(-1);
}
859
860static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
861    CSimpleOpt args(argc, argv, parser_options);
862
863    bool latency_was_set=false;
864    (void)latency_was_set;
865    char ** rgpszArg = NULL;
866    bool opt_vlan_was_set = false;
867
868    int a=0;
869    int node_dump=0;
870
871    po->preview.setFileWrite(true);
872    po->preview.setRealTime(true);
873    uint32_t tmp_data;
874
875    po->m_run_mode = CParserOption::RUN_MODE_INVALID;
876
877    while ( args.Next() ){
878        if (args.LastError() == SO_SUCCESS) {
879            switch (args.OptionId()) {
880
881            case OPT_UT :
882                parse_err("Supported only in simulation");
883                break;
884
885            case OPT_HELP:
886                usage();
887                return -1;
888
889            case OPT_MODE_BATCH:
890                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
891                    parse_err("Please specify single run mode");
892                }
893                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
894                po->cfg_file = args.OptionArg();
895                break;
896
897            case OPT_MODE_INTERACTIVE:
898                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
899                    parse_err("Please specify single run mode");
900                }
901                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
902                break;
903
904            case OPT_NO_KEYBOARD_INPUT  :
905                po->preview.set_no_keyboard(true);
906                break;
907
908            case OPT_CLIENT_CFG_FILE :
909                po->client_cfg_file = args.OptionArg();
910                break;
911
912            case OPT_PLAT_CFG_FILE :
913                po->platform_cfg_file = args.OptionArg();
914                break;
915
916            case OPT_SINGLE_CORE :
917                po->preview.setSingleCore(true);
918                break;
919
920            case OPT_IPV6:
921                po->preview.set_ipv6_mode_enable(true);
922                break;
923
924
925            case OPT_LEARN :
926                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
927                break;
928
929            case OPT_LEARN_MODE :
930                sscanf(args.OptionArg(),"%d", &tmp_data);
931                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
932                    exit(-1);
933                }
934                po->m_learn_mode = (uint8_t)tmp_data;
935                break;
936
937            case OPT_LEARN_VERIFY :
938                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
939                if (po->m_learn_mode == 0) {
940                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
941                }
942                po->preview.set_learn_and_verify_mode_enable(true);
943                break;
944
945            case OPT_L_PKT_MODE :
946                sscanf(args.OptionArg(),"%d", &tmp_data);
947                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
948                    exit(-1);
949                }
950                po->m_l_pkt_mode=(uint8_t)tmp_data;
951                break;
952
953            case OPT_NO_FLOW_CONTROL:
954                po->preview.set_disable_flow_control_setting(true);
955                break;
956            case OPT_VLAN:
957                opt_vlan_was_set = true;
958                break;
959            case OPT_LIMT_NUM_OF_PORTS :
960                po->m_expected_portd =atoi(args.OptionArg());
961                break;
962            case  OPT_CORES  :
963                po->preview.setCores(atoi(args.OptionArg()));
964                break;
965            case OPT_FLIP_CLIENT_SERVER :
966                po->preview.setClientServerFlip(true);
967                break;
968            case OPT_NO_CLEAN_FLOW_CLOSE :
969                po->preview.setNoCleanFlowClose(true);
970                break;
971            case OPT_FLOW_FLIP_CLIENT_SERVER :
972                po->preview.setClientServerFlowFlip(true);
973                break;
974            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
975                po->preview.setClientServerFlowFlipAddr(true);
976                break;
977            case OPT_NODE_DUMP:
978                a=atoi(args.OptionArg());
979                node_dump=1;
980                po->preview.setFileWrite(false);
981                break;
982            case OPT_DUMP_INTERFACES:
983                if (first_time) {
984                    rgpszArg = args.MultiArg(1);
985                    while (rgpszArg != NULL) {
986                        po->dump_interfaces.push_back(rgpszArg[0]);
987                        rgpszArg = args.MultiArg(1);
988                    }
989                }
990                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
991                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
992                }
993                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
994                break;
995            case OPT_MBUF_FACTOR:
996                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
997                break;
998            case OPT_RATE_MULT :
999                sscanf(args.OptionArg(),"%f", &po->m_factor);
1000                break;
1001            case OPT_DURATION :
1002                sscanf(args.OptionArg(),"%f", &po->m_duration);
1003                break;
1004            case OPT_PUB_DISABLE:
1005                po->preview.set_zmq_publish_enable(false);
1006                break;
1007            case OPT_PLATFORM_FACTOR:
1008                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
1009                break;
1010            case OPT_LATENCY :
1011                latency_was_set=true;
1012                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
1013                break;
1014            case OPT_LATENCY_MASK :
1015                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
1016                break;
1017            case OPT_ONLY_LATENCY :
1018                po->preview.setOnlyLatency(true);
1019                break;
1020            case OPT_NO_WATCHDOG :
1021                po->preview.setWDDisable(true);
1022                break;
1023            case OPT_ALLOW_COREDUMP :
1024                po->preview.setCoreDumpEnable(true);
1025                break;
1026            case  OPT_LATENCY_PREVIEW :
1027                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
1028                break;
1029            case  OPT_WAIT_BEFORE_TRAFFIC :
1030                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
1031                break;
1032            case OPT_PCAP:
1033                po->preview.set_pcap_mode_enable(true);
1034                break;
1035            case OPT_RX_CHECK :
1036                sscanf(args.OptionArg(),"%d", &tmp_data);
1037                po->m_rx_check_sample=(uint16_t)tmp_data;
1038                po->preview.set_rx_check_enable(true);
1039                break;
1040            case OPT_RX_CHECK_HOPS :
1041                sscanf(args.OptionArg(),"%d", &tmp_data);
1042                po->m_rx_check_hops = (uint16_t)tmp_data;
1043                break;
1044            case OPT_IO_MODE :
1045                sscanf(args.OptionArg(),"%d", &tmp_data);
1046                po->m_io_mode=(uint16_t)tmp_data;
1047                break;
1048
1049            case OPT_VIRT_ONE_TX_RX_QUEUE:
1050                po->preview.set_vm_one_queue_enable(true);
1051                break;
1052
1053            case OPT_PREFIX:
1054                po->prefix = args.OptionArg();
1055                break;
1056
1057            case OPT_SEND_DEBUG_PKT:
1058                sscanf(args.OptionArg(),"%d", &tmp_data);
1059                po->m_debug_pkt_proto = (uint8_t)tmp_data;
1060                break;
1061
1062            case OPT_CHECKSUM_OFFLOAD:
1063                po->preview.setChecksumOffloadEnable(true);
1064                break;
1065
1066            case OPT_CLOSE:
1067                po->preview.setCloseEnable(true);
1068                break;
1069            case  OPT_ARP_REF_PER:
1070                sscanf(args.OptionArg(),"%d", &tmp_data);
1071                po->m_arp_ref_per=(uint16_t)tmp_data;
1072                break;
1073
1074            default:
1075                usage();
1076                return -1;
1077                break;
1078            } // End of switch
1079        }// End of IF
1080        else {
1081            usage();
1082            return -1;
1083        }
1084    } // End of while
1085
1086
1087    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
1088        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
1089    }
1090
1091    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
1092        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
1093                  "If you think it is important, please open a defect or write to TRex mailing list\n");
1094    }
1095
1096    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
1097        || (CGlobalInfo::m_options.m_arp_ref_per != 0)) {
1098        po->set_rx_enabled();
1099    }
1100
1101    if ( node_dump ){
1102        po->preview.setVMode(a);
1103    }
1104
1105    /* if we have a platform factor we need to devided by it so we can still work with normalized yaml profile  */
1106    po->m_factor = po->m_factor/po->m_platform_factor;
1107
1108    uint32_t cores=po->preview.getCores();
1109    if ( cores > ((BP_MAX_CORES)/2-1) ) {
1110        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
1111        return -1;
1112    }
1113
1114
1115    if ( first_time ){
1116        /* only first time read the configuration file */
1117        if ( po->platform_cfg_file.length() >0  ) {
1118            if ( node_dump ){
1119                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
1120            }
1121            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
1122            if ( node_dump ){
1123                global_platform_cfg_info.Dump(stdout);
1124            }
1125        }else{
1126            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
1127                if ( node_dump ){
1128                    printf("Using configuration file /etc/trex_cfg.yaml \n");
1129                }
1130                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
1131                if ( node_dump ){
1132                    global_platform_cfg_info.Dump(stdout);
1133                }
1134            }
1135        }
1136    }
1137
1138    if ( get_is_stateless() ) {
1139        if ( opt_vlan_was_set ) {
1140            po->preview.set_vlan_mode_enable(true);
1141        }
1142        if (CGlobalInfo::m_options.client_cfg_file != "") {
1143            parse_err("Client config file is not supported with interactive (stateless) mode ");
1144        }
1145        if ( po->m_duration ) {
1146            parse_err("Duration is not supported with interactive (stateless) mode ");
1147        }
1148
1149        if ( po->preview.get_is_rx_check_enable() ) {
1150            parse_err("Rx check is not supported with interactive (stateless) mode ");
1151        }
1152
1153        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
1154            parse_err("Latency check is not supported with interactive (stateless) mode ");
1155        }
1156
1157        if ( po->preview.getSingleCore() ){
1158            parse_err("Single core is not supported with interactive (stateless) mode ");
1159        }
1160
1161    }
1162    else {
1163        if ( !po->m_duration ) {
1164            po->m_duration = 3600.0;
1165        }
1166    }
1167    return 0;
1168}
1169
1170static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1171    // copy, as arg parser sometimes changes the argv
1172    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1173    for(int i=0; i<argc; i++) {
1174        argv_copy[i] = strdup(argv[i]);
1175    }
1176    int ret = parse_options(argc, argv_copy, po, first_time);
1177
1178    // free
1179    for(int i=0; i<argc; i++) {
1180        free(argv_copy[i]);
1181    }
1182    free(argv_copy);
1183    return ret;
1184}
1185
1186int main_test(int argc , char * argv[]);
1187
1188
1189#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
1190#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
1191#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
1192
1193/*
1194 * These default values are optimized for use with the Intel(R) 82599 10 GbE
1195 * Controller and the DPDK ixgbe PMD. Consider using other values for other
1196 * network controllers and/or network drivers.
1197 */
1198#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
1199#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
1200#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
1201
1202#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
1203#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1204
1205
/* Per-port DPDK configuration: the device-level rte_eth_conf plus RX/TX
   queue configuration templates. The constructor fills in TRex defaults
   (tuned for ixgbe; see the RX_*/TX_* THRESH defines above); the
   driver-specific object then adjusts them via update_var() /
   update_global_config_fdir(). */
struct port_cfg_t {
public:
    port_cfg_t(){
        /* start from all-zero DPDK config structs */
        memset(&m_port_conf,0,sizeof(m_port_conf));
        memset(&m_rx_conf,0,sizeof(m_rx_conf));
        memset(&m_tx_conf,0,sizeof(m_tx_conf));
        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));

        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
        m_rx_conf.rx_free_thresh =32;

        /* drop-queue variant: zero thresholds and rx_drop_en set, so the
           hardware drops packets when the queue is not serviced */
        m_rx_drop_conf.rx_thresh.pthresh = 0;
        m_rx_drop_conf.rx_thresh.hthresh = 0;
        m_rx_drop_conf.rx_thresh.wthresh = 0;
        m_rx_drop_conf.rx_free_thresh =32;
        m_rx_drop_conf.rx_drop_en=1;

        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;

        /* accept jumbo frames up to 9K (+22 bytes, presumably L2
           header/VLAN/CRC room — TODO confirm) and strip CRC in hardware */
        m_port_conf.rxmode.jumbo_frame=1;
        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
        m_port_conf.rxmode.hw_strip_crc=1;
    }



    /* let the driver-specific object adjust this configuration */
    inline void update_var(void){
        get_ex_drv()->update_configuration(this);
    }

    /* driver-specific flow-director (FDIR) global configuration */
    inline void update_global_config_fdir(void){
        get_ex_drv()->update_global_config_fdir(this);
    }

    /* enable FDIR */
    inline void update_global_config_fdir_10g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT_MAC_VLAN;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
        /* Offset of flexbytes field in RX packets (in 16-bit word units). */
        /* Note: divide by 2 to convert byte offset to word offset */
        if (get_is_stateless()) {
            /* 14 bytes of Ethernet header precede the flex field */
            m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        } else {
            /* stateful: offset differs between IPv6 and IPv4 headers */
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
            } else {
                m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
            }

            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        }
        m_port_conf.fdir_conf.drop_queue=1;
    }

    /* 40G NICs use perfect-match FDIR without the MAC/VLAN mode */
    inline void update_global_config_fdir_40g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
    }

    struct rte_eth_conf     m_port_conf;    /* device-level configuration */
    struct rte_eth_rxconf   m_rx_conf;      /* normal RX queue config */
    struct rte_eth_rxconf   m_rx_drop_conf; /* RX queue config with hw drop enabled */
    struct rte_eth_txconf   m_tx_conf;      /* TX queue config */
};
1283
1284
1285/* this object is per core / per port / per queue
1286   each core will have 2 ports to send to
1287
1288
1289   port0                                port1
1290
1291   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1292
1293*/
1294
1295
/* Maps a NIC register offset to a printable name; entries are built with the
   MY_REG macro below and consumed by dump_stats_extended(). */
typedef struct cnt_name_ {
    uint32_t offset;  /* register offset within the device register space */
    char * name;      /* human-readable register name (stringified macro arg) */
}cnt_name_t ;
1300
1301#define MY_REG(a) {a,(char *)#a}
1302
1303void CPhyEthIFStats::Clear() {
1304    ipackets = 0;
1305    ibytes = 0;
1306    f_ipackets = 0;
1307    f_ibytes = 0;
1308    opackets = 0;
1309    obytes = 0;
1310    ierrors = 0;
1311    oerrors = 0;
1312    imcasts = 0;
1313    rx_nombuf = 0;
1314    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1315    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1316    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1317}
1318
// dump all counters (even ones that equal 0)
// NOTE(review): output is written with printf (stdout); the 'fd' parameter is
// unused here — confirm callers always intend stdout before changing to fprintf.
// The DP_A4/DP_A macros defined below are also used by Dump() and
// CPhyEthIgnoreStats::dump() further down in this file.
void CPhyEthIFStats::DumpAll(FILE *fd) {
// DP_A4 always prints the counter; DP_A prints it only when non-zero
#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
}
1330
// dump all non zero counters
// (uses the DP_A macro defined in DumpAll above; output goes to stdout,
// the 'fd' parameter is currently unused)
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1344
// Dump the "ignored traffic" counters (always printed, even when zero).
// Uses the DP_A4 macro defined in CPhyEthIFStats::DumpAll above; output goes
// to stdout, the 'fd' parameter is currently unused.
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1353
1354// Clear the RX queue of an interface, dropping all packets
1355void CPhyEthIF::flush_rx_queue(void){
1356
1357    rte_mbuf_t * rx_pkts[32];
1358    int j=0;
1359    uint16_t cnt=0;
1360
1361    while (true) {
1362        j++;
1363        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1364        if ( cnt ) {
1365            int i;
1366            for (i=0; i<(int)cnt;i++) {
1367                rte_mbuf_t * m=rx_pkts[i];
1368                /*printf("rx--\n");
1369                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1370                rte_pktmbuf_free(m);
1371            }
1372        }
1373        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1374            break;
1375        }
1376    }
1377    if (cnt>0) {
1378        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1379    }
1380}
1381
1382
1383void CPhyEthIF::dump_stats_extended(FILE *fd){
1384
1385    cnt_name_t reg[]={
1386        MY_REG(IXGBE_GPTC), /* total packet */
1387        MY_REG(IXGBE_GOTCL), /* total bytes */
1388        MY_REG(IXGBE_GOTCH),
1389
1390        MY_REG(IXGBE_GPRC),
1391        MY_REG(IXGBE_GORCL),
1392        MY_REG(IXGBE_GORCH),
1393
1394
1395
1396        MY_REG(IXGBE_RXNFGPC),
1397        MY_REG(IXGBE_RXNFGBCL),
1398        MY_REG(IXGBE_RXNFGBCH),
1399        MY_REG(IXGBE_RXDGPC  ),
1400        MY_REG(IXGBE_RXDGBCL ),
1401        MY_REG(IXGBE_RXDGBCH  ),
1402        MY_REG(IXGBE_RXDDGPC ),
1403        MY_REG(IXGBE_RXDDGBCL ),
1404        MY_REG(IXGBE_RXDDGBCH  ),
1405        MY_REG(IXGBE_RXLPBKGPC ),
1406        MY_REG(IXGBE_RXLPBKGBCL),
1407        MY_REG(IXGBE_RXLPBKGBCH ),
1408        MY_REG(IXGBE_RXDLPBKGPC ),
1409        MY_REG(IXGBE_RXDLPBKGBCL),
1410        MY_REG(IXGBE_RXDLPBKGBCH ),
1411        MY_REG(IXGBE_TXDGPC      ),
1412        MY_REG(IXGBE_TXDGBCL     ),
1413        MY_REG(IXGBE_TXDGBCH     ),
1414        MY_REG(IXGBE_FDIRUSTAT ),
1415        MY_REG(IXGBE_FDIRFSTAT ),
1416        MY_REG(IXGBE_FDIRMATCH ),
1417        MY_REG(IXGBE_FDIRMISS )
1418
1419    };
1420    fprintf (fd," extended counters \n");
1421    int i;
1422    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1423        cnt_name_t *lp=&reg[i];
1424        uint32_t c=pci_reg_read(lp->offset);
1425        // xl710 bug. Counter values are -559038737 when they should be 0
1426        if (c && c != -559038737 ) {
1427            fprintf (fd," %s  : %d \n",lp->name,c);
1428        }
1429    }
1430}
1431
// Query the RX statistics capability flags; delegates to the extended-driver
// object for the installed NIC type.
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1435
1436
1437
1438void CPhyEthIF::configure(uint16_t nb_rx_queue,
1439                          uint16_t nb_tx_queue,
1440                          const struct rte_eth_conf *eth_conf){
1441    int ret;
1442    ret = rte_eth_dev_configure(m_port_id,
1443                                nb_rx_queue,
1444                                nb_tx_queue,
1445                                eth_conf);
1446
1447    if (ret < 0)
1448        rte_exit(EXIT_FAILURE, "Cannot configure device: "
1449                 "err=%d, port=%u\n",
1450                 ret, m_port_id);
1451
1452    /* get device info */
1453    rte_eth_dev_info_get(m_port_id, &m_dev_info);
1454
1455    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
1456        /* check if the device supports TCP and UDP checksum offloading */
1457        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
1458            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
1459                     "port=%u\n",
1460                     m_port_id);
1461        }
1462        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
1463            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
1464                     "port=%u\n",
1465                     m_port_id);
1466        }
1467    }
1468}
1469
1470
1471/*
1472
  rx-queue 0 - default - all traffic not going to queue 1
  will be dropped, as the queue is disabled
1475
1476
1477  rx-queue 1 - Latency measurement packets will go here
1478
1479  pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
1480
1481*/
1482
1483void CPhyEthIF::configure_rx_duplicate_rules(){
1484
1485    if ( get_is_rx_filter_enable() ){
1486
1487        if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
1488            printf(" ERROR this feature is not supported with current hardware \n");
1489            exit(1);
1490        }
1491        get_ex_drv()->configure_rx_filter_rules(this);
1492    }
1493}
1494
1495
1496void CPhyEthIF::stop_rx_drop_queue() {
1497    // In debug mode, we want to see all packets. Don't want to disable any queue.
1498    if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1499        return;
1500    }
1501    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1502        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1503            printf(" ERROR latency feature is not supported with current hardware  \n");
1504            exit(1);
1505        }
1506    }
1507    get_ex_drv()->stop_queue(this, MAIN_DPDK_DATA_Q);
1508}
1509
1510
1511void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1512                               uint16_t nb_rx_desc,
1513                               unsigned int socket_id,
1514                               const struct rte_eth_rxconf *rx_conf,
1515                               struct rte_mempool *mb_pool){
1516
1517    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1518                                     nb_rx_desc,
1519                                     socket_id,
1520                                     rx_conf,
1521                                     mb_pool);
1522    if (ret < 0)
1523        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1524                 "err=%d, port=%u\n",
1525                 ret, m_port_id);
1526}
1527
1528
1529
1530void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1531                               uint16_t nb_tx_desc,
1532                               unsigned int socket_id,
1533                               const struct rte_eth_txconf *tx_conf){
1534
1535    int ret = rte_eth_tx_queue_setup( m_port_id,
1536                                      tx_queue_id,
1537                                      nb_tx_desc,
1538                                      socket_id,
1539                                      tx_conf);
1540    if (ret < 0)
1541        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1542                 "err=%d, port=%u queue=%u\n",
1543                 ret, m_port_id, tx_queue_id);
1544
1545}
1546
1547void CPhyEthIF::stop(){
1548    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1549        rte_eth_dev_stop(m_port_id);
1550        rte_eth_dev_close(m_port_id);
1551    }
1552}
1553
1554void CPhyEthIF::start(){
1555
1556    get_ex_drv()->clear_extended_stats(this);
1557
1558    int ret;
1559
1560    m_bw_tx.reset();
1561    m_bw_rx.reset();
1562
1563    m_stats.Clear();
1564    int i;
1565    for (i=0;i<10; i++ ) {
1566        ret = rte_eth_dev_start(m_port_id);
1567        if (ret==0) {
1568            return;
1569        }
1570        delay(1000);
1571    }
1572    if (ret < 0)
1573        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1574                 "err=%d, port=%u\n",
1575                 ret, m_port_id);
1576
1577}
1578
1579// Disabling flow control on interface
1580void CPhyEthIF::disable_flow_control(){
1581    int ret;
1582    // see trex-64 issue with loopback on the same NIC
1583    struct rte_eth_fc_conf fc_conf;
1584    memset(&fc_conf,0,sizeof(fc_conf));
1585    fc_conf.mode=RTE_FC_NONE;
1586    fc_conf.autoneg=1;
1587    fc_conf.pause_time=100;
1588    int i;
1589    for (i=0; i<5; i++) {
1590        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1591        if (ret==0) {
1592            break;
1593        }
1594        delay(1000);
1595    }
1596    if (ret < 0)
1597        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1598                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1599                 ret, m_port_id);
1600}
1601
1602/*
1603Get user frienly devices description from saved env. var
1604Changes certain attributes based on description
1605*/
1606void DpdkTRexPortAttr::update_description(){
1607    struct rte_pci_addr pci_addr;
1608    char pci[16];
1609    char * envvar;
1610    std::string pci_envvar_name;
1611    pci_addr = rte_eth_devices[m_port_id].pci_dev->addr;
1612    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
1613    intf_info_st.pci_addr = pci;
1614    pci_envvar_name = "pci" + intf_info_st.pci_addr;
1615    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
1616    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
1617    envvar = std::getenv(pci_envvar_name.c_str());
1618    if (envvar) {
1619        intf_info_st.description = envvar;
1620    } else {
1621        intf_info_st.description = "Unknown";
1622    }
1623    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
1624        flag_is_link_change_supported = false;
1625    }
1626    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
1627        flag_is_fc_change_supported = false;
1628        flag_is_led_change_supported = false;
1629    }
1630    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
1631        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
1632    }
1633}
1634
1635int DpdkTRexPortAttr::set_led(bool on){
1636    if (on) {
1637        return rte_eth_led_on(m_port_id);
1638    }else{
1639        return rte_eth_led_off(m_port_id);
1640    }
1641}
1642
1643int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1644    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1645    if (ret) {
1646        return ret;
1647    }
1648    mode = (int) fc_conf_tmp.mode;
1649    return 0;
1650}
1651
1652int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1653    if (!flag_is_fc_change_supported) {
1654        return -ENOTSUP;
1655    }
1656    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1657    if (ret) {
1658        return ret;
1659    }
1660    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1661    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1662}
1663
// Reset the port's DPDK extended-statistics counters to zero.
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1667
1668int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
1669    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
1670    if (size < 0) {
1671        return size;
1672    }
1673    xstats_values_tmp.resize(size);
1674    xstats_values.resize(size);
1675    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
1676    if (size < 0) {
1677        return size;
1678    }
1679    for (int i=0; i<size; i++) {
1680        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
1681    }
1682    return 0;
1683}
1684
1685int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1686    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1687    if (size < 0) {
1688        return size;
1689    }
1690    xstats_names_tmp.resize(size);
1691    xstats_names.resize(size);
1692    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1693    if (size < 0) {
1694        return size;
1695    }
1696    for (int i=0; i<size; i++) {
1697        xstats_names[i] = xstats_names_tmp[i].name;
1698    }
1699    return 0;
1700}
1701
1702void DpdkTRexPortAttr::dump_link(FILE *fd){
1703    fprintf(fd,"port : %d \n",(int)m_port_id);
1704    fprintf(fd,"------------\n");
1705
1706    fprintf(fd,"link         : ");
1707    if (m_link.link_status) {
1708        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1709                (unsigned) m_link.link_speed,
1710                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1711                ("full-duplex") : ("half-duplex\n"));
1712    } else {
1713        fprintf(fd," Link Down\n");
1714    }
1715    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1716}
1717
// Refresh the cached rte_eth_dev_info (dev_info) for this port from DPDK.
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1721
1722void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1723    uint32_t speed_capa = dev_info.speed_capa;
1724    if (speed_capa & ETH_LINK_SPEED_1G)
1725        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1726    if (speed_capa & ETH_LINK_SPEED_10G)
1727        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1728    if (speed_capa & ETH_LINK_SPEED_40G)
1729        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1730    if (speed_capa & ETH_LINK_SPEED_100G)
1731        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1732}
1733
// Refresh the cached link state m_link (blocking variant of the query).
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1737
1738bool DpdkTRexPortAttr::update_link_status_nowait(){
1739    rte_eth_link new_link;
1740    bool changed = false;
1741    rte_eth_link_get_nowait(m_port_id, &new_link);
1742    if (new_link.link_speed != m_link.link_speed ||
1743                new_link.link_duplex != m_link.link_duplex ||
1744                    new_link.link_autoneg != m_link.link_autoneg ||
1745                        new_link.link_status != m_link.link_status) {
1746        changed = true;
1747    }
1748    m_link = new_link;
1749    return changed;
1750}
1751
1752int DpdkTRexPortAttr::add_mac(char * mac){
1753    struct ether_addr mac_addr;
1754    for (int i=0; i<6;i++) {
1755        mac_addr.addr_bytes[i] =mac[i];
1756    }
1757    return rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
1758}
1759
1760int DpdkTRexPortAttr::set_promiscuous(bool enable){
1761    if (enable) {
1762        rte_eth_promiscuous_enable(m_port_id);
1763    }else{
1764        rte_eth_promiscuous_disable(m_port_id);
1765    }
1766    return 0;
1767}
1768
1769int DpdkTRexPortAttr::set_link_up(bool up){
1770    if (up) {
1771        return rte_eth_dev_set_link_up(m_port_id);
1772    }else{
1773        return rte_eth_dev_set_link_down(m_port_id);
1774    }
1775}
1776
1777bool DpdkTRexPortAttr::get_promiscuous(){
1778    int ret=rte_eth_promiscuous_get(m_port_id);
1779    if (ret<0) {
1780        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1781                 "err=%d, port=%u\n",
1782                 ret, m_port_id);
1783
1784    }
1785    return ( ret?true:false);
1786}
1787
1788
// Copy the port's primary MAC address into *mac_addr.
void DpdkTRexPortAttr::macaddr_get(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1792
// Delegate flow-director statistics dumping to the driver-specific code.
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1796
/* Dump the non-zero counters of an ixgbe hardware statistics block to fd.
   DP_A1(f)   prints scalar counter hs->f only when it is non-zero.
   DP_A2(f,m) prints each non-zero element of the m-entry array hs->f.
   Commented-out entries are counters that were deliberately silenced. */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1883
1884void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
1885    // reading m_stats, so drivers saving prev in m_stats will be updated.
1886    // Actually, we want m_stats to be cleared
1887    get_ex_drv()->get_extended_stats(this, &m_stats);
1888
1889    m_ignore_stats.ipackets = m_stats.ipackets;
1890    m_ignore_stats.ibytes = m_stats.ibytes;
1891    m_ignore_stats.opackets = m_stats.opackets;
1892    m_ignore_stats.obytes = m_stats.obytes;
1893    m_stats.ipackets = 0;
1894    m_stats.opackets = 0;
1895    m_stats.ibytes = 0;
1896    m_stats.obytes = 0;
1897
1898    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
1899    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;
1900
1901    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
1902        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
1903        m_ignore_stats.dump(stdout);
1904    }
1905}
1906
1907void CPhyEthIF::dump_stats(FILE *fd){
1908
1909    update_counters();
1910
1911    fprintf(fd,"port : %d \n",(int)m_port_id);
1912    fprintf(fd,"------------\n");
1913    m_stats.DumpAll(fd);
1914    //m_stats.Dump(fd);
1915    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
1916    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
1917}
1918
// Zero both the hardware counters and the software mirror of them.
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
1923
1924class CCorePerPort  {
1925public:
1926    CCorePerPort (){
1927        m_tx_queue_id=0;
1928        m_len=0;
1929        int i;
1930        for (i=0; i<MAX_PKT_BURST; i++) {
1931            m_table[i]=0;
1932        }
1933        m_port=0;
1934    }
1935    uint8_t                 m_tx_queue_id;
1936    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
1937    uint16_t                m_len;
1938    rte_mbuf_t *            m_table[MAX_PKT_BURST];
1939    CPhyEthIF  *            m_port;
1940};
1941
1942
1943#define MAX_MBUF_CACHE 100
1944
1945
/* per core/gbe queue port for transmit */
1947class CCoreEthIF : public CVirtualIF {
1948public:
1949    enum {
1950     INVALID_Q_ID = 255
1951    };
1952
1953public:
1954
1955    CCoreEthIF(){
1956        m_mbuf_cache=0;
1957    }
1958
1959    bool Create(uint8_t             core_id,
1960                uint8_t            tx_client_queue_id,
1961                CPhyEthIF  *        tx_client_port,
1962                uint8_t            tx_server_queue_id,
1963                CPhyEthIF  *        tx_server_port,
1964                uint8_t             tx_q_id_lat);
1965    void Delete();
1966
1967    virtual int open_file(std::string file_name){
1968        return (0);
1969    }
1970
1971    virtual int close_file(void){
1972        return (flush_tx_queue());
1973    }
1974    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
1975                                                       , CCorePerPort *  lp_port
1976                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
1977    virtual int send_node(CGenNode * node);
1978    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
1979    virtual int flush_tx_queue(void);
1980    __attribute__ ((noinline)) void handle_rx_queue();
1981    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
1982
1983    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
1984
1985    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
1986
1987    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
1988
1989    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
1990    void GetCoreCounters(CVirtualIFPerSideStats *stats);
1991    void DumpCoreStats(FILE *fd);
1992    void DumpIfStats(FILE *fd);
1993    static void DumpIfCfgHeader(FILE *fd);
1994    void DumpIfCfg(FILE *fd);
1995
1996    socket_id_t get_socket_id(){
1997        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
1998    }
1999
2000    const CCorePerPort * get_ports() {
2001        return m_ports;
2002    }
2003
2004protected:
2005
2006    int send_burst(CCorePerPort * lp_port,
2007                   uint16_t len,
2008                   CVirtualIFPerSideStats  * lp_stats);
2009    int send_pkt(CCorePerPort * lp_port,
2010                 rte_mbuf_t *m,
2011                 CVirtualIFPerSideStats  * lp_stats);
2012    int send_pkt_lat(CCorePerPort * lp_port,
2013                 rte_mbuf_t *m,
2014                 CVirtualIFPerSideStats  * lp_stats);
2015
2016    void add_vlan(rte_mbuf_t *m, uint16_t vlan_id);
2017
2018protected:
2019    uint8_t      m_core_id;
2020    uint16_t     m_mbuf_cache;
2021    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
2022    CNodeRing *  m_ring_to_rx;
2023
2024} __rte_cache_aligned; ;
2025
/* Stateless-mode variant of the per-core interface: overrides node
   transmission to add flow-stat/latency handling and PCAP replay. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
protected:
    // Nodes flagged as slow path (currently only PCAP replay) land here.
    int handle_slow_path_node(CGenNode *node);
    int send_pcap_node(CGenNodePCAP *pcap_node);
};
2035
2036bool CCoreEthIF::Create(uint8_t             core_id,
2037                        uint8_t             tx_client_queue_id,
2038                        CPhyEthIF  *        tx_client_port,
2039                        uint8_t             tx_server_queue_id,
2040                        CPhyEthIF  *        tx_server_port,
2041                        uint8_t tx_q_id_lat ) {
2042    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
2043    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
2044    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2045    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
2046    m_ports[SERVER_SIDE].m_port        = tx_server_port;
2047    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2048    m_core_id = core_id;
2049
2050    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
2051    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
2052    assert( m_ring_to_rx);
2053    return (true);
2054}
2055
// This function is only relevant if we are in a VM. In this case, we only have one RX queue. We can't have
// rules to drop queue 0 packets and pass queue 1 packets to the RX core, like in the other cases.
// We receive all packets on the same core that transmitted, and hand them over to the RX core.
2059void CCoreEthIF::handle_rx_queue(void) {
2060    if ( likely( ! get_vm_one_queue_enable() ) ) {
2061        return;
2062    }
2063
2064    pkt_dir_t dir;
2065    bool is_rx = get_is_rx_thread_enabled();
2066    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2067        CCorePerPort * lp_port=&m_ports[dir];
2068        CPhyEthIF * lp=lp_port->m_port;
2069
2070        rte_mbuf_t * rx_pkts[32];
2071        int j=0;
2072
2073        while (true) {
2074            j++;
2075            uint16_t cnt =lp->rx_burst(0,rx_pkts,32);
2076            if ( cnt ) {
2077                int i;
2078                for (i=0; i<(int)cnt;i++) {
2079                    rte_mbuf_t * m=rx_pkts[i];
2080                    if ( is_rx ){
2081                        if (!process_rx_pkt(dir,m)){
2082                            rte_pktmbuf_free(m);
2083                        }
2084                    }else{
2085                        rte_pktmbuf_free(m);
2086                    }
2087                }
2088            }
2089            if ((cnt<5) || j>10 ) {
2090                break;
2091            }
2092        }
2093    }
2094}
2095
2096int CCoreEthIF::flush_tx_queue(void){
2097    /* flush both sides */
2098    pkt_dir_t dir;
2099    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2100        CCorePerPort * lp_port = &m_ports[dir];
2101        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2102        if ( likely(lp_port->m_len > 0) ) {
2103            send_burst(lp_port, lp_port->m_len, lp_stats);
2104            lp_port->m_len = 0;
2105        }
2106    }
2107
2108    handle_rx_queue();
2109
2110    return 0;
2111}
2112
2113void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2114    stats->Clear();
2115    pkt_dir_t   dir ;
2116    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2117        stats->Add(&m_stats[dir]);
2118    }
2119}
2120
2121void CCoreEthIF::DumpCoreStats(FILE *fd){
2122    fprintf (fd,"------------------------ \n");
2123    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2124    fprintf (fd,"------------------------ \n");
2125
2126    CVirtualIFPerSideStats stats;
2127    GetCoreCounters(&stats);
2128    stats.Dump(stdout);
2129}
2130
2131void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
2132    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
2133    fprintf (fd," ------------------------------------------\n");
2134}
2135
// Print one configuration row for this core: client port/queue,
// server port/queue and the latency queue id (columns match DumpIfCfgHeader).
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2145
2146
2147void CCoreEthIF::DumpIfStats(FILE *fd){
2148
2149    fprintf (fd,"------------------------ \n");
2150    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2151    fprintf (fd,"------------------------ \n");
2152
2153    const char * t[]={"client","server"};
2154    pkt_dir_t   dir ;
2155    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2156        CCorePerPort * lp=&m_ports[dir];
2157        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2158        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2159        fprintf (fd," ---------------------------- \n");
2160        lpstats->Dump(fd);
2161    }
2162}
2163
2164#define DELAY_IF_NEEDED
2165
2166int CCoreEthIF::send_burst(CCorePerPort * lp_port,
2167                           uint16_t len,
2168                           CVirtualIFPerSideStats  * lp_stats){
2169
2170    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
2171#ifdef DELAY_IF_NEEDED
2172    while ( unlikely( ret<len ) ){
2173        rte_delay_us(1);
2174        lp_stats->m_tx_queue_full += 1;
2175        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
2176                                                &lp_port->m_table[ret],
2177                                                len-ret);
2178        ret+=ret1;
2179    }
2180#else
2181    /* CPU has burst of packets larger than TX can send. Need to drop packets */
2182    if ( unlikely(ret < len) ) {
2183        lp_stats->m_tx_drop += (len-ret);
2184        uint16_t i;
2185        for (i=ret; i<len;i++) {
2186            rte_mbuf_t * m=lp_port->m_table[i];
2187            rte_pktmbuf_free(m);
2188        }
2189    }
2190#endif
2191
2192    return (0);
2193}
2194
2195
2196int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2197                         rte_mbuf_t      *m,
2198                         CVirtualIFPerSideStats  * lp_stats
2199                         ){
2200
2201    uint16_t len = lp_port->m_len;
2202    lp_port->m_table[len]=m;
2203    len++;
2204    /* enough pkts to be sent */
2205    if (unlikely(len == MAX_PKT_BURST)) {
2206        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2207        len = 0;
2208    }
2209    lp_port->m_len = len;
2210
2211    return (0);
2212}
2213
2214int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
2215    // We allow sending only from first core of each port. This is serious internal bug otherwise.
2216    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);
2217
2218    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
2219
2220#ifdef DELAY_IF_NEEDED
2221    while ( unlikely( ret != 1 ) ){
2222        rte_delay_us(1);
2223        lp_stats->m_tx_queue_full += 1;
2224        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
2225    }
2226
2227#else
2228    if ( unlikely( ret != 1 ) ) {
2229        lp_stats->m_tx_drop ++;
2230        rte_pktmbuf_free(m);
2231        return 0;
2232    }
2233
2234#endif
2235
2236    return ret;
2237}
2238
2239void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2240                              rte_mbuf_t      *m){
2241    CCorePerPort *  lp_port=&m_ports[dir];
2242    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2243    send_pkt(lp_port,m,lp_stats);
2244    /* flush */
2245    send_burst(lp_port,lp_port->m_len,lp_stats);
2246    lp_port->m_len = 0;
2247}
2248
/* Transmit a packet belonging to a flow-stat enabled stream.
   "Payload" rules (hw_id >= MAX_FLOW_STATS) get a latency header filled
   in and go out on the dedicated latency queue; plain ip-id rules are
   only counted and sent on the normal path. Returns 0. */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // NOTE(review): fsp_head is assumed to be set to a valid header by
        // alloc_flow_stat_mbuf; there is no NULL check before it is written.
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        /* test hook: deliberately corrupt sequence numbers to exercise
           the receiver's error counters */
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    /* account the packet and its wire size for this rule */
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        /* timestamp as late as possible, then send on the latency queue */
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2302
2303int CCoreEthIFStateless::send_node(CGenNode * no) {
2304    /* if a node is marked as slow path - single IF to redirect it to slow path */
2305    if (no->get_is_slow_path()) {
2306        return handle_slow_path_node(no);
2307    }
2308
2309    CGenNodeStateless * node_sl=(CGenNodeStateless *) no;
2310
2311    /* check that we have mbuf  */
2312    rte_mbuf_t *    m;
2313
2314    pkt_dir_t dir=(pkt_dir_t)node_sl->get_mbuf_cache_dir();
2315    CCorePerPort *  lp_port=&m_ports[dir];
2316    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2317    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2318        m=node_sl->cache_mbuf_array_get_cur();
2319        rte_pktmbuf_refcnt_update(m,1);
2320    }else{
2321        m=node_sl->get_cache_mbuf();
2322
2323        if (m) {
2324            /* cache case */
2325            rte_pktmbuf_refcnt_update(m,1);
2326        }else{
2327            m=node_sl->alloc_node_with_vm();
2328            assert(m);
2329        }
2330    }
2331
2332    if (unlikely(node_sl->is_stat_needed())) {
2333        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2334            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2335            // assert here just to make sure.
2336            assert(1);
2337        }
2338        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2339    } else {
2340        send_pkt(lp_port,m,lp_stats);
2341    }
2342
2343    return (0);
2344};
2345
2346int CCoreEthIFStateless::send_pcap_node(CGenNodePCAP *pcap_node) {
2347    rte_mbuf_t *m = pcap_node->get_pkt();
2348    if (!m) {
2349        return (-1);
2350    }
2351
2352    pkt_dir_t dir = (pkt_dir_t)pcap_node->get_mbuf_dir();
2353    CCorePerPort *lp_port=&m_ports[dir];
2354    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2355
2356    send_pkt(lp_port, m, lp_stats);
2357
2358    return (0);
2359}
2360
2361/**
2362 * slow path code goes here
2363 *
2364 */
2365int CCoreEthIFStateless::handle_slow_path_node(CGenNode * no) {
2366
2367    if (no->m_type == CGenNode::PCAP_PKT) {
2368        return send_pcap_node((CGenNodePCAP *)no);
2369    }
2370
2371    return (-1);
2372}
2373
2374void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2375
2376    assert(cfg);
2377
2378    /* take the right direction config */
2379    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2380
2381    /* dst mac */
2382    if (cfg_dir.has_dst_mac_addr()) {
2383        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2384    }
2385
2386    /* src mac */
2387    if (cfg_dir.has_src_mac_addr()) {
2388        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2389    }
2390
2391    /* VLAN */
2392    if (cfg_dir.has_vlan()) {
2393        add_vlan(m, cfg_dir.get_vlan());
2394    }
2395}
2396
2397
2398void CCoreEthIF::add_vlan(rte_mbuf_t *m, uint16_t vlan_id) {
2399    m->ol_flags = PKT_TX_VLAN_PKT;
2400    m->l2_len   = 14;
2401    m->vlan_tci = vlan_id;
2402}
2403
2404/**
2405 * slow path features goes here (avoid multiple IFs)
2406 *
2407 */
2408void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2409
2410
2411    /* MAC ovverride */
2412    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2413        /* client side */
2414        if ( node->is_initiator_pkt() ) {
2415            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2416        }
2417    }
2418
2419    /* flag is faster than checking the node pointer (another cacheline) */
2420    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2421        apply_client_cfg(node->m_client_cfg, m, dir, p);
2422    }
2423
2424}
2425
2426int CCoreEthIF::send_node(CGenNode * node) {
2427
2428    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2429        pkt_dir_t       dir;
2430        rte_mbuf_t *    m=node->get_cache_mbuf();
2431        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2432        CCorePerPort *  lp_port=&m_ports[dir];
2433        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2434        rte_pktmbuf_refcnt_update(m,1);
2435        send_pkt(lp_port,m,lp_stats);
2436        return (0);
2437    }
2438
2439
2440    CFlowPktInfo *  lp=node->m_pkt_info;
2441    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2442
2443    pkt_dir_t       dir;
2444    bool            single_port;
2445
2446    dir         = node->cur_interface_dir();
2447    single_port = node->get_is_all_flow_from_same_dir() ;
2448
2449
2450    if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2451        /* which vlan to choose 0 or 1*/
2452        uint8_t vlan_port = (node->m_src_ip &1);
2453        uint16_t vlan_id  = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2454
2455        if (likely( vlan_id >0 ) ) {
2456            dir = dir ^ vlan_port;
2457        }else{
2458            /* both from the same dir but with VLAN0 */
2459            vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2460            dir = dir ^ 0;
2461        }
2462
2463        add_vlan(m, vlan_id);
2464    }
2465
2466    CCorePerPort *lp_port = &m_ports[dir];
2467    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2468
2469    if (unlikely(m==0)) {
2470        lp_stats->m_tx_alloc_error++;
2471        return(0);
2472    }
2473
2474    /* update mac addr dest/src 12 bytes */
2475    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2476    uint8_t p_id = lp_port->m_port->get_port_id();
2477
2478    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2479
2480     /* when slowpath features are on */
2481    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2482        handle_slowpath_features(node, m, p, dir);
2483    }
2484
2485
2486    if ( unlikely( node->is_rx_check_enabled() ) ) {
2487        lp_stats->m_tx_rx_check_pkt++;
2488        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2489        lp_stats->m_template.inc_template( node->get_template_id( ));
2490    }else{
2491        // cache only if it is not sample as this is more complex mbuf struct
2492        if ( unlikely( node->can_cache_mbuf() ) ) {
2493            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2494                m_mbuf_cache++;
2495                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2496                    /* limit the number of object to cache */
2497                    node->set_mbuf_cache_dir( dir);
2498                    node->set_cache_mbuf(m);
2499                    rte_pktmbuf_refcnt_update(m,1);
2500                }
2501            }
2502        }
2503    }
2504
2505    /*printf("send packet -- \n");
2506      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2507
2508    /* send the packet */
2509    send_pkt(lp_port,m,lp_stats);
2510    return (0);
2511}
2512
2513
2514int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2515    assert(p);
2516    assert(dir<2);
2517
2518    CCorePerPort *  lp_port=&m_ports[dir];
2519    uint8_t p_id=lp_port->m_port->get_port_id();
2520    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2521    return (0);
2522}
2523
2524pkt_dir_t
2525CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2526
2527    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2528        if (m_ports[dir].m_port->get_port_id() == port_id) {
2529            return dir;
2530        }
2531    }
2532
2533    return (CS_INVALID);
2534}
2535
/* Latency port backed by real NIC queues: transmits on a dedicated TX
   queue and polls a dedicated RX queue of a physical interface. */
class CLatencyHWPort : public CPortLatencyHWBase {
public:
    // Bind to a physical port and its latency TX/RX queue ids.
    void Create(CPhyEthIF  * p,
                uint8_t tx_queue,
                uint8_t rx_queue){
        m_port=p;
        m_tx_queue_id=tx_queue;
        m_rx_queue_id=rx_queue;
    }

    // Send one latency packet. Returns 0 on success, -1 when the TX
    // queue was full (the mbuf is freed in that case).
    virtual int tx(rte_mbuf_t * m){
        rte_mbuf_t * tx_pkts[2];
        tx_pkts[0]=m;
        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
            /* vlan mode is the default */
            /* set the vlan */
            m->ol_flags = PKT_TX_VLAN_PKT;
            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
            m->l2_len   =14;
        }
        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
        if ( res == 0 ) {
            rte_pktmbuf_free(m);
            //printf(" queue is full for latency packet !!\n");
            return (-1);

        }
#if 0
        /* debug-only hexdump of the transmitted packet */
        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
        utl_DumpBuffer(stdout,p1,pkt_size1,0);
#endif

        return (0);
    }

    // Poll for a single packet; returns NULL when the queue is empty.
    virtual rte_mbuf_t * rx(){
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
        if (cnt) {
            return (rx_pkts[0]);
        }else{
            return (0);
        }
    }

    // Poll for up to nb_pkts packets; returns the number received.
    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts){
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
        return (cnt);
    }


private:
    CPhyEthIF  * m_port;          // underlying physical interface
    uint8_t      m_tx_queue_id ;  // TX queue reserved for latency pkts
    uint8_t      m_rx_queue_id;   // RX queue polled for latency pkts
};
2594
2595
2596class CLatencyVmPort : public CPortLatencyHWBase {
2597public:
2598    void Create(uint8_t port_index,CNodeRing * ring,
2599                CLatencyManager * mgr){
2600        m_dir        = (port_index%2);
2601        m_ring_to_dp = ring;
2602        m_mgr        = mgr;
2603    }
2604
2605    virtual int tx(rte_mbuf_t * m){
2606        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2607            /* vlan mode is the default */
2608            /* set the vlan */
2609            m->ol_flags = PKT_TX_VLAN_PKT;
2610            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
2611            m->l2_len   =14;
2612        }
2613
2614        /* allocate node */
2615        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
2616        if ( node ) {
2617            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
2618            node->m_dir      = m_dir;
2619            node->m_pkt      = m;
2620            node->m_latency_offset = m_mgr->get_latency_header_offset();
2621
2622            if ( m_ring_to_dp->Enqueue((CGenNode*)node) ==0 ){
2623                return (0);
2624            }
2625        }
2626        return (-1);
2627    }
2628
2629    virtual rte_mbuf_t * rx(){
2630        return (0);
2631    }
2632
2633    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
2634                              uint16_t nb_pkts){
2635        return (0);
2636    }
2637
2638
2639private:
2640    uint8_t                          m_dir;
2641    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
2642    CLatencyManager *                m_mgr;
2643};
2644
2645
2646
/* Per-port statistics snapshot used by CGlobalStats: cumulative hardware
 * counters plus derived rates. */
class CPerPortStats {
public:
    uint64_t opackets;   // transmitted packets (cumulative)
    uint64_t obytes;     // transmitted bytes (cumulative)
    uint64_t ipackets;   // received packets (cumulative)
    uint64_t ibytes;     // received bytes (cumulative)
    uint64_t ierrors;    // RX errors
    uint64_t oerrors;    // TX errors
    // per flow-stat id TX counters: current snapshot and previous snapshot
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;   // TX rate, bits/sec
    float     m_total_tx_pps;   // TX rate, packets/sec

    float     m_total_rx_bps;   // RX rate, bits/sec
    float     m_total_rx_pps;   // RX rate, packets/sec

    float     m_cpu_util;       // CPU utilization for this interface (see CGlobalTRex::get_cpu_util_per_interface)
};
2666
/* Aggregated global TRex statistics plus one CPerPortStats per port.
 * Rendered either as human readable text (Dump/DumpAllPorts) or as a
 * JSON record (dump_json) for the publisher. */
class CGlobalStats {
public:
    enum DumpFormat {
        dmpSTANDARD,   // labelled section per port
        dmpTABLE       // one row per counter, one column per port
    };

    // cumulative totals across all ports
    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    uint64_t  m_total_alloc_error;
    uint64_t  m_total_queue_full;
    uint64_t  m_total_queue_drop;

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    // NAT/learn-mode counters (only meaningful when learn mode is enabled)
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;

    float     m_socket_util;

    float m_platform_factor;
    float m_tx_bps;             // total TX rate, bits/sec
    float m_rx_bps;             // total RX rate, bits/sec
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;             // connections/sec
    float m_tx_expected_cps;    // expected rates derived from configuration
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;           // DP cores utilization (%)
    float m_cpu_util_raw;
    float m_rx_cpu_util;        // RX core utilization (%)
    float m_bw_per_core;
    uint8_t m_threads;

    uint32_t      m_num_of_ports;             // number of valid entries in m_port[]
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    void dump_json(std::string & json, bool baseline);
private:
    // helpers that format a single "name":value JSON fragment (with trailing comma)
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2730
2731std::string CGlobalStats::get_field(const char *name, float &f){
2732    char buff[200];
2733    if(f <= -10.0 or f >= 10.0)
2734        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2735    else
2736        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2737    return (std::string(buff));
2738}
2739
2740std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2741    char buff[200];
2742    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2743    return (std::string(buff));
2744}
2745
2746std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2747    char buff[200];
2748    if(f <= -10.0 or f >= 10.0)
2749        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2750    else
2751        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2752    return (std::string(buff));
2753}
2754
2755std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2756    char buff[200];
2757    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2758    return (std::string(buff));
2759}
2760
2761
/* Serialize the whole statistics snapshot into one JSON record of the form
 * {"name":"trex-global","type":0,["baseline":true,]"data":{...}}.
 * baseline - when true, tags the record so consumers treat it as the
 *            reference point for delta computation. */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    /* timestamp: raw HR tick plus the tick frequency so the consumer can
       convert to seconds */
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

/* GET_FIELD stringizes the member name as the JSON key; GET_FIELD_PORT does
   the same for the per-port struct referenced through lp */
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    /* per-port fields: keys are suffixed with the port index, e.g. "opackets-0" */
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    /* every field emitter leaves a trailing comma; the dummy "unknown":0
       entry terminates the object without it */
    json+="\"unknown\":0}}"  ;
}
2835
/* Print the aggregated (all-ports) statistics in human readable form.
 * NAT/learn-mode counters are appended to the relevant lines only when
 * learn mode is active. */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    /* in learn mode the NAT timeout counter shares the Total-Tx line */
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    /* error counters are printed only when non-zero to keep the output short */
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
2920
2921
2922void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
2923    int i;
2924    int port_to_show=m_num_of_ports;
2925    if (port_to_show>4) {
2926        port_to_show=4;
2927        fprintf (fd," per port - limited to 4   \n");
2928    }
2929
2930
2931    if ( mode== dmpSTANDARD ){
2932        fprintf (fd," --------------- \n");
2933        for (i=0; i<(int)port_to_show; i++) {
2934            CPerPortStats * lp=&m_port[i];
2935            fprintf(fd,"port : %d \n",(int)i);
2936            fprintf(fd,"------------\n");
2937#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2938#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2939            GS_DP_A4(opackets);
2940            GS_DP_A4(obytes);
2941            GS_DP_A4(ipackets);
2942            GS_DP_A4(ibytes);
2943            GS_DP_A(ierrors);
2944            GS_DP_A(oerrors);
2945            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2946        }
2947    }else{
2948        fprintf(fd," %10s ","ports");
2949        for (i=0; i<(int)port_to_show; i++) {
2950            fprintf(fd,"| %15d ",i);
2951        }
2952        fprintf(fd,"\n");
2953        fprintf(fd," -----------------------------------------------------------------------------------------\n");
2954        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
2955        };
2956        for (i=0; i<7; i++) {
2957            fprintf(fd," %10s ",names[i].c_str());
2958            int j=0;
2959            for (j=0; j<port_to_show;j++) {
2960                CPerPortStats * lp=&m_port[j];
2961                uint64_t cnt;
2962                switch (i) {
2963                case 0:
2964                    cnt=lp->opackets;
2965                    fprintf(fd,"| %15lu ",cnt);
2966
2967                    break;
2968                case 1:
2969                    cnt=lp->obytes;
2970                    fprintf(fd,"| %15lu ",cnt);
2971
2972                    break;
2973                case 2:
2974                    cnt=lp->ipackets;
2975                    fprintf(fd,"| %15lu ",cnt);
2976
2977                    break;
2978                case 3:
2979                    cnt=lp->ibytes;
2980                    fprintf(fd,"| %15lu ",cnt);
2981
2982                    break;
2983                case 4:
2984                    cnt=lp->ierrors;
2985                    fprintf(fd,"| %15lu ",cnt);
2986
2987                    break;
2988                case 5:
2989                    cnt=lp->oerrors;
2990                    fprintf(fd,"| %15lu ",cnt);
2991
2992                    break;
2993                case 6:
2994                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2995                    break;
2996                default:
2997                    cnt=0xffffff;
2998                }
2999            } /* ports */
3000            fprintf(fd, "\n");
3001        }/* fields*/
3002    }
3003
3004
3005}
3006
/* Top-level singleton-style object of the TRex process: owns the physical
 * ports, the per-core interfaces, the latency/RX machinery, the publisher
 * and the overall start/stop/shutdown lifecycle. */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;


    /* defaults only; real values are filled by the *_prob_init()/Create() flow */
    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    bool Create();
    void Delete();
    int  ixgbe_prob_init();
    int  cores_prob_init();
    int  queues_prob_init();
    int  ixgbe_start();
    int  ixgbe_rx_queue_flush();
    void ixgbe_configure_mg();
    void rx_sl_configure();
    bool is_all_links_are_up(bool dump=false);
    /* gratuitous ARP / MAC resolution phase that runs before traffic starts */
    void pre_test();

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        /* first cause wins - do not overwrite an already-pending reason */
        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    /* the RX thread, when enabled, runs on the last core; -1 otherwise */
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port;
    uint32_t    m_cores_to_dual_ports; /* number of ports that will handle dual ports */
    uint16_t    m_latency_tx_queue_id;
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];  /* points at either the SF or SL array per mode */
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock;   // protects control-plane state

    TrexMonitor         m_monitor;

    shutdown_rc_e       m_mark_for_shutdown;   // pending shutdown reason, SHUTDOWN_NONE when idle

public:
    TrexStateless       *m_trex_stateless;

};
3188
// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
// Two modes:
//  * client-config mode: src IP/MAC pairs come from the client config file;
//    resolved MACs are pushed back into the flow-gen client config.
//  * normal mode: src/dst MACs come from the TRex config; missing dest MACs
//    are resolved via ARP towards the default gateway.
void CGlobalTRex::pre_test() {
    CPretest pretest(m_max_ports);
    bool resolve_needed = false;
    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
    // NOTE(review): need_grat_arp is only initialized in the non-client-cfg
    // branch below, and is only read in that same mode later on - keep it that
    // way if this function is restructured.
    bool need_grat_arp[TREX_MAX_PORTS];

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        std::vector<ClientCfgCompactEntry *> conf;
        m_fl.get_client_cfg_ip_list(conf);

        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            // NOTE(review): shadows the outer empty_mac; harmless but redundant
            uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
                rte_eth_macaddr_get(port_id,
                                    (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
            }
            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
        }

        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
            uint8_t port = (*it)->get_port();
            uint16_t vlan = (*it)->get_vlan();
            uint32_t count = (*it)->get_count();
            uint32_t dst_ip = (*it)->get_dst_ip();
            uint32_t src_ip = (*it)->get_src_ip();

            // register each destination in the consecutive range as a next hop
            for (int i = 0; i < count; i++) {
                //??? handle ipv6;
                if ((*it)->is_ipv4()) {
                    pretest.add_next_hop(port, dst_ip + i, vlan);
                }
            }
            // fall back to the per-port src IP from the TRex config file
            if (!src_ip) {
                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
                if (!src_ip) {
                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
                            , port, ip_to_str(dst_ip).c_str(), vlan);
                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
                    exit(1);
                }
            }
            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
            m_mg.add_grat_arp_src(ipv4);

            delete *it;
        }
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            fprintf(stdout, "*******Pretest for client cfg********\n");
            pretest.dump(stdout);
            }
    } else {
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            // dest MAC missing from config => must be ARP-resolved for this port
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                resolve_needed = true;
            } else {
                resolve_needed = false;
            }
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
                rte_eth_macaddr_get(port_id,
                                    (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
                need_grat_arp[port_id] = true;
            } else {
                // If we got src MAC from config file, do not send gratuitous ARP for it
                // (for compatibility with old behaviour)
                need_grat_arp[port_id] = false;
            }

            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);

            if (resolve_needed) {
                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }
    }

    // temporarily accept every packet so the pretest sees ARP replies
    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        CPhyEthIF *pif = &m_ports[port_id];
        // Configure port to send all packets to software
        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
    }


    pretest.send_grat_arp_all();
    bool ret;
    int count = 0;
    bool resolve_failed = false;
    // retry resolution up to 10 times before giving up
    do {
        ret = pretest.resolve_all();
        count++;
    } while ((ret != true) && (count < 10));
    if (ret != true) {
        resolve_failed = true;
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
        fprintf(stdout, "*******Pretest after resolving ********\n");
        pretest.dump(stdout);
    }

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        CManyIPInfo pretest_result;
        pretest.get_results(pretest_result);
        if (resolve_failed) {
            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
                   ip = pretest_result.get_next()) {
                if (ip->resolve_needed()) {
                    ip->dump(stderr, "  ");
                }
            }
            exit(1);
        }
        m_fl.set_client_config_resolved_macs(pretest_result);
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            m_fl.dump_client_config(stdout);
        }

        bool port_found[TREX_MAX_PORTS];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            port_found[port_id] = false;
        }
        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
            // Use first MAC/vlan we see on each port
            uint8_t port_id = ip->get_port();
            uint16_t vlan = ip->get_vlan();
            if ( ! port_found[port_id]) {
                port_found[port_id] = true;
                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
            }
        }
    } else {
        uint8_t mac[ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                // we don't have dest MAC. Get it from what we resolved.
                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();
                if (! pretest.get_mac(port_id, ip, vlan, mac)) {
                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);
                    exit(1);
                }
                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
                                      , port_id);
                    m_mg.add_grat_arp_src(ipv4);
                }
            }

            // update statistics baseline, so we can ignore what happened in pre test phase
            CPhyEthIF *pif = &m_ports[port_id];
            CPreTestStats pre_stats = pretest.get_stats(port_id);
            pif->set_ignore_stats_base(pre_stats);

            // Configure port back to normal mode. Only relevant packets handled by software.
            CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, false);
        }
    }
}
3362
3363/**
3364 * check for a single core
3365 *
3366 * @author imarom (19-Nov-15)
3367 *
3368 * @param thread_id
3369 */
3370void
3371CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3372
3373    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3374
3375    /* fast path check */
3376    if ( likely ( ring->isEmpty() ) ) {
3377        return;
3378    }
3379
3380    while ( true ) {
3381        CGenNode * node = NULL;
3382        if (ring->Dequeue(node) != 0) {
3383            break;
3384        }
3385        assert(node);
3386
3387        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3388        msg->handle();
3389        delete msg;
3390    }
3391
3392}
3393
3394/**
3395 * check for messages that arrived from DP to CP
3396 *
3397 */
3398void
3399CGlobalTRex::check_for_dp_messages() {
3400
3401    /* for all the cores - check for a new message */
3402    for (int i = 0; i < get_cores_tx(); i++) {
3403        check_for_dp_message_from_core(i);
3404    }
3405}
3406
3407bool CGlobalTRex::is_all_links_are_up(bool dump){
3408    bool all_link_are=true;
3409    int i;
3410    for (i=0; i<m_max_ports; i++) {
3411        CPhyEthIF * _if=&m_ports[i];
3412        _if->get_port_attr()->update_link_status();
3413        if ( dump ){
3414            _if->dump_stats(stdout);
3415        }
3416        if ( _if->get_port_attr()->is_link_up() == false){
3417            all_link_are=false;
3418            break;
3419        }
3420    }
3421    return (all_link_are);
3422}
3423
3424void CGlobalTRex::try_stop_all_cores(){
3425
3426    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3427    TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3428    send_message_all_dp(dp_msg);
3429    if (get_is_stateless()) {
3430        send_message_to_rx(rx_msg);
3431    }
3432    delete dp_msg;
3433    // no need to delete rx_msg. Deleted by receiver
3434    bool all_core_finished = false;
3435    int i;
3436    for (i=0; i<20; i++) {
3437        if ( is_all_cores_finished() ){
3438            all_core_finished =true;
3439            break;
3440        }
3441        delay(100);
3442    }
3443    if ( all_core_finished ){
3444        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3445        printf(" All cores stopped !! \n");
3446    }else{
3447        printf(" ERROR one of the DP core is stucked !\n");
3448    }
3449}
3450
3451
3452int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3453
3454    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3455    int i;
3456
3457    for (i=0; i<max_threads; i++) {
3458        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3459        ring->Enqueue((CGenNode*)msg->clone());
3460    }
3461    return (0);
3462}
3463
3464int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3465    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3466    ring->Enqueue((CGenNode *) msg);
3467
3468    return (0);
3469}
3470
3471
3472int  CGlobalTRex::ixgbe_rx_queue_flush(){
3473    int i;
3474    for (i=0; i<m_max_ports; i++) {
3475        CPhyEthIF * _if=&m_ports[i];
3476        _if->flush_rx_queue();
3477    }
3478    return (0);
3479}
3480
3481
/* Configure and create the latency manager (m_mg) used by the stateful RX
   core: decide the latency packet rate, wire up one latency port object per
   physical port (direct or via DP rings in VM mode), then create m_mg. */
void CGlobalTRex::ixgbe_configure_mg(void) {
    int i;
    CLatencyManagerCfg mg_cfg;
    mg_cfg.m_max_ports = m_max_ports;

    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;

    if ( latency_rate ) {
        mg_cfg.m_cps = (double)latency_rate ;
    } else {
        // If RX core needed, we need something to make the scheduler running.
        // If nothing configured, send 1 CPS latency measurement packets.
        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
            mg_cfg.m_cps = 1.0;
        } else {
            mg_cfg.m_cps = 0;
        }
    }

    if ( get_vm_one_queue_enable() ) {
        /* vm mode, indirect queues  */
        for (i=0; i<m_max_ports; i++) {

            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();

            /* two ports share one DP thread, hence i>>1 */
            uint8_t thread_id = (i>>1);

            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
            m_latency_vm_vports[i].Create((uint8_t)i,r,&m_mg);

            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
        }

    }else{
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if=&m_ports[i];
            /* NOTE(review): stats dump at startup - looks like debug
               leftover; confirm it is intentional before removing */
            _if->dump_stats(stdout);
            m_latency_vports[i].Create(_if,m_latency_tx_queue_id,1);

            mg_cfg.m_ports[i] =&m_latency_vports[i];
        }
    }


    m_mg.Create(&mg_cfg);
    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
}
3529
3530// init m_rx_sl object for stateless rx core
3531void CGlobalTRex::rx_sl_configure(void) {
3532    CRxSlCfg rx_sl_cfg;
3533    int i;
3534
3535    rx_sl_cfg.m_max_ports = m_max_ports;
3536
3537    if ( get_vm_one_queue_enable() ) {
3538        /* vm mode, indirect queues  */
3539        for (i=0; i < m_max_ports; i++) {
3540            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3541            uint8_t thread_id = (i >> 1);
3542            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3543            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg);
3544            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3545        }
3546    } else {
3547        for (i = 0; i < m_max_ports; i++) {
3548            CPhyEthIF * _if = &m_ports[i];
3549            m_latency_vports[i].Create(_if, m_latency_tx_queue_id, 1);
3550            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3551        }
3552    }
3553
3554    m_rx_sl.create(rx_sl_cfg);
3555}
3556
/* Bring up all physical ports: configure RX/TX queues (VM vs bare-metal
   layout), start the ports, verify links, flush RX queues, configure the
   RX core (stateful or stateless), and wire every TX core to its pair of
   ports. Returns 0; exits the process if a link is down and the driver
   cannot drop packets on link-down. */
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {

        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        /* last TX queue if for latency check */
        if ( get_vm_one_queue_enable() ) {
            /* one tx one rx */

            /* VMXNET3 does claim to support 16K but somehow does not work */
            /* reduce to 2000 */
            m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000;

            _if->configure(1,
                           1,
                           &m_port_cfg.m_port_conf);

            /* will not be used */
            m_latency_tx_queue_id= m_cores_to_dual_ports;

            socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
            assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);



            _if->set_rx_queue(0);
            _if->rx_queue_setup(0,
                                RTE_TEST_RX_DESC_VM_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);

            int qid;
            for ( qid=0; qid<(m_max_queues_per_port); qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_VM_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);

            }

        }else{
            /* bare-metal mode: two RX queues (drop + latency filter) and
               one TX queue per dual-port core plus one for latency */
            _if->configure(2,
                           m_cores_to_dual_ports+1,
                           &m_port_cfg.m_port_conf);

            /* the latency queue for latency measurement packets */
            m_latency_tx_queue_id= m_cores_to_dual_ports;

            socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
            assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


            /* drop queue */
            _if->rx_queue_setup(0,
                                RTE_TEST_RX_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


            /* set the filter queue */
            _if->set_rx_queue(1);
            /* latency measurement ring is 1 */
            _if->rx_queue_setup(1,
                                RTE_TEST_RX_LATENCY_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);

            int qid;
            for ( qid=0; qid<(m_max_queues_per_port+1); qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);

            }

        }


        _if->stats_clear();

        _if->start();
        _if->configure_rx_duplicate_rules();

        /* disable flow control unless the user opted out or the NIC
           cannot change it; never touch it in VM mode */
        if ( ! get_vm_one_queue_enable()  && ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " "
                         " one of the link is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    if (! get_is_stateless()) {
        ixgbe_configure_mg();
    } else {
        rx_sl_configure();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if ( get_vm_one_queue_enable() ) {
        lat_q_id = 0;
    } else {
        lat_q_id = get_cores_tx() / get_base_num_cores();
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3731
3732static void trex_termination_handler(int signum);
3733
3734void CGlobalTRex::register_signals() {
3735    struct sigaction action;
3736
3737    /* handler */
3738    action.sa_handler = trex_termination_handler;
3739
3740    /* blocked signals during handling */
3741    sigemptyset(&action.sa_mask);
3742    sigaddset(&action.sa_mask, SIGINT);
3743    sigaddset(&action.sa_mask, SIGTERM);
3744
3745    /* no flags */
3746    action.sa_flags = 0;
3747
3748    /* register */
3749    sigaction(SIGINT,  &action, NULL);
3750    sigaction(SIGTERM, &action, NULL);
3751}
3752
/* Top-level bring-up: signals, YAML config (stateful only), ZMQ publisher,
   port/core/queue probing, messaging rings, mbuf pools, port start, and -
   in stateless mode - the RPC server object. Returns false only if the
   ZMQ publisher cannot be created; other failures exit or assert. */
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    if (!get_is_stateless()) {
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
    }
    /* End update pre flags */

    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    /* these node types are overlaid on CGenNode in the scheduler rings,
       so their sizes must match exactly */
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */

    /* mbuf count sized to cover the RX descriptor rings of all ports */
    uint32_t rx_mbuf = 0 ;

    if ( get_vm_one_queue_enable() ) {
        rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
    }else{
        rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
    }

    CGlobalInfo::init_pools(rx_mbuf);
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        /* ownership of the platform API object passes to TrexStateless -
           presumably freed there; TODO confirm */
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);
    }

    return (true);

}
/* teardown counterpart of Create() - currently only releases the ZMQ publisher */
void CGlobalTRex::Delete(){
    m_zmq_publisher.Delete();
}
3828
3829
3830
/* Probe the Ethernet ports: validate the port count against the config,
   verify all ports use the same (supported) driver with a new-enough
   firmware, and apply driver/port configuration updates. Exits the
   process on any validation failure. Returns 0 on success. */
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    printf(" Number of ports found: %d \n",m_max_ports);

    /* ports are always handled in dual-port pairs */
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports %d should be even, mask the one port in the configuration file  \n, ",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum ports supported are %d, use the configuration file to set the expected number of ports   \n",TREX_MAX_PORTS);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports you expected more %d,use the configuration file to set the expected number of ports   \n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    /* use port 0 as the reference device for driver identification */
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
    }



    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
        printf(" Error: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
               ,dev_info.driver_name);
        exit(1);
    }

    /* all ports must be driven by the same PMD */
    int i;
    struct rte_eth_dev_info dev_info1;

    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if ( get_vm_one_queue_enable() ) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf(" ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue \n");
            exit(1);
        }
    }
    return (0);
}
3921
/* record the number of logical cores the EAL gave us; always returns 0 */
int  CGlobalTRex::cores_prob_init(){
    m_max_cores = rte_lcore_count();
    assert(m_max_cores>0);
    return (0);
}
3927
/* Derive the queue topology from the core count: one TX queue per core
   per dual-port pair. Exits if fewer than 2 cores or too many queues.
   Always returns 0 on success. */
int  CGlobalTRex::queues_prob_init(){

    /* core 0 is reserved for control, so at least 2 cores are required */
    if (m_max_cores < 2) {
        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
    }

    assert((m_max_ports>>1) <= get_cores_tx() );

    m_cores_mul = CGlobalInfo::m_options.preview.getCores();

    m_cores_to_dual_ports  = m_cores_mul;

    /* core 0 - control
       -core 1 - port 0/1
       -core 2 - port 2/3
       -core 3 - port 0/1
       -core 4 - port 2/3

       m_cores_to_dual_ports = 2;
    */

    /* number of queue - 1 per core for dual ports*/
    m_max_queues_per_port  = m_cores_to_dual_ports;

    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
        rte_exit(EXIT_FAILURE,
                 "maximum number of queue should be maximum %d  \n",BP_MAX_TX_QUEUE);
    }

    assert(m_max_queues_per_port>0);
    return (0);
}
3960
3961
/* print the probed port/core/queue topology to fd */
void CGlobalTRex::dump_config(FILE *fd){
    fprintf(fd," number of ports         : %u \n",m_max_ports);
    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
}
3967
3968
3969void CGlobalTRex::dump_links_status(FILE *fd){
3970    for (int i=0; i<m_max_ports; i++) {
3971        m_ports[i].get_port_attr()->update_link_status_nowait();
3972        m_ports[i].get_port_attr()->dump_link(fd);
3973    }
3974}
3975
3976
/* Print an end-of-run summary: HW TX/RX packet and byte totals, software
   TX counters aggregated over all cores, ARP counters, drop estimate, and
   latency stats when latency measurement was enabled. */
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    /* software-side counters, summed over all TX cores */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* hardware-side counters, summed over all ports */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency packets are sent by the RX core, not the TX cores */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* rx > tx can happen (e.g. traffic from outside); report drop as 0
       and warn if rx exceeds tx by more than 1% */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
4047
4048
4049void CGlobalTRex::update_stats(){
4050
4051    int i;
4052    for (i=0; i<m_max_ports; i++) {
4053        CPhyEthIF * _if=&m_ports[i];
4054        _if->update_counters();
4055    }
4056    uint64_t total_open_flows=0;
4057
4058
4059    CFlowGenListPerThread   * lpt;
4060    for (i=0; i<get_cores_tx(); i++) {
4061        lpt = m_fl.m_threads_info[i];
4062        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4063    }
4064    m_last_total_cps = m_cps.add(total_open_flows);
4065
4066}
4067
/* TX flow-stat delta for (port, rule index) since the last clear */
tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
}
4071
4072// read stats. Return read value, and clear.
/* Re-aggregate the TX flow-stat counter for (port, index) from all cores
   serving that port, return the delta since the previous read, and make
   the current value the new baseline ("read and clear"). When is_lat is
   set, also reset the per-core latency record for this rule (latency
   rules are indexed starting at MAX_FLOW_STATS). */
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    m_stats.m_port[port].m_tx_per_flow[index].clear();

    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        /* each core serves the dual-port pair (port0, port0+1) */
        port0 = lpt->getDualPortId() * 2;
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
4098
/* Fill the global statistics snapshot: per-port HW counters and rates,
   per-core software counters (flows, errors, NAT, sockets, per-flow TX
   stats), and derived aggregates scaled by the platform factor. */
void CGlobalTRex::get_stats(CGlobalStats & stats){

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port hardware counters and last-interval rates */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* zero the per-flow counters; they are re-summed from the
           per-core stats in the loop below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* aggregate the software counters of every TX core; each core's
       m_stats[0]/m_stats[1] are its two sides of the dual-port pair */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        uint8_t port0 = lpt->getDualPortId() *2;
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* small tx/rx gaps are treated as measurement noise, not drops */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* guard against division by a near-zero CPU utilization */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4274
4275float
4276CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4277    CPhyEthIF * _if = &m_ports[port_id];
4278
4279    float    tmp = 0;
4280    uint8_t  cnt = 0;
4281    for (const auto &p : _if->get_core_list()) {
4282        uint8_t core_id = p.first;
4283        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4284        if (lp->is_port_active(port_id)) {
4285            tmp += lp->m_cpu_cp_u.GetVal();
4286            cnt++;
4287        }
4288    }
4289
4290    return ( (cnt > 0) ? (tmp / cnt) : 0);
4291
4292}
4293
4294bool CGlobalTRex::sanity_check(){
4295
4296    CFlowGenListPerThread   * lpt;
4297    uint32_t errors=0;
4298    int i;
4299    for (i=0; i<get_cores_tx(); i++) {
4300        lpt = m_fl.m_threads_info[i];
4301        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4302    }
4303
4304    if ( errors ) {
4305        printf(" ERRORs sockets allocation errors! \n");
4306        printf(" you should allocate more clients in the pool \n");
4307        return(true);
4308    }
4309    return ( false);
4310}
4311
4312
4313/* dump the template info */
4314void CGlobalTRex::dump_template_info(std::string & json){
4315    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4316    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4317
4318    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4319    int i;
4320    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4321        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4322        json+="\""+ r->m_name+"\"";
4323        json+=",";
4324    }
4325    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4326    json+="]}" ;
4327}
4328
/**
 * Refresh the aggregated counters and print them to fd.
 *
 * In interactive TABLE format the per-port and global sections honor
 * the keyboard-selected IO modes; any other format (used at exit and
 * by scripts) dumps everything unconditionally.
 */
void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){

    update_stats();
    get_stats(m_stats);

    if (format==CGlobalStats::dmpTABLE) {
        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
            /* per-port section - controlled by m_pp_mode */
            switch (m_io_modes.m_pp_mode ){
            case CTrexGlobalIoMode::ppDISABLE:
                fprintf(fd,"\n+Per port stats disabled \n");
                break;
            case CTrexGlobalIoMode::ppTABLE:
                fprintf(fd,"\n-Per port stats table \n");
                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
                break;
            case CTrexGlobalIoMode::ppSTANDARD:
                fprintf(fd,"\n-Per port stats - standard\n");
                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
                break;
            };

            /* all-ports (global) section - controlled by m_ap_mode */
            switch (m_io_modes.m_ap_mode ){
            case   CTrexGlobalIoMode::apDISABLE:
                fprintf(fd,"\n+Global stats disabled \n");
                break;
            case   CTrexGlobalIoMode::apENABLE:
                fprintf(fd,"\n-Global stats enabled \n");
                m_stats.DumpAllPorts(fd);
                break;
            };
        }
    }else{
        /* at exit , always need to dump it in standartd mode for scripts*/
        m_stats.Dump(fd,format);
        m_stats.DumpAllPorts(fd);
    }

}
4367
/**
 * Publish the periodic statistics snapshots over the ZMQ channel:
 * global stats, generator stats, template info (stateful), rx-check,
 * latency manager dumps and (stateless) per-flow/latency stats.
 *
 * @param sync_now  refresh the counters before dumping; otherwise the
 *                  values from the last update are reused
 * @param baseline  mark the dumped JSON as a baseline snapshot
 */
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);


    /* template names exist only in stateful (YAML profile) mode */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4413
/**
 * Publish a barrier event with the given key so async subscribers can
 * synchronize with the server.
 */
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    m_zmq_publisher.publish_barrier(key);
}
4418
4419void
4420CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4421    Json::Value data;
4422    data["port_id"] = port_id;
4423    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4424
4425    /* attributes */
4426    data["attr"]["speed"] = _attr->get_link_speed();
4427    data["attr"]["promiscuous"]["enabled"] = _attr->get_promiscuous();
4428    data["attr"]["link"]["up"] = _attr->is_link_up();
4429    int mode;
4430    int ret = _attr->get_flow_ctrl(mode);
4431    if (ret != 0) {
4432        mode = -1;
4433    }
4434    data["attr"]["fc"]["mode"] = mode;
4435
4436    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4437}
4438
/**
 * Slow-path duty of the master core: poll link status, handle
 * keyboard-driven IO modes, run sanity checks, print the on-screen
 * stats according to the current mode and publish the async data.
 */
void
CGlobalTRex::handle_slow_path() {
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard input can switch IO modes or request quit */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    /* stop the run on client-socket allocation errors */
    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* clear the terminal (ANSI escapes) unless screen output is disabled */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        if ( m_io_modes.m_g_disable_first  ) {
            /* clear the screen once when entering the disabled mode */
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        /* pool stats are verbose - print only every 4th tick */
        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* latency / rx-check section - stateful with RX enabled only */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            switch (m_io_modes.m_l_mode) {
            case CTrexGlobalIoMode::lDISABLE:
                fprintf(stdout,"\n+Latency stats disabled \n");
                break;
            case CTrexGlobalIoMode::lENABLE:
                fprintf(stdout,"\n-Latency stats enabled \n");
                m_mg.DumpShort(stdout);
                break;
            case CTrexGlobalIoMode::lENABLE_Extended:
                fprintf(stdout,"\n-Latency stats extended \n");
                m_mg.Dump(stdout);
                break;
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4552
4553
4554void
4555CGlobalTRex::handle_fast_path() {
4556    /* check from messages from DP */
4557    check_for_dp_messages();
4558
4559    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4560    for (int i = 0; i < 1000; i++) {
4561        m_fl.UpdateFast();
4562
4563        if (get_is_stateless()) {
4564            m_rx_sl.update_cpu_util();
4565        }else{
4566            m_mg.update_fast();
4567        }
4568
4569        rte_pause();
4570    }
4571
4572
4573    if ( is_all_cores_finished() ) {
4574        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4575    }
4576}
4577
4578
4579/**
4580 * shutdown sequence
4581 *
4582 */
4583void CGlobalTRex::shutdown() {
4584    std::stringstream ss;
4585    ss << " *** TRex is shutting down - cause: '";
4586
4587    switch (m_mark_for_shutdown) {
4588
4589    case SHUTDOWN_TEST_ENDED:
4590        ss << "test has ended'";
4591        break;
4592
4593    case SHUTDOWN_CTRL_C:
4594        ss << "CTRL + C detected'";
4595        break;
4596
4597    case SHUTDOWN_SIGINT:
4598        ss << "received signal SIGINT'";
4599        break;
4600
4601    case SHUTDOWN_SIGTERM:
4602        ss << "received signal SIGTERM'";
4603        break;
4604
4605    case SHUTDOWN_RPC_REQ:
4606        ss << "server received RPC 'shutdown' request'";
4607        break;
4608
4609    default:
4610        assert(0);
4611    }
4612
4613    /* report */
4614    std::cout << ss.str() << "\n";
4615
4616    /* first stop the WD */
4617    TrexWatchDog::getInstance().stop();
4618
4619    /* stateless shutdown */
4620    if (get_is_stateless()) {
4621        m_trex_stateless->shutdown();
4622    }
4623
4624    if (!is_all_cores_finished()) {
4625        try_stop_all_cores();
4626    }
4627
4628    m_mg.stop();
4629
4630    delay(1000);
4631
4632    /* shutdown drivers */
4633    for (int i = 0; i < m_max_ports; i++) {
4634        m_ports[i].stop();
4635    }
4636    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4637        /* we should stop latency and exit to stop agents */
4638        exit(-1);
4639    }
4640}
4641
4642
/**
 * Main loop of the master (control-plane) core.
 *
 * Alternates a fast path every FASTPATH_DELAY_MS with a slow path
 * every SLOWPATH_DELAY_MS while holding m_cp_lock; the lock is
 * released around the sleep so other control-plane work can run.
 *
 * @return always 0 (returns only after shutdown was requested)
 */
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    /* register this core with the watchdog */
    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }


        /* release the lock while sleeping so others can take it */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.tickle();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4692
4693
4694
4695int CGlobalTRex::run_in_rx_core(void){
4696
4697    rte_thread_setname(pthread_self(), "TRex RX");
4698
4699    if (get_is_stateless()) {
4700        m_sl_rx_running = true;
4701        m_rx_sl.start();
4702        m_sl_rx_running = false;
4703    } else {
4704        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4705            m_sl_rx_running = false;
4706            m_mg.start(0, true);
4707        }
4708    }
4709
4710    return (0);
4711}
4712
/**
 * Entry point of a DP (traffic-generating) core.
 *
 * @param virt_core_id  1-based virtual core id; maps to
 *                      m_fl.m_threads_info[virt_core_id-1]
 * @return always 0; m_signal[virt_core_id] is set to 1 on completion
 */
int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
    std::stringstream ss;

    ss << "Trex DP core " << int(virt_core_id);
    rte_thread_setname(pthread_self(), ss.str().c_str());

    CPreviewMode *lp=&CGlobalInfo::m_options.preview;
    /* in single-core mode core 2 has nothing to do - just signal done */
    if ( lp->getSingleCore() &&
         (virt_core_id==2 ) &&
         (lp-> getCores() ==1) ){
        printf(" bypass this core \n");
        m_signal[virt_core_id]=1;
        return (0);
    }


    assert(m_fl_was_init);
    CFlowGenListPerThread   * lpt;

    lpt = m_fl.m_threads_info[virt_core_id-1];

    /* register a watchdog handle on current core */
    lpt->m_monitor.create(ss.str(), 1);
    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);

    /* blocks for the duration of the run */
    if (get_is_stateless()) {
        lpt->start_stateless_daemon(*lp);
    }else{
        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
    }

    /* done - remove this from the watchdog (we might wait on join for a long time) */
    lpt->m_monitor.disable();

    m_signal[virt_core_id]=1;
    return (0);
}
4750
4751
/**
 * Final report after the cores stopped: dump per-interface, per-core
 * and generator statistics, latency / rx-check results and post-test
 * stats, then free the flow-generator list.
 *
 * @return always 0
 */
int CGlobalTRex::stop_master(){

    delay(1000);
    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");
    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    fprintf(stdout," ==================\n");
    fprintf(stdout," \n\n");

    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");

    CFlowGenListPerThread   * lpt;
    uint64_t total_tx_rx_check=0;

    /* per-core interface stats; accumulate rx-check TX for verification */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        CCoreEthIF * erf_vif = m_cores_vif[i+1];

        erf_vif->DumpCoreStats(stdout);
        erf_vif->DumpIfStats(stdout);
        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
    }

    fprintf(stdout," ==================\n");
    fprintf(stdout," generators \n");
    fprintf(stdout," ==================\n");
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        lpt->m_node_gen.DumpHist(stdout);
        lpt->DumpStats(stdout);
    }
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf(stdout," ==================\n");
        fprintf(stdout," latency \n");
        fprintf(stdout," ==================\n");
        m_mg.DumpShort(stdout);
        m_mg.Dump(stdout);
        m_mg.DumpShortRxCheck(stdout);
        m_mg.DumpRxCheck(stdout);
        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
    }

    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    dump_post_test_stats(stdout);
    m_fl.Delete();

    return (0);
}
4805
4806bool CGlobalTRex::is_all_cores_finished() {
4807    int i;
4808    for (i=0; i<get_cores_tx(); i++) {
4809        if ( m_signal[i+1]==0){
4810            return false;
4811        }
4812    }
4813    if (m_sl_rx_running)
4814        return false;
4815
4816    return true;
4817}
4818
4819
4820int CGlobalTRex::start_master_stateless(){
4821    int i;
4822    for (i=0; i<BP_MAX_CORES; i++) {
4823        m_signal[i]=0;
4824    }
4825    m_fl.Create();
4826    m_expected_pps = 0;
4827    m_expected_cps = 0;
4828    m_expected_bps = 0;
4829
4830    m_fl.generate_p_thread_info(get_cores_tx());
4831    CFlowGenListPerThread   * lpt;
4832
4833    for (i=0; i<get_cores_tx(); i++) {
4834        lpt = m_fl.m_threads_info[i];
4835        CVirtualIF * erf_vif = m_cores_vif[i+1];
4836        lpt->set_vif(erf_vif);
4837        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
4838    }
4839    m_fl_was_init=true;
4840
4841    return (0);
4842}
4843
/**
 * Prepare a stateful run: load the traffic YAML (and the optional
 * client config file), verify options, compute the expected rates and
 * attach each DP thread to its virtual interface.
 *
 * @return always 0 (the process exits on configuration errors)
 */
int CGlobalTRex::start_master_statefull() {
    int i;
    /* clear the DP done-signals */
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected totals derived from the YAML profile */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode - dump the flow table as CSV */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    /* attach each DP thread to its virtual interface */
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
4913
4914
4915////////////////////////////////////////////
4916static CGlobalTRex g_trex;
4917
/**
 * Refresh this port's counters from the driver and update the smoothed
 * TX/RX byte and packet rates. Traffic generated internally by the RX
 * core (e.g. ARP) is subtracted from the port totals and tracked in
 * m_ignore_stats instead.
 */
void CPhyEthIF::update_counters() {
    get_ex_drv()->get_extended_stats(this, &m_stats);
    CRXCoreIgnoreStat ign_stats;
    /* what the RX core itself transmitted on this port */
    g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
    m_stats.obytes -= ign_stats.get_tx_bytes();
    m_stats.opackets -= ign_stats.get_tx_pkts();
    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();

    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
}
4933
4934bool CPhyEthIF::Create(uint8_t portid) {
4935    m_port_id      = portid;
4936    m_last_rx_rate = 0.0;
4937    m_last_tx_rate = 0.0;
4938    m_last_tx_pps  = 0.0;
4939    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
4940
4941    return true;
4942}
4943
4944const std::vector<std::pair<uint8_t, uint8_t>> &
4945CPhyEthIF::get_core_list() {
4946
4947    /* lazy find */
4948    if (m_core_id_list.size() == 0) {
4949
4950        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
4951
4952            /* iterate over all the directions*/
4953            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
4954                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
4955                    m_core_id_list.push_back(std::make_pair(core_id, dir));
4956                }
4957            }
4958        }
4959    }
4960
4961    return m_core_id_list;
4962
4963}
4964
4965int CPhyEthIF::reset_hw_flow_stats() {
4966    if (get_ex_drv()->hw_rx_stat_supported()) {
4967        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
4968    } else {
4969        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
4970    }
4971    return 0;
4972}
4973
// get/reset flow director counters
// return 0 if OK. -1 if operation not supported.
// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
// min, max - minimum, maximum counters range to get
// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    if (hw_rx_stat_supported) {
        /* read the delta since the previous poll from the NIC */
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        /* no HW support - the software RX core owns the counters */
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* reset the NIC-side baseline for this single counter */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* accumulate the NIC deltas into the absolute counters */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
5026
5027int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
5028    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
5029    for (int i = min; i <= max; i++) {
5030        if ( reset ) {
5031            if (tx_stats != NULL) {
5032                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
5033            }
5034        } else {
5035            if (tx_stats != NULL) {
5036                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
5037            }
5038        }
5039    }
5040
5041    return 0;
5042}
5043
5044// If needed, send packets to rx core for processing.
5045// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
5046bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir, rte_mbuf_t * m) {
5047    CFlowStatParser parser;
5048    uint32_t ip_id;
5049
5050    if (parser.parse(rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m)) != 0) {
5051        return false;
5052    }
5053    bool send=false;
5054
5055    // e1000 on ESXI hands us the packet with the ethernet FCS
5056    if (parser.get_pkt_size() < rte_pktmbuf_pkt_len(m)) {
5057        rte_pktmbuf_trim(m, rte_pktmbuf_pkt_len(m) - parser.get_pkt_size());
5058    }
5059
5060    if ( get_is_stateless() ) {
5061        // In stateless RX, we only care about flow stat packets
5062        if ((parser.get_ip_id(ip_id) == 0) && ((ip_id & 0xff00) == IP_ID_RESERVE_BASE)) {
5063            send = true;
5064        }
5065    } else {
5066        CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
5067        bool is_lateancy_pkt =  c_l_pkt_mode->IsLatencyPkt((IPHeader *)parser.get_l4()) &
5068            CCPortLatency::IsLatencyPkt(parser.get_l4() + c_l_pkt_mode->l4_header_len());
5069
5070        if (is_lateancy_pkt) {
5071            send = true;
5072        } else {
5073            if ( get_is_rx_filter_enable() ) {
5074                uint8_t max_ttl = 0xff - get_rx_check_hops();
5075                uint8_t pkt_ttl = parser.get_ttl();
5076                if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
5077                    send=true;
5078                }
5079            }
5080        }
5081    }
5082
5083
5084    if (send) {
5085        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
5086        if ( node ) {
5087            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
5088            node->m_dir      = dir;
5089            node->m_latency_offset = 0xdead;
5090            node->m_pkt      = m;
5091            if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
5092            }else{
5093                CGlobalInfo::free_node((CGenNode *)node);
5094                send=false;
5095            }
5096
5097#ifdef LATENCY_QUEUE_TRACE_
5098            printf("rx to cp --\n");
5099            rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
5100#endif
5101        }else{
5102            send=false;
5103        }
5104    }
5105    return (send);
5106}
5107
/* accessor for the stateless object owned by the global g_trex */
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
5111
/* accessor for the stateless RX core owned by the global g_trex */
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
5115
5116static int latency_one_lcore(__attribute__((unused)) void *dummy)
5117{
5118    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5119    physical_thread_id_t  phy_id =rte_lcore_id();
5120
5121    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5122        g_trex.run_in_rx_core();
5123    }else{
5124
5125        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5126            g_trex.run_in_master();
5127            delay(1);
5128        }else{
5129            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5130            /* this core has stopped */
5131            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5132        }
5133    }
5134    return 0;
5135}
5136
5137
5138
5139static int slave_one_lcore(__attribute__((unused)) void *dummy)
5140{
5141    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5142    physical_thread_id_t  phy_id =rte_lcore_id();
5143
5144    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5145        g_trex.run_in_rx_core();
5146    }else{
5147        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5148            g_trex.run_in_master();
5149            delay(1);
5150        }else{
5151            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5152        }
5153    }
5154    return 0;
5155}
5156
5157
5158
/**
 * Build an lcore bit-mask: bit 0 (the master core) plus (cores-1)
 * consecutive bits starting at bit (offset+1).
 *
 * Fixes: with cores==0 the original 'i < cores-1' comparison
 * underflowed (unsigned) and looped over the whole 32-bit range; a
 * large offset/cores combination shifted past bit 31 (undefined
 * behavior). Both cases are now bounded.
 *
 * @param cores   total number of cores including the master; 0 or 1
 *                yields just the master bit
 * @param offset  bit position preceding the first worker-core bit
 * @return the assembled 32-bit core mask
 */
uint32_t get_cores_mask(uint32_t cores,int offset){
    uint32_t res = 1;            /* master core (bit 0) is always included */
    uint32_t bit = (uint32_t)offset + 1;

    for (uint32_t i = 0; (cores > 0) && (i < cores - 1) && (bit < 32); i++, bit++) {
        res |= (1u << bit);
    }
    return (res);
}
5171
5172
/* argv[0] as passed to main(); exposed via get_exe_name() */
static char *g_exe_name;
const char *get_exe_name() {
    return g_exe_name;
}
5177
5178
/* process entry point - remember argv[0] and delegate to main_test() */
int main(int argc , char * argv[]){
    g_exe_name = argv[0];

    return ( main_test(argc , argv));
}
5184
5185
/**
 * Apply the platform YAML configuration (ports, MACs/IPs, ZMQ/telnet
 * ports, memory scaling) to the global options and memory config.
 *
 * @return always 0
 */
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the per-port MAC/IP info from the file */

        int port_size=cg->m_mac_info.size();

        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
        }
    }

    /* mul by interface type */
    float mul=1.0;
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    /* scale memory with port speed (10Gb baseline) and port count */
    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5252
5253extern "C" int eal_cpu_detected(unsigned lcore_id);
5254// return mask representing available cores
5255int core_mask_calc() {
5256    uint32_t mask = 0;
5257    int lcore_id;
5258
5259    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5260        if (eal_cpu_detected(lcore_id)) {
5261            mask |= (1 << lcore_id);
5262        }
5263    }
5264
5265    return mask;
5266}
5267
// Return number of set bits in i
uint32_t num_set_bits(uint32_t i)
{
    /* Kernighan's trick: each `i &= i - 1` clears the lowest set bit,
       so the loop runs once per set bit. */
    uint32_t count = 0;
    while (i != 0) {
        i &= i - 1;
        count++;
    }
    return count;
}
5275
5276// sanity check if the cores we want to use really exist
5277int core_mask_sanity(uint32_t wanted_core_mask) {
5278    uint32_t calc_core_mask = core_mask_calc();
5279    uint32_t wanted_core_num, calc_core_num;
5280
5281    wanted_core_num = num_set_bits(wanted_core_mask);
5282    calc_core_num = num_set_bits(calc_core_mask);
5283
5284    if (calc_core_num == 1) {
5285        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
5286        printf("        If you are running on VM, consider adding more cores if possible\n");
5287        return -1;
5288    }
5289    if (wanted_core_num > calc_core_num) {
5290        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
5291        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
5292               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
5293               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
5294        if (CGlobalInfo::m_options.preview.getCores() > 1)
5295            printf("       Maybe try smaller -c <num>.\n");
5296        printf("       If you are running on VM, consider adding more cores if possible\n");
5297        return -1;
5298    }
5299
5300    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
5301        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
5302        return -1;
5303    }
5304
5305    return 0;
5306}
5307
/* Build the argv vector handed to rte_eal_init() (global_dpdk_args /
   global_dpdk_args_num) from the parsed TRex options: core mask, log
   level, master lcore, PCI white-list and optional file-prefix/memory
   limit. Returns 0 on success, -1 on a bad configuration. */
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    /* verify the requested core mask against the cores the EAL detected */
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";  /* dummy argv[0] */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    /* quiet EAL logging unless verbose mode is requested */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        /* dump-info mode: whitelist only the interfaces the user asked about */
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        /* normal mode: whitelist the interfaces from the platform cfg file */
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    /* multi-instance support: distinct hugepage file prefix + memory cap */
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5392
5393
5394int sim_load_list_of_cap_files(CParserOption * op){
5395
5396    CFlowGenList fl;
5397    fl.Create();
5398    fl.load_from_yaml(op->cfg_file,1);
5399    if ( op->preview.getVMode() >0 ) {
5400        fl.DumpCsv(stdout);
5401    }
5402    uint32_t start=    os_get_time_msec();
5403
5404    CErfIF erf_vif;
5405
5406    fl.generate_p_thread_info(1);
5407    CFlowGenListPerThread   * lpt;
5408    lpt=fl.m_threads_info[0];
5409    lpt->set_vif(&erf_vif);
5410
5411    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5412        lpt->start_generate_stateful(op->out_file,op->preview);
5413    }
5414
5415    lpt->m_node_gen.DumpHist(stdout);
5416
5417    uint32_t stop=    os_get_time_msec();
5418    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5419    fl.Delete();
5420    return (0);
5421}
5422
/* Print PCI address, MAC address and driver name for every DPDK port
   (used by the --dump-interfaces / RUN_MODE_DUMP_INFO path after EAL init). */
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        // NOTE(review): assumes every port is a PCI device (pci_dev non-NULL);
        // a virtual device here would dereference NULL — confirm
        pci_addr = rte_eth_devices[port_id].pci_dev->addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].pci_dev->driver->name);
    }
}
5440
/* Real program entry point (called from main()). Parses options (CLI twice,
   around the platform file, so user flags win), builds EAL args, initializes
   DPDK, then runs either simulation, a HW debug test, latency-only mode,
   single-core mode, or the normal master + DP-core run until shutdown.
   Returns 0 on clean exit, negative on error; may exit() directly. */
int main_test(int argc , char * argv[]){


    utl_termio_init();

    int ret;
    unsigned lcore_id;
    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);

    CGlobalInfo::m_options.preview.clean();

    /* first pass over the CLI (pre-platform-file) */
    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
        exit(-1);
    }

    /* enable core dump if requested */
    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
        utl_set_coredump_size(-1);
    }
    else {
        utl_set_coredump_size(0);
    }


    update_global_info_from_platform_file();

    /* It is not a mistake. Give the user higher priorty over the configuration file */
    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
        exit(-1);
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        CGlobalInfo::m_options.dump(stdout);
        CGlobalInfo::m_memory_cfg.Dump(stdout);
    }


    /* build the EAL argv (core mask, whitelist, prefix, ...) */
    if (update_dpdk_args() < 0) {
        return -1;
    }

    CParserOption * po=&CGlobalInfo::m_options;


    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        rte_set_log_level(1);

    }
    /* DPDK needs root for hugepages / device access */
    uid_t uid;
    uid = geteuid ();
    if ( uid != 0 ) {
        printf("ERROR you must run with superuser priviliges \n");
        printf("User id   : %d \n",uid);
        printf("try 'sudo' %s \n",argv[0]);
        return (-1);
    }

    /* set affinity to the master core as default */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
    if (ret < 0){
        printf(" You might need to run ./trex-cfg  once  \n");
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    }
    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
        dump_interfaces_info();
        exit(0);
    }
    /* make DPDK port ids line up with the cfg-file interface order */
    reorder_dpdk_ports();
    time_init();

    /* check if we are in simulation mode */
    if ( CGlobalInfo::m_options.out_file != "" ){
        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
    }

    if ( !g_trex.Create() ){
        exit(1);
    }

    /* clamp the rx-check sample rate to the driver minimum */
    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
        po->m_rx_check_sample = get_min_sample_rate();
        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
    }

    /* set dump mode */
    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);

    /* disable WD if needed */
    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
    TrexWatchDog::getInstance().init(wd_enable);

    g_trex.m_sl_rx_running = false;
    if ( get_is_stateless() ) {
        g_trex.start_master_stateless();

    }else{
        g_trex.start_master_statefull();
    }

    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports);
        int ret;

        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
            // Unit test: toggle many times between receive all and stateless/stateful modes,
            // to test resiliency of add/delete fdir filters
            printf("Starting receive all/normal mode toggle unit test\n");
            for (int i = 0; i < 100; i++) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
                if (ret != 0) {
                    printf("Iteration %d: Receive all mode failed\n", i);
                    exit(ret);
                }

                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
                }

                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
                if (ret != 0) {
                    printf("Iteration %d: Normal mode failed\n", i);
                    exit(ret);
                }

                printf("Iteration %d OK\n", i);
            }
            exit(0);
        } else {
            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
            }
            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
            exit(ret);
        }
    }

    // in case of client config, we already run pretest
    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        g_trex.pre_test();
    }

    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
    g_trex.ixgbe_rx_queue_flush();
    for (int i = 0; i < g_trex.m_max_ports; i++) {
        CPhyEthIF *_if = &g_trex.m_ports[i];
        _if->stop_rx_drop_queue();
    }

    /* optional latency warm-up before real traffic */
    if ( CGlobalInfo::m_options.is_latency_enabled()
         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
            CGlobalInfo::m_options.m_latency_rate;
        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
        g_trex.m_mg.start(pkts, NULL);
        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
        printf("Finished \n");
        g_trex.m_mg.reset();
    }

    /* latency-only run: no traffic generation cores */
    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
        g_trex.stop_master();

        return (0);
    }

    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
        g_trex.run_in_core(1);
        g_trex.stop_master();
        return (0);
    }

    /* normal run: launch DP work on every slave lcore and wait for them */
    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    g_trex.stop_master();
    g_trex.Delete();
    utl_termio_reset();

    return (0);
}
5645
5646void wait_x_sec(int sec) {
5647    int i;
5648    printf(" wait %d sec ", sec);
5649    fflush(stdout);
5650    for (i=0; i<sec; i++) {
5651        delay(1000);
5652        printf(".");
5653        fflush(stdout);
5654    }
5655    printf("\n");
5656    fflush(stdout);
5657}
5658
5659/*
5660Changes the order of rte_eth_devices array elements
5661to be consistent with our /etc/trex_cfg.yaml
5662*/
5663void reorder_dpdk_ports() {
5664    rte_eth_dev rte_eth_devices_temp[RTE_MAX_ETHPORTS];
5665    uint8_t m_port_map[RTE_MAX_ETHPORTS];
5666    struct rte_pci_addr addr;
5667    uint8_t port_id;
5668
5669    // gather port relation information and save current array to temp
5670    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
5671        memcpy(&rte_eth_devices_temp[i], &rte_eth_devices[i], sizeof rte_eth_devices[i]);
5672        if (eal_parse_pci_BDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0 && eal_parse_pci_DomBDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0) {
5673            printf("Failed mapping TRex port id to DPDK id: %d\n", i);
5674            exit(1);
5675        }
5676        rte_eth_dev_get_port_by_addr(&addr, &port_id);
5677        m_port_map[port_id] = i;
5678        // print the relation in verbose mode
5679        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
5680            printf("TRex cfg port id: %d <-> DPDK port id: %d\n", i, port_id);
5681        }
5682    }
5683
5684    // actual reorder
5685    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
5686        memcpy(&rte_eth_devices[m_port_map[i]], &rte_eth_devices_temp[i], sizeof rte_eth_devices_temp[i]);
5687    }
5688}
5689
5690//////////////////////////////////////////////////////////////////////////////////////////////
5691//////////////////////////////////////////////////////////////////////////////////////////////
5692// driver section
5693//////////////////////////////////////////////////////////////////////////////////////////////
5694int CTRexExtendedDriverBase::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5695    uint8_t port_id=_if->get_rte_port_id();
5696    return (rte_eth_dev_rx_queue_stop(port_id, q_num));
5697}
5698
/* Default link-settle behavior: wait the user-configured number of seconds. */
int CTRexExtendedDriverBase::wait_for_stable_link() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
    return 0;
}
5703
/* Default post-link-up delay: same configurable wait as wait_for_stable_link. */
void CTRexExtendedDriverBase::wait_after_link_up() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
}
5707
/* Allocate the default flow-stat parser; the caller owns (and must delete)
   the returned object. Note: operator new throws on failure, so the assert
   is belt-and-braces only. */
CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
    CFlowStatParser *parser = new CFlowStatParser();
    assert (parser);
    return parser;
}
5713
// in 1G we need to wait if links became ready too soon
void CTRexExtendedDriverBase1G::wait_after_link_up(){
    /* extra 6s on top of the configured wait: 1G links can report "up"
       before they are actually stable */
    wait_x_sec(6 + CGlobalInfo::m_options.m_wait_before_traffic);
}
5718
/* 1G links need a longer settle time: 9s plus the configured wait. */
int CTRexExtendedDriverBase1G::wait_for_stable_link(){
    wait_x_sec(9 + CGlobalInfo::m_options.m_wait_before_traffic);
    return(0);
}
5723
/* 1G NICs: set TX descriptor threshold tuning (prefetch/host/write-back). */
void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){

    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = 0;
}
5730
/* Intentionally empty: no global FDIR setup needed for 1G devices. */
void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
    // Configuration is done in configure_rx_filter_rules by writing to registers
}
5734
#define E1000_RXDCTL_QUEUE_ENABLE	0x02000000
// e1000 driver does not support the generic stop/start queue API, so we need to implement ourselves
int CTRexExtendedDriverBase1G::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
    /* clear the queue-enable bit in RXDCTL to stop RX queue q_num */
    uint32_t reg_val = _if->pci_reg_read( E1000_RXDCTL(q_num));
    reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
    _if->pci_reg_write( E1000_RXDCTL(q_num), reg_val);
    return 0;
}
5743
5744int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
5745    if ( get_is_stateless() ) {
5746        return configure_rx_filter_rules_stateless(_if);
5747    } else {
5748        return configure_rx_filter_rules_statefull(_if);
5749    }
5750
5751    return 0;
5752}
5753
/* Stateful mode: program e1000 flexible filters (FHFT) so that latency /
   rx-check packets (matched by TTL-or-HopLimit and protocol byte pairs)
   are steered to RX queue 1. Filter byte offsets shift by 4 when VLAN is
   enabled and by the IPv4/IPv6 header layout. Returns 0. */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
    uint16_t hops = get_rx_check_hops();
    uint16_t v4_hops = (hops << 8)&0xff00;
    uint8_t protocol;

    /* latency packet protocol depends on the -l packet mode */
    if (CGlobalInfo::m_options.m_l_pkt_mode == 0) {
        protocol = IPPROTO_SCTP;
    } else {
        protocol = IPPROTO_ICMP;
    }
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
    _if->pci_reg_write( E1000_TTQF(0),   protocol
                        | 0x00008100 /* enable */
                        | 0xE0010000 /* RX queue is 1 */
                        );


    /* 16  :   12 MAC , (2)0x0800,2      | DW0 , DW1
       6 bytes , TTL , PROTO     | DW2=0 , DW3=0x0000FF06
    */
    int i;
    // IPv4: bytes being compared are {TTL, Protocol}
    uint16_t ff_rules_v4[6]={
        (uint16_t)(0xFF06 - v4_hops),
        (uint16_t)(0xFE11 - v4_hops),
        (uint16_t)(0xFF11 - v4_hops),
        (uint16_t)(0xFE06 - v4_hops),
        (uint16_t)(0xFF01 - v4_hops),
        (uint16_t)(0xFE01 - v4_hops),
    }  ;
    // IPv6: bytes being compared are {NextHdr, HopLimit}
    uint16_t ff_rules_v6[2]={
        (uint16_t)(0x3CFF - hops),
        (uint16_t)(0x3CFE - hops),
    }  ;
    uint16_t *ff_rules;
    uint16_t num_rules;
    uint32_t mask=0;     /* accumulates one enable bit per programmed rule */
    int  rule_id;

    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
        ff_rules = &ff_rules_v6[0];
        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
    }else{
        ff_rules = &ff_rules_v4[0];
        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
    }

    clear_rx_filter_rules(_if);

    uint8_t len = 24;    /* base filter length in bytes */
    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
        /* clear rule all */
        for (i=0; i<0xff; i+=4) {
            _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
        }

        if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
            len += 8;    /* VLAN tag shifts the L3 header by 4 bytes */
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
            }else{
                // IPv4 VLAN: TTL/Protocol offset = 0x1A
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
            }
        }else{
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6: NextHdr/HopLimit offset = 0x14
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
            }else{
                // IPv4: TTL/Protocol offset = 0x16
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
            }
        }

        // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
        _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8)  | len);

        mask |=(1<<rule_id);
    }

    /* enable all rules */
    _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );

    return (0);
}
5847
// Sadly, DPDK has no support for i350 filters, so we need to implement by writing to registers.
/* Stateless mode: program four FHFT rules steering flow-stat packets to RX
   queue 1 — IPv4 (IP ID msb == 0xff), IPv4-in-VLAN, IPv6 (flow label byte
   == 0xff), and IPv6-in-VLAN. Returns 0. */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);

    uint8_t len = 24;       /* filter length in bytes */
    uint32_t mask = 0;      /* accumulates one enable bit per rule */
    int rule_id;

    clear_rx_filter_rules(_if);

    rule_id = 0;
    mask |= 0x1 << rule_id;
    // filter for byte 18 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000008);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    // same as 0, but with vlan. type should be vlan. Inside vlan, should be IP with lsb of IP ID equals 0xff
    rule_id = 1;
    mask |= 0x1 << rule_id;
    // filter for byte 22 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x40 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x00000008);
    // Was written together with IP ID filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    rule_id = 2;
    mask |= 0x1 << rule_id;
    // ipv6 flow stat
    // filter for byte 16 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x01); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IPv6.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x0000dd86);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    rule_id = 3;
    mask |= 0x1 << rule_id;
    // same as 2, with vlan. Type is vlan. Inside vlan, IPv6 with flow label second bits 4-11 equals 0xff
    // filter for byte 20 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x10 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x0000dd86);
    // Was written together with flow label filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    /* enable rules */
    _if->pci_reg_write(E1000_WUFC, (mask << 16) | (1 << 14) );

    return (0);
}
5920
5921// clear registers of rules
5922void CTRexExtendedDriverBase1G::clear_rx_filter_rules(CPhyEthIF * _if) {
5923    for (int rule_id = 0 ; rule_id < 8; rule_id++) {
5924        for (int i = 0; i < 0xff; i += 4) {
5925            _if->pci_reg_write( (E1000_FHFT(rule_id) + i) , 0);
5926        }
5927    }
5928}
5929
5930int CTRexExtendedDriverBase1G::set_rcv_all(CPhyEthIF * _if, bool set_on) {
5931    // byte 12 equals 08 - for IPv4 and ARP
5932    //                86 - For IPv6
5933    //                81 - For VLAN
5934    //                88 - For MPLS
5935    uint8_t eth_types[] = {0x08, 0x86, 0x81, 0x88};
5936    uint32_t mask = 0;
5937
5938    clear_rx_filter_rules(_if);
5939
5940    if (set_on) {
5941        for (int rule_id = 0; rule_id < sizeof(eth_types); rule_id++) {
5942            mask |= 0x1 << rule_id;
5943            // Filter for byte 12 of packet
5944            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x000000 | eth_types[rule_id]);
5945            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x10); /* MASK */
5946            // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1, len = 24
5947            _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | 24);
5948        }
5949    } else {
5950        configure_rx_filter_rules(_if);
5951    }
5952
5953    return 0;
5954}
5955
5956void CTRexExtendedDriverBase1G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
5957
5958    stats->ipackets     +=  _if->pci_reg_read(E1000_GPRC) ;
5959
5960    stats->ibytes       +=  (_if->pci_reg_read(E1000_GORCL) );
5961    stats->ibytes       +=  (((uint64_t)_if->pci_reg_read(E1000_GORCH))<<32);
5962
5963
5964    stats->opackets     +=  _if->pci_reg_read(E1000_GPTC);
5965    stats->obytes       +=  _if->pci_reg_read(E1000_GOTCL) ;
5966    stats->obytes       +=  ( (((uint64_t)_if->pci_reg_read(IXGBE_GOTCH))<<32) );
5967
5968    stats->f_ipackets   +=  0;
5969    stats->f_ibytes     += 0;
5970
5971
5972    stats->ierrors      +=  ( _if->pci_reg_read(E1000_RNBC) +
5973                              _if->pci_reg_read(E1000_CRCERRS) +
5974                              _if->pci_reg_read(E1000_ALGNERRC ) +
5975                              _if->pci_reg_read(E1000_SYMERRS ) +
5976                              _if->pci_reg_read(E1000_RXERRC ) +
5977
5978                              _if->pci_reg_read(E1000_ROC)+
5979                              _if->pci_reg_read(E1000_RUC)+
5980                              _if->pci_reg_read(E1000_RJC) +
5981
5982                              _if->pci_reg_read(E1000_XONRXC)+
5983                              _if->pci_reg_read(E1000_XONTXC)+
5984                              _if->pci_reg_read(E1000_XOFFRXC)+
5985                              _if->pci_reg_read(E1000_XOFFTXC)+
5986                              _if->pci_reg_read(E1000_FCRUC)
5987                              );
5988
5989    stats->oerrors      +=  0;
5990    stats->imcasts      =  0;
5991    stats->rx_nombuf    =  0;
5992}
5993
/* Intentionally empty: no explicit counter clearing is done for 1G devices. */
void CTRexExtendedDriverBase1G::clear_extended_stats(CPhyEthIF * _if){
}
5996
5997#if 0
5998int CTRexExtendedDriverBase1G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
5999                                            ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
6000    uint32_t port_id = _if->get_port_id();
6001    return g_trex.m_rx_sl.get_rx_stats(port_id, pkts, prev_pkts, bytes, prev_bytes, min, max);
6002}
6003#endif
6004
void CTRexExtendedDriverBase10G::clear_extended_stats(CPhyEthIF * _if){
    /* read RXNFGPC and discard the value — presumably the counter is
       clear-on-read; TODO confirm against the 82599 datasheet */
    _if->pci_reg_read(IXGBE_RXNFGPC);
}
6008
/* 10G: delegate global flow-director setup to the port-config helper. */
void CTRexExtendedDriverBase10G::update_global_config_fdir(port_cfg_t * cfg){
    cfg->update_global_config_fdir_10g();
}