main_dpdk.cpp revision 7b8fd5a8
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2016 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
/* Sampling rates for the rx-check feature (1 out of N packets is inspected). */
#define RX_CHECK_MIX_SAMPLE_RATE 8
#define RX_CHECK_MIX_SAMPLE_RATE_1G 2


/* NUMA socket used for default allocations. */
#define SOCKET0         0

/* Maximum number of packets pulled from an rx queue in one rte_eth_rx_burst call. */
#define MAX_PKT_BURST   32

/* Upper bounds on cores / tx queues supported by this process. */
#define BP_MAX_CORES 32
#define BP_MAX_TX_QUEUE 16
/* Number of non-datapath cores (master + latency/rx core). */
#define BP_MASTER_AND_LATENCY 2

/* Default rx descriptor ring sizes (bare-metal, latency queue, Mellanox). */
#define RTE_TEST_RX_DESC_DEFAULT 64
#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)
#define RTE_TEST_RX_DESC_DEFAULT_MLX 8

/* Descriptor ring sizes when running inside a VM (single queue mode). */
#define RTE_TEST_RX_DESC_VM_DEFAULT 512
#define RTE_TEST_TX_DESC_VM_DEFAULT 512

/* Helper that linearizes a multi-segment mbuf into one segment (defined elsewhere). */
typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
/* DPDK helper: resolve a port id from its PCI address. */
extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
/* Re-sorts DPDK port ids to match the order in the platform config file. */
void reorder_dpdk_ports();

#define RTE_TEST_TX_DESC_DEFAULT 512
#define RTE_TEST_RX_DESC_DROP    0

/* Highest flow-stat hardware ids observed so far (IP-id based and payload based). */
static int max_stat_hw_id_seen = 0;
static int max_stat_hw_id_seen_payload = 0;
114
115static inline int get_vm_one_queue_enable(){
116    return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
117}
118
119static inline int get_is_rx_thread_enabled() {
120    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
121}
122
struct port_cfg_t;

/* Storage for the argv handed to rte_eal_init(); the string buffers below
   back individual arguments so they stay alive for the EAL's lifetime. */
#define MAX_DPDK_ARGS 50
static CPlatformYamlInfo global_platform_cfg_info;   // parsed /etc/trex_cfg.yaml contents
static int global_dpdk_args_num ;                    // number of valid entries in global_dpdk_args
static char * global_dpdk_args[MAX_DPDK_ARGS];
static char global_cores_str[100];                   // "-c <coremask>" argument buffer
static char global_prefix_str[100];                  // "--file-prefix" argument buffer
static char global_loglevel_str[20];                 // "--log-level" argument buffer
static char global_master_id_str[10];                // "--master-lcore" argument buffer
133
/* Abstract base for per-NIC-driver behavior (filter rules, stats, queue setup).
   A concrete subclass exists per DPDK PMD; the active one is selected at init
   time via CTRexExtendedDriverDb::set_driver_name(). Pure-virtual methods must
   be implemented by every driver; the virtual ones here provide conservative
   defaults (feature not supported / no-op). */
class CTRexExtendedDriverBase {
public:

    /* by default NIC driver adds CRC */
    virtual bool has_crc_added() {
        return true;
    }

    virtual int get_min_sample_rate(void)=0;
    virtual void update_configuration(port_cfg_t * cfg)=0;
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;

    // Default: no hardware rx filtering available.
    virtual bool is_hardware_filter_is_supported(){
        return(false);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    // Default: flow-stat rules are not programmed in hardware; report success.
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return 0;}
    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    // True when the NIC keeps per-flow rx counters in hardware (e.g. 40G fdir).
    virtual bool hw_rx_stat_supported(){return false;}
    // Default: hardware rx stats not available; callers must use software counting.
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual int get_stat_counters_num() {return 0;}
    virtual int get_rx_stat_capabilities() {return 0;}
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;
    // Number of trailing CRC bytes the PMD leaves on received packets (0 = none).
    virtual uint8_t get_num_crc_fix_bytes() {return 0;}

    /* Does this NIC type support automatic packet dropping in case of a link down?
       in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
       this interface is used as a workaround to let TRex work without link in stateless mode, driver that
       does not support that will be failed at init time because it will cause watchdog due to watchdog hang */
    virtual bool drop_packets_incase_of_linkdown() {
        return (false);
    }

    /* Mellanox ConnectX-4 can drop only 35MPPS per Rx queue. to workaround this issue we will create multi rx queue and enable RSS. for Queue1 we will disable  RSS
       return  zero for disable patch and rx queues number for enable
    */

    virtual uint16_t enable_rss_drop_workaround(void) {
        return (0);
    }

};
191
192
/* Driver object for Intel 1G NICs (igb PMD). Supports hardware rx filtering
   and a hardware drop queue; flow stats are counted per IPv4 id and payload. */
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
    }

    // flow control supported, link-change notification supported
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    // 1G links are slower, so a denser sample rate is affordable.
    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
239
/* Driver object for virtual NICs (vmxnet3, virtio and - via subclass - e1000).
   Forces the single rx / single tx queue VM mode; no hardware drop queue. */
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1GVm(){
        /* we are working in mode that we have 1 queue for rx and one queue for tx*/
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }

    // flow control change allowed, link-change notification supported
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }

    // Virtual NICs hand us packets without the Ethernet CRC appended.
    virtual bool has_crc_added() {
        return false;
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1GVm() );
    }

    // No fdir configuration for virtual devices.
    virtual void update_global_config_fdir(port_cfg_t * cfg){

    }

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);

    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    // Promiscuous "receive all" toggling is a no-op for virtual devices.
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
290
291class CTRexExtendedDriverBaseE1000 : public CTRexExtendedDriverBase1GVm {
292    CTRexExtendedDriverBaseE1000() {
293        // E1000 driver is only relevant in VM in our case
294        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
295    }
296public:
297    static CTRexExtendedDriverBase * create() {
298        return ( new CTRexExtendedDriverBaseE1000() );
299    }
300    // e1000 driver handing us packets with ethernet CRC, so we need to chop them
301    virtual uint8_t get_num_crc_fix_bytes() {return 4;}
302};
303
/* Driver object for Intel 10G NICs (ixgbe PMD). Hardware filtering and a
   drop queue are available; flow stats use IPv4 id + payload rules. */
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
    }

    // flow control supported, link-change notification supported
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    // Add/remove an ethertype-match filter on the given interface.
    int add_del_eth_filter(CPhyEthIF * _if, bool is_add, uint16_t ethertype);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
345
/* Driver object for Intel 40G NICs (i40e PMD). Unlike 1G/10G, rx flow stats
   are read from hardware fdir counters (hw_rx_stat_supported() == true). */
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase40G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    // No global fdir tweaks needed for this PMD.
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    // No per-flow rx byte counters in hardware; only IPv4-id and payload rules.
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual bool hw_rx_stat_supported(){return true;}
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

    virtual bool drop_packets_incase_of_linkdown() {
        return (true);
    }

// NOTE(review): duplicated 'private:' access specifier (harmless, could be merged).
private:
    uint8_t m_if_per_card;   // interfaces per physical card; fixed at 4 (see ctor comment)
};
408
/* Driver object for Cisco VIC NICs (enic PMD). Hardware filtering and drop
   queue supported; firmware version is validated via verify_fw_ver(). */
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
    }

    // flow control change not allowed, no link-change notification
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    // No global fdir tweaks needed for this PMD.
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }


    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    void clear_extended_stats(CPhyEthIF * _if);

    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);


    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t id
                               , uint8_t l4_proto, uint8_t tos, int queue);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

};
465
466
/* Driver object for Mellanox ConnectX-4/5 NICs (mlx5 PMD).
   Enables the multi-rx-queue RSS drop workaround (see base-class comment). */
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseMlnx5G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    // No global fdir tweaks needed for this PMD.
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    // disabling flow control on 40G using DPDK API causes the interface to malfunction
    // NOTE(review): no matching virtual exists in CTRexExtendedDriverBase in this
    // file chunk, so this does not appear to override anything - confirm callers.
    virtual bool flow_control_disable_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

    // Use 5 rx queues with RSS to spread the drop load (see base-class comment).
    virtual uint16_t enable_rss_drop_workaround(void) {
        return (5);
    }

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
                               , int queue);
    virtual int add_del_rx_filter_rules(CPhyEthIF * _if, bool set_on);
};
521
/* Factory function signature for creating a driver object. */
typedef CTRexExtendedDriverBase * (*create_object_t) (void);


/* Registry record binding a DPDK PMD name to its driver-object factory. */
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;   // DPDK driver name, e.g. "rte_ixgbe_pmd"
    create_object_t     m_constructor;   // factory for the matching driver object
};
530
/* Singleton registry mapping DPDK PMD names to driver objects.
   Usage: Ins()->set_driver_name(<pmd name>) once at init, then get_drv()
   everywhere else. get_drv() asserts if called before the driver was set. */
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    // True if 'name' matches one of the registered PMD names.
    bool is_driver_exists(std::string name);



    // Selects and instantiates the driver object; asserts on unknown name.
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    // Returns the active driver object; must be called after set_driver_name().
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    // Singleton accessor (lazily constructs the instance).
    static CTRexExtendedDriverDb * Ins();

private:
    // Looks up 'name' in the registry and invokes its factory; NULL if unknown.
    CTRexExtendedDriverBase * create_driver(std::string name);

    // Registers every supported PMD. Private: construct only through Ins().
    CTRexExtendedDriverDb(){
        register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("librte_pmd_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);


        /* virtual devices */
        register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBaseE1000::create);
        register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);




        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;     // singleton instance
    bool        m_driver_was_set;             // guards premature get_drv() use
    std::string m_driver_name;                // active PMD name
    CTRexExtendedDriverBase * m_drv;          // active driver object
    std::vector <CTRexExtendedDriverRec*>     m_list;   // registered name->factory records

};
595
596CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
597
598
599void CTRexExtendedDriverDb::register_driver(std::string name,
600                                            create_object_t func){
601    CTRexExtendedDriverRec * rec;
602    rec = new CTRexExtendedDriverRec();
603    rec->m_driver_name=name;
604    rec->m_constructor=func;
605    m_list.push_back(rec);
606}
607
608
609bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
610    int i;
611    for (i=0; i<(int)m_list.size(); i++) {
612        if (m_list[i]->m_driver_name == name) {
613            return (true);
614        }
615    }
616    return (false);
617}
618
619
620CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
621    int i;
622    for (i=0; i<(int)m_list.size(); i++) {
623        if (m_list[i]->m_driver_name == name) {
624            return ( m_list[i]->m_constructor() );
625        }
626    }
627    return( (CTRexExtendedDriverBase *)0);
628}
629
630
631
632CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
633    if (!m_ins) {
634        m_ins = new CTRexExtendedDriverDb();
635    }
636    return (m_ins);
637}
638
639static CTRexExtendedDriverBase *  get_ex_drv(){
640
641    return ( CTRexExtendedDriverDb::Ins()->get_drv());
642}
643
644static inline int get_min_sample_rate(void){
645    return ( get_ex_drv()->get_min_sample_rate());
646}
647
// cores =0==1,1*2,2,3,4,5,6  (NOTE(review): legacy note on core counting - meaning unclear, verify)
// An enum for all the command-line option types.
// Each value is referenced by id in the parser_options[] table below.
enum { OPT_HELP,
       OPT_MODE_BATCH,
       OPT_MODE_INTERACTIVE,
       OPT_NODE_DUMP,
       OPT_DUMP_INTERFACES,
       OPT_UT,
       OPT_CORES,
       OPT_SINGLE_CORE,
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       OPT_RATE_MULT,
       OPT_DURATION,
       OPT_PLATFORM_FACTOR,
       OPT_PUB_DISABLE,
       OPT_LIMT_NUM_OF_PORTS,
       OPT_PLAT_CFG_FILE,
       OPT_MBUF_FACTOR,
       OPT_LATENCY,
       OPT_NO_CLEAN_FLOW_CLOSE,
       OPT_LATENCY_MASK,
       OPT_ONLY_LATENCY,
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC,
       OPT_PCAP,
       OPT_RX_CHECK,
       OPT_IO_MODE,
       OPT_IPV6,
       OPT_LEARN,
       OPT_LEARN_MODE,
       OPT_LEARN_VERIFY,
       OPT_L_PKT_MODE,
       OPT_NO_FLOW_CONTROL,
       OPT_VLAN,
       OPT_RX_CHECK_HOPS,
       OPT_CLIENT_CFG_FILE,
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
       OPT_NO_OFED_CHECK,
       OPT_ACTIVE_FLOW
};
698
/* these are the argument types:
   SO_NONE --    no argument needed
   SO_REQ_SEP -- single required argument
   SO_MULTI --   multiple arguments needed
   Table mapping command-line switches to the option-type enum above;
   consumed by CSimpleOpt. Note --client_cfg/--client-cfg are aliases. */
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE    },
        { OPT_HELP,                   "-h",                SO_NONE    },
        { OPT_HELP,                   "--help",            SO_NONE    },
        { OPT_UT,                     "--ut",              SO_NONE    },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP },
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE    },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP },
        { OPT_SINGLE_CORE,            "-s",                SO_NONE    },
        { OPT_FLIP_CLIENT_SERVER,     "--flip",            SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",                SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE, "-e",          SO_NONE    },
        { OPT_NO_CLEAN_FLOW_CLOSE,    "--nc",              SO_NONE    },
        { OPT_LIMT_NUM_OF_PORTS,      "--limit-ports",     SO_REQ_SEP },
        { OPT_CORES,                  "-c",                SO_REQ_SEP },
        { OPT_NODE_DUMP,              "-v",                SO_REQ_SEP },
        { OPT_DUMP_INTERFACES,        "--dump-interfaces", SO_MULTI   },
        { OPT_LATENCY,                "-l",                SO_REQ_SEP },
        { OPT_DURATION,               "-d",                SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR,        "-pm",               SO_REQ_SEP },
        { OPT_PUB_DISABLE,            "-pubd",             SO_NONE    },
        { OPT_RATE_MULT,              "-m",                SO_REQ_SEP },
        { OPT_LATENCY_MASK,           "--lm",              SO_REQ_SEP },
        { OPT_ONLY_LATENCY,           "--lo",              SO_NONE    },
        { OPT_LATENCY_PREVIEW,        "-k",                SO_REQ_SEP },
        { OPT_WAIT_BEFORE_TRAFFIC,    "-w",                SO_REQ_SEP },
        { OPT_PCAP,                   "--pcap",            SO_NONE    },
        { OPT_RX_CHECK,               "--rx-check",        SO_REQ_SEP },
        { OPT_IO_MODE,                "--iom",             SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS,          "--hops",            SO_REQ_SEP },
        { OPT_IPV6,                   "--ipv6",            SO_NONE    },
        { OPT_LEARN,                  "--learn",           SO_NONE    },
        { OPT_LEARN_MODE,             "--learn-mode",      SO_REQ_SEP },
        { OPT_LEARN_VERIFY,           "--learn-verify",    SO_NONE    },
        { OPT_L_PKT_MODE,             "--l-pkt-mode",      SO_REQ_SEP },
        { OPT_NO_FLOW_CONTROL,        "--no-flow-control-change", SO_NONE },
        { OPT_VLAN,                   "--vlan",            SO_NONE    },
        { OPT_CLIENT_CFG_FILE,        "--client_cfg",      SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE,        "--client-cfg",      SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT,      "--no-key",          SO_NONE    },
        { OPT_VIRT_ONE_TX_RX_QUEUE,   "--vm-sim",          SO_NONE    },
        { OPT_PREFIX,                 "--prefix",          SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT,         "--send-debug-pkt",  SO_REQ_SEP },
        { OPT_MBUF_FACTOR,            "--mbuf-factor",     SO_REQ_SEP },
        { OPT_NO_WATCHDOG,            "--no-watchdog",     SO_NONE    },
        { OPT_ALLOW_COREDUMP,         "--allow-coredump",  SO_NONE    },
        { OPT_CHECKSUM_OFFLOAD,       "--checksum-offload", SO_NONE   },
        { OPT_ACTIVE_FLOW,            "--active-flows",   SO_REQ_SEP    },
        { OPT_CLOSE,                  "--close-at-end",    SO_NONE    },
        { OPT_ARP_REF_PER,            "--arp-refresh-period", SO_REQ_SEP },
        { OPT_NO_OFED_CHECK,          "--no-ofed-check",   SO_NONE    },

        SO_END_OF_OPTIONS
    };
759
760static int usage(){
761
762    printf(" Usage: t-rex-64 [mode] <options>\n\n");
763    printf(" mode is one of:\n");
764    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
765    printf("   -i        : Run TRex in 'stateless' mode\n");
766    printf("\n");
767
768    printf(" Available options are:\n");
769    printf(" --allow-coredump           : Allow creation of core dump \n");
770    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
771    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
772    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
773    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
774    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
775    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
776    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
777    printf("                               This it temporary option. Will be removed in the future \n");
778    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
779    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
780    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
781    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
782    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
783    printf(" --ipv6                     : Work in ipv6 mode \n");
784    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
785    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
786    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
787    printf("      0 (default)    send SCTP packets  \n");
788    printf("      1              Send ICMP request packets  \n");
789    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
790    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
791    printf("    Rate of zero means no latency check \n");
792    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
793    printf(" --learn-mode [1-3]         : Used for working in NAT environments. Dynamically learn the NAT translation done by the DUT \n");
794    printf("      1    In case of TCP flow, use TCP ACK in first SYN to pass NAT translation information. Initial SYN packet must be first packet in the TCP flow \n");
795    printf("           In case of UDP stream, NAT translation information will pass in IP ID field of first packet in flow. This means that this field is changed by TRex\n");
796    printf("      2    Add special IP option to pass NAT translation information to first packet of each flow. Will not work on certain firewalls if they drop packets with IP options \n");
797    printf("      3    Like 1, but without support for sequence number randomization in server->clien direction. Performance (flow/second) better than 1 \n");
798    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
799    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
800    printf(" --lm                       : Hex mask of cores that should send traffic \n");
801    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
802    printf(" --lo                       : Only run latency test \n");
803    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
804    printf(" --mbuf-factor              : Factor for packet memory \n");
805    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
806    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
807    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
808    printf(" --no-ofed-check            : Disable the check of OFED version \n");
809    printf(" --no-watchdog              : Disable watchdog \n");
810    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
811    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
812    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
813    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
814    printf(" -pubd                      : Disable monitors publishers \n");
815    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
816    printf(" -s                         : Single core. Run only one data path core. For debug \n");
817    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
818    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
819    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
820    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
821    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
822    printf(" --vm-sim                   : Simulate vm with driver of one input queue and one output queue \n");
823    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
824
825    printf(" --active-flows             : An experimental switch to scale up or down the number of active flows.  \n");
826    printf("                              It is not accurate due to the quantization of flow scheduler and in some case does not work. \n");
827    printf("                              Example --active-flows 500000 wil set the ballpark of the active flow to be ~0.5M \n");
828
829    printf("\n");
830    printf(" Examples: ");
831    printf(" basic trex run for 20 sec and multiplier of 10 \n");
832    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
833    printf("\n\n");
834    printf(" Copyright (c) 2015-2016 Cisco Systems, Inc.    \n");
835    printf("                                                                  \n");
836    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
837    printf(" you may not use this file except in compliance with the License. \n");
838    printf(" You may obtain a copy of the License at                          \n");
839    printf("                                                                  \n");
840    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
841    printf("                                                                  \n");
842    printf(" Unless required by applicable law or agreed to in writing, software \n");
843    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
844    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
845    printf(" See the License for the specific language governing permissions and      \n");
846    printf(" limitations under the License.                                           \n");
847    printf(" \n");
848    printf(" Open Source Components / Libraries \n");
849    printf(" DPDK       (BSD)       \n");
850    printf(" YAML-CPP   (BSD)       \n");
851    printf(" JSONCPP    (MIT)       \n");
852    printf(" \n");
853    printf(" Open Source Binaries \n");
854    printf(" ZMQ        (LGPL v3plus) \n");
855    printf(" \n");
856    printf(" Version : %s   \n",VERSION_BUILD_NUM);
857    printf(" DPDK version : %s   \n",rte_version());
858    printf(" User    : %s   \n",VERSION_USER);
859    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
860    printf(" Uuid    : %s    \n",VERSION_UIID);
861    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
862    return (0);
863}
864
865
866int gtest_main(int argc, char **argv) ;
867
/* Report a command-line parsing problem to the user and abort the process. */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n";
    std::cout << "*** " << msg << "\n\n";
    exit(-1);
}
872
873static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
874    CSimpleOpt args(argc, argv, parser_options);
875
876    bool latency_was_set=false;
877    (void)latency_was_set;
878    char ** rgpszArg = NULL;
879    bool opt_vlan_was_set = false;
880
881    int a=0;
882    int node_dump=0;
883
884    po->preview.setFileWrite(true);
885    po->preview.setRealTime(true);
886    uint32_t tmp_data;
887    float tmp_double;
888
889    po->m_run_mode = CParserOption::RUN_MODE_INVALID;
890
891    while ( args.Next() ){
892        if (args.LastError() == SO_SUCCESS) {
893            switch (args.OptionId()) {
894
895            case OPT_UT :
896                parse_err("Supported only in simulation");
897                break;
898
899            case OPT_HELP:
900                usage();
901                return -1;
902
903            case OPT_MODE_BATCH:
904                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
905                    parse_err("Please specify single run mode");
906                }
907                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
908                po->cfg_file = args.OptionArg();
909                break;
910
911            case OPT_MODE_INTERACTIVE:
912                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
913                    parse_err("Please specify single run mode");
914                }
915                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
916                break;
917
918            case OPT_NO_KEYBOARD_INPUT  :
919                po->preview.set_no_keyboard(true);
920                break;
921
922            case OPT_CLIENT_CFG_FILE :
923                po->client_cfg_file = args.OptionArg();
924                break;
925
926            case OPT_PLAT_CFG_FILE :
927                po->platform_cfg_file = args.OptionArg();
928                break;
929
930            case OPT_SINGLE_CORE :
931                po->preview.setSingleCore(true);
932                break;
933
934            case OPT_IPV6:
935                po->preview.set_ipv6_mode_enable(true);
936                break;
937
938
939            case OPT_LEARN :
940                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
941                break;
942
943            case OPT_LEARN_MODE :
944                sscanf(args.OptionArg(),"%d", &tmp_data);
945                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
946                    exit(-1);
947                }
948                po->m_learn_mode = (uint8_t)tmp_data;
949                break;
950
951            case OPT_LEARN_VERIFY :
952                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
953                if (po->m_learn_mode == 0) {
954                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
955                }
956                po->preview.set_learn_and_verify_mode_enable(true);
957                break;
958
959            case OPT_L_PKT_MODE :
960                sscanf(args.OptionArg(),"%d", &tmp_data);
961                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
962                    exit(-1);
963                }
964                po->m_l_pkt_mode=(uint8_t)tmp_data;
965                break;
966
967            case OPT_NO_FLOW_CONTROL:
968                po->preview.set_disable_flow_control_setting(true);
969                break;
970            case OPT_VLAN:
971                opt_vlan_was_set = true;
972                break;
973            case OPT_LIMT_NUM_OF_PORTS :
974                po->m_expected_portd =atoi(args.OptionArg());
975                break;
976            case  OPT_CORES  :
977                po->preview.setCores(atoi(args.OptionArg()));
978                break;
979            case OPT_FLIP_CLIENT_SERVER :
980                po->preview.setClientServerFlip(true);
981                break;
982            case OPT_NO_CLEAN_FLOW_CLOSE :
983                po->preview.setNoCleanFlowClose(true);
984                break;
985            case OPT_FLOW_FLIP_CLIENT_SERVER :
986                po->preview.setClientServerFlowFlip(true);
987                break;
988            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
989                po->preview.setClientServerFlowFlipAddr(true);
990                break;
991            case OPT_NODE_DUMP:
992                a=atoi(args.OptionArg());
993                node_dump=1;
994                po->preview.setFileWrite(false);
995                break;
996            case OPT_DUMP_INTERFACES:
997                if (first_time) {
998                    rgpszArg = args.MultiArg(1);
999                    while (rgpszArg != NULL) {
1000                        po->dump_interfaces.push_back(rgpszArg[0]);
1001                        rgpszArg = args.MultiArg(1);
1002                    }
1003                }
1004                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
1005                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
1006                }
1007                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
1008                break;
1009            case OPT_MBUF_FACTOR:
1010                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
1011                break;
1012            case OPT_RATE_MULT :
1013                sscanf(args.OptionArg(),"%f", &po->m_factor);
1014                break;
1015            case OPT_DURATION :
1016                sscanf(args.OptionArg(),"%f", &po->m_duration);
1017                break;
1018            case OPT_PUB_DISABLE:
1019                po->preview.set_zmq_publish_enable(false);
1020                break;
1021            case OPT_PLATFORM_FACTOR:
1022                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
1023                break;
1024            case OPT_LATENCY :
1025                latency_was_set=true;
1026                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
1027                break;
1028            case OPT_LATENCY_MASK :
1029                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
1030                break;
1031            case OPT_ONLY_LATENCY :
1032                po->preview.setOnlyLatency(true);
1033                break;
1034            case OPT_NO_WATCHDOG :
1035                po->preview.setWDDisable(true);
1036                break;
1037            case OPT_ALLOW_COREDUMP :
1038                po->preview.setCoreDumpEnable(true);
1039                break;
1040            case  OPT_LATENCY_PREVIEW :
1041                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
1042                break;
1043            case  OPT_WAIT_BEFORE_TRAFFIC :
1044                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
1045                break;
1046            case OPT_PCAP:
1047                po->preview.set_pcap_mode_enable(true);
1048                break;
1049            case OPT_ACTIVE_FLOW:
1050                sscanf(args.OptionArg(),"%f", &tmp_double);
1051                po->m_active_flows=(uint32_t)tmp_double;
1052                break;
1053            case OPT_RX_CHECK :
1054                sscanf(args.OptionArg(),"%d", &tmp_data);
1055                po->m_rx_check_sample=(uint16_t)tmp_data;
1056                po->preview.set_rx_check_enable(true);
1057                break;
1058            case OPT_RX_CHECK_HOPS :
1059                sscanf(args.OptionArg(),"%d", &tmp_data);
1060                po->m_rx_check_hops = (uint16_t)tmp_data;
1061                break;
1062            case OPT_IO_MODE :
1063                sscanf(args.OptionArg(),"%d", &tmp_data);
1064                po->m_io_mode=(uint16_t)tmp_data;
1065                break;
1066
1067            case OPT_VIRT_ONE_TX_RX_QUEUE:
1068                po->preview.set_vm_one_queue_enable(true);
1069                break;
1070
1071            case OPT_PREFIX:
1072                po->prefix = args.OptionArg();
1073                break;
1074
1075            case OPT_SEND_DEBUG_PKT:
1076                sscanf(args.OptionArg(),"%d", &tmp_data);
1077                po->m_debug_pkt_proto = (uint8_t)tmp_data;
1078                break;
1079
1080            case OPT_CHECKSUM_OFFLOAD:
1081                po->preview.setChecksumOffloadEnable(true);
1082                break;
1083
1084            case OPT_CLOSE:
1085                po->preview.setCloseEnable(true);
1086                break;
1087            case  OPT_ARP_REF_PER:
1088                sscanf(args.OptionArg(),"%d", &tmp_data);
1089                po->m_arp_ref_per=(uint16_t)tmp_data;
1090                break;
1091            case OPT_NO_OFED_CHECK:
1092                break;
1093
1094            default:
1095                printf("Error: option %s is not handled.\n\n", args.OptionText());
1096                usage();
1097                return -1;
1098                break;
1099            } // End of switch
1100        }// End of IF
1101        else {
1102            if (args.LastError() == SO_OPT_INVALID) {
1103                printf("Error: option %s is not recognized.\n\n", args.OptionText());
1104            } else if (args.LastError() == SO_ARG_MISSING) {
1105                printf("Error: option %s is expected to have argument.\n\n", args.OptionText());
1106            }
1107            usage();
1108            return -1;
1109        }
1110    } // End of while
1111
1112
1113    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
1114        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
1115    }
1116
1117    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
1118        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
1119                  "If you think it is important, please open a defect or write to TRex mailing list\n");
1120    }
1121
1122    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
1123        || (CGlobalInfo::m_options.m_arp_ref_per != 0) || get_vm_one_queue_enable()) {
1124        po->set_rx_enabled();
1125    }
1126
1127    if ( node_dump ){
1128        po->preview.setVMode(a);
1129    }
1130
1131    /* if we have a platform factor we need to devided by it so we can still work with normalized yaml profile  */
1132    po->m_factor = po->m_factor/po->m_platform_factor;
1133
1134    uint32_t cores=po->preview.getCores();
1135    if ( cores > ((BP_MAX_CORES)/2-1) ) {
1136        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
1137        return -1;
1138    }
1139
1140
1141    if ( first_time ){
1142        /* only first time read the configuration file */
1143        if ( po->platform_cfg_file.length() >0  ) {
1144            if ( node_dump ){
1145                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
1146            }
1147            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
1148            if ( node_dump ){
1149                global_platform_cfg_info.Dump(stdout);
1150            }
1151        }else{
1152            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
1153                if ( node_dump ){
1154                    printf("Using configuration file /etc/trex_cfg.yaml \n");
1155                }
1156                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
1157                if ( node_dump ){
1158                    global_platform_cfg_info.Dump(stdout);
1159                }
1160            }
1161        }
1162    }
1163
1164    if ( get_is_stateless() ) {
1165        if ( opt_vlan_was_set ) {
1166            po->preview.set_vlan_mode_enable(true);
1167        }
1168        if (CGlobalInfo::m_options.client_cfg_file != "") {
1169            parse_err("Client config file is not supported with interactive (stateless) mode ");
1170        }
1171        if ( po->m_duration ) {
1172            parse_err("Duration is not supported with interactive (stateless) mode ");
1173        }
1174
1175        if ( po->preview.get_is_rx_check_enable() ) {
1176            parse_err("Rx check is not supported with interactive (stateless) mode ");
1177        }
1178
1179        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
1180            parse_err("Latency check is not supported with interactive (stateless) mode ");
1181        }
1182
1183        if ( po->preview.getSingleCore() ){
1184            parse_err("Single core is not supported with interactive (stateless) mode ");
1185        }
1186
1187    } else {
1188        if ( !po->m_duration ) {
1189            po->m_duration = 3600.0;
1190        }
1191        if ( global_platform_cfg_info.m_tw.m_info_exist ){
1192
1193            CTimerWheelYamlInfo *lp=&global_platform_cfg_info.m_tw;
1194            std::string  err;
1195            if (!lp->Verify(err)){
1196                parse_err(err);
1197            }
1198
1199            po->set_tw_bucket_time_in_usec(lp->m_bucket_time_usec);
1200            po->set_tw_buckets(lp->m_buckets);
1201            po->set_tw_levels(lp->m_levels);
1202        }
1203    }
1204    return 0;
1205}
1206
1207static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1208    // copy, as arg parser sometimes changes the argv
1209    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1210    for(int i=0; i<argc; i++) {
1211        argv_copy[i] = strdup(argv[i]);
1212    }
1213    int ret = parse_options(argc, argv_copy, po, first_time);
1214
1215    // free
1216    for(int i=0; i<argc; i++) {
1217        free(argv_copy[i]);
1218    }
1219    free(argv_copy);
1220    return ret;
1221}
1222
1223int main_test(int argc , char * argv[]);
1224
1225
1226#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
1227#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
1228#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
1229
1230/*
1231 * These default values are optimized for use with the Intel(R) 82599 10 GbE
1232 * Controller and the DPDK ixgbe PMD. Consider using other values for other
1233 * network controllers and/or network drivers.
1234 */
1235#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
1236#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
1237#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
1238
1239#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
1240#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1241
1242
/* Per-port DPDK configuration bundle: device, RX, TX and drop-queue RX
 * settings. The constructor loads defaults tuned for the Intel 82599
 * (ixgbe); drivers adjust them via update_var()/update_global_config_fdir().
 */
struct port_cfg_t {
public:
    port_cfg_t(){
        memset(&m_port_conf,0,sizeof(m_port_conf));
        memset(&m_rx_conf,0,sizeof(m_rx_conf));
        memset(&m_tx_conf,0,sizeof(m_tx_conf));
        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));

        /* normal RX queue thresholds (see RX_*THRESH defines above) */
        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
        m_rx_conf.rx_free_thresh =32;

        /* "drop" RX queue: zero thresholds and rx_drop_en=1 so packets are
           discarded in hardware when no descriptors are available */
        m_rx_drop_conf.rx_thresh.pthresh = 0;
        m_rx_drop_conf.rx_thresh.hthresh = 0;
        m_rx_drop_conf.rx_thresh.wthresh = 0;
        m_rx_drop_conf.rx_free_thresh =32;
        m_rx_drop_conf.rx_drop_en=1;

        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;

        /* accept jumbo frames; 9*1024+22 presumably covers a 9KB payload
           plus L2 header/CRC overhead -- TODO confirm the exact breakdown */
        m_port_conf.rxmode.jumbo_frame=1;
        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
        m_port_conf.rxmode.hw_strip_crc=1;
    }



    /* Let the active driver override the defaults above. */
    inline void update_var(void){
        get_ex_drv()->update_configuration(this);
    }

    /* Let the active driver set up its flow-director configuration. */
    inline void update_global_config_fdir(void){
        get_ex_drv()->update_global_config_fdir(this);
    }

    /* enable FDIR */
    /* Flow-director setup for 10G (ixgbe): perfect MAC/VLAN match mode with
       a flexbytes field whose offset depends on run mode, IPv6 and VLAN. */
    inline void update_global_config_fdir_10g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT_MAC_VLAN;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
        /* Offset of flexbytes field in RX packets (in 16-bit word units). */
        /* Note: divide by 2 to convert byte offset to word offset */
        if (get_is_stateless()) {
            m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        } else {
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
            } else {
                m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
            }

            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        }
        m_port_conf.fdir_conf.drop_queue=1;
    }

    /* Flow-director setup for 40G: plain perfect-match mode, no flexbytes. */
    inline void update_global_config_fdir_40g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
    }

    struct rte_eth_conf     m_port_conf;     // device-level config (rxmode, fdir)
    struct rte_eth_rxconf   m_rx_conf;       // config for normal RX queues
    struct rte_eth_rxconf   m_rx_drop_conf;  // config for the hardware drop queue
    struct rte_eth_txconf   m_tx_conf;       // config for TX queues
};
1320
1321
1322/* this object is per core / per port / per queue
1323   each core will have 2 ports to send to
1324
1325
1326   port0                                port1
1327
1328   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1329
1330*/
1331
1332
/* Maps a NIC register offset to a printable name; used by the extended
   statistics dump below. */
typedef struct cnt_name_ {
    uint32_t offset;
    char * name;
}cnt_name_t ;

/* Build a cnt_name_t entry from a register macro, stringifying the macro
   name for the label. */
#define MY_REG(a) {a,(char *)#a}
1339
1340void CPhyEthIFStats::Clear() {
1341    ipackets = 0;
1342    ibytes = 0;
1343    f_ipackets = 0;
1344    f_ibytes = 0;
1345    opackets = 0;
1346    obytes = 0;
1347    ierrors = 0;
1348    oerrors = 0;
1349    imcasts = 0;
1350    rx_nombuf = 0;
1351    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1352    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1353    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1354}
1355
1356// dump all counters (even ones that equal 0)
/* Print the main counters unconditionally (DP_A4) and error counters only
   when nonzero (DP_A). The two macros defined here are also used by the
   Dump()/dump() functions below. */
void CPhyEthIFStats::DumpAll(FILE *fd) {
// NOTE(review): both macros print via printf, so output goes to stdout and
// the 'fd' parameter is effectively ignored -- confirm before passing a file.
#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
}
1367
1368// dump all non zero counters
/* Print every counter that is nonzero, using the DP_A macro defined in
   DumpAll() above (output goes to stdout; 'fd' is not used by the macro). */
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1381
/* Print the "ignored" traffic counters (e.g. ARP handled by the RX core),
   including zero values, via the DP_A4 macro defined in DumpAll() above
   (output goes to stdout; 'fd' is not used by the macro). */
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1390
1391// Clear the RX queue of an interface, dropping all packets
1392void CPhyEthIF::flush_rx_queue(void){
1393
1394    rte_mbuf_t * rx_pkts[32];
1395    int j=0;
1396    uint16_t cnt=0;
1397
1398    while (true) {
1399        j++;
1400        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1401        if ( cnt ) {
1402            int i;
1403            for (i=0; i<(int)cnt;i++) {
1404                rte_mbuf_t * m=rx_pkts[i];
1405                /*printf("rx--\n");
1406                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1407                rte_pktmbuf_free(m);
1408            }
1409        }
1410        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1411            break;
1412        }
1413    }
1414    if (cnt>0) {
1415        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1416    }
1417}
1418
1419
1420void CPhyEthIF::dump_stats_extended(FILE *fd){
1421
1422    cnt_name_t reg[]={
1423        MY_REG(IXGBE_GPTC), /* total packet */
1424        MY_REG(IXGBE_GOTCL), /* total bytes */
1425        MY_REG(IXGBE_GOTCH),
1426
1427        MY_REG(IXGBE_GPRC),
1428        MY_REG(IXGBE_GORCL),
1429        MY_REG(IXGBE_GORCH),
1430
1431
1432
1433        MY_REG(IXGBE_RXNFGPC),
1434        MY_REG(IXGBE_RXNFGBCL),
1435        MY_REG(IXGBE_RXNFGBCH),
1436        MY_REG(IXGBE_RXDGPC  ),
1437        MY_REG(IXGBE_RXDGBCL ),
1438        MY_REG(IXGBE_RXDGBCH  ),
1439        MY_REG(IXGBE_RXDDGPC ),
1440        MY_REG(IXGBE_RXDDGBCL ),
1441        MY_REG(IXGBE_RXDDGBCH  ),
1442        MY_REG(IXGBE_RXLPBKGPC ),
1443        MY_REG(IXGBE_RXLPBKGBCL),
1444        MY_REG(IXGBE_RXLPBKGBCH ),
1445        MY_REG(IXGBE_RXDLPBKGPC ),
1446        MY_REG(IXGBE_RXDLPBKGBCL),
1447        MY_REG(IXGBE_RXDLPBKGBCH ),
1448        MY_REG(IXGBE_TXDGPC      ),
1449        MY_REG(IXGBE_TXDGBCL     ),
1450        MY_REG(IXGBE_TXDGBCH     ),
1451        MY_REG(IXGBE_FDIRUSTAT ),
1452        MY_REG(IXGBE_FDIRFSTAT ),
1453        MY_REG(IXGBE_FDIRMATCH ),
1454        MY_REG(IXGBE_FDIRMISS )
1455
1456    };
1457    fprintf (fd," extended counters \n");
1458    int i;
1459    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1460        cnt_name_t *lp=&reg[i];
1461        uint32_t c=pci_reg_read(lp->offset);
1462        // xl710 bug. Counter values are -559038737 when they should be 0
1463        if (c && c != -559038737 ) {
1464            fprintf (fd," %s  : %d \n",lp->name,c);
1465        }
1466    }
1467}
1468
/* Return the driver-specific RX flow-stat capability flags by forwarding
   the query to the per-NIC driver abstraction. */
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1472
1473
1474
1475void CPhyEthIF::configure(uint16_t nb_rx_queue,
1476                          uint16_t nb_tx_queue,
1477                          const struct rte_eth_conf *eth_conf){
1478    int ret;
1479    ret = rte_eth_dev_configure(m_port_id,
1480                                nb_rx_queue,
1481                                nb_tx_queue,
1482                                eth_conf);
1483
1484    if (ret < 0)
1485        rte_exit(EXIT_FAILURE, "Cannot configure device: "
1486                 "err=%d, port=%u\n",
1487                 ret, m_port_id);
1488
1489    /* get device info */
1490    rte_eth_dev_info_get(m_port_id, &m_dev_info);
1491
1492    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
1493        /* check if the device supports TCP and UDP checksum offloading */
1494        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
1495            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
1496                     "port=%u\n",
1497                     m_port_id);
1498        }
1499        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
1500            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
1501                     "port=%u\n",
1502                     m_port_id);
1503        }
1504    }
1505}
1506
1507
1508/*
1509
1510  rx-queue 0 - default- all traffic not goint to queue 1
1511  will be drop as queue is disable
1512
1513
1514  rx-queue 1 - Latency measurement packets will go here
1515
1516  pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
1517
1518*/
1519
1520void CPhyEthIF::configure_rx_duplicate_rules(){
1521
1522    if ( get_is_rx_filter_enable() ){
1523
1524        if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
1525            printf(" ERROR this feature is not supported with current hardware \n");
1526            exit(1);
1527        }
1528        get_ex_drv()->configure_rx_filter_rules(this);
1529    }
1530}
1531
1532
1533void CPhyEthIF::stop_rx_drop_queue() {
1534    // In debug mode, we want to see all packets. Don't want to disable any queue.
1535    if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1536        return;
1537    }
1538    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1539        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1540            printf(" ERROR latency feature is not supported with current hardware  \n");
1541            exit(1);
1542        }
1543    }
1544    get_ex_drv()->stop_queue(this, MAIN_DPDK_DATA_Q);
1545}
1546
1547
1548void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1549                               uint16_t nb_rx_desc,
1550                               unsigned int socket_id,
1551                               const struct rte_eth_rxconf *rx_conf,
1552                               struct rte_mempool *mb_pool){
1553
1554    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1555                                     nb_rx_desc,
1556                                     socket_id,
1557                                     rx_conf,
1558                                     mb_pool);
1559    if (ret < 0)
1560        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1561                 "err=%d, port=%u\n",
1562                 ret, m_port_id);
1563}
1564
1565
1566
1567void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1568                               uint16_t nb_tx_desc,
1569                               unsigned int socket_id,
1570                               const struct rte_eth_txconf *tx_conf){
1571
1572    int ret = rte_eth_tx_queue_setup( m_port_id,
1573                                      tx_queue_id,
1574                                      nb_tx_desc,
1575                                      socket_id,
1576                                      tx_conf);
1577    if (ret < 0)
1578        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1579                 "err=%d, port=%u queue=%u\n",
1580                 ret, m_port_id, tx_queue_id);
1581
1582}
1583
1584void CPhyEthIF::stop(){
1585    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1586        rte_eth_dev_stop(m_port_id);
1587        rte_eth_dev_close(m_port_id);
1588    }
1589}
1590
1591void CPhyEthIF::start(){
1592
1593    get_ex_drv()->clear_extended_stats(this);
1594
1595    int ret;
1596
1597    m_bw_tx.reset();
1598    m_bw_rx.reset();
1599
1600    m_stats.Clear();
1601    int i;
1602    for (i=0;i<10; i++ ) {
1603        ret = rte_eth_dev_start(m_port_id);
1604        if (ret==0) {
1605            return;
1606        }
1607        delay(1000);
1608    }
1609    if (ret < 0)
1610        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1611                 "err=%d, port=%u\n",
1612                 ret, m_port_id);
1613
1614}
1615
1616// Disabling flow control on interface
1617void CPhyEthIF::disable_flow_control(){
1618    int ret;
1619    // see trex-64 issue with loopback on the same NIC
1620    struct rte_eth_fc_conf fc_conf;
1621    memset(&fc_conf,0,sizeof(fc_conf));
1622    fc_conf.mode=RTE_FC_NONE;
1623    fc_conf.autoneg=1;
1624    fc_conf.pause_time=100;
1625    int i;
1626    for (i=0; i<5; i++) {
1627        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1628        if (ret==0) {
1629            break;
1630        }
1631        delay(1000);
1632    }
1633    if (ret < 0)
1634        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1635                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1636                 ret, m_port_id);
1637}
1638
1639/*
1640Get user frienly devices description from saved env. var
1641Changes certain attributes based on description
1642*/
void DpdkTRexPortAttr::update_description(){
    struct rte_pci_addr pci_addr;
    char pci[16];
    char * envvar;
    std::string pci_envvar_name;
    // PCI address of this port, formatted as e.g. "0000:03:00.0"
    pci_addr = rte_eth_devices[m_port_id].pci_dev->addr;
    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
    intf_info_st.pci_addr = pci;
    // env var name is "pci<addr>" with ':' and '.' replaced by '_' (shell-safe)
    pci_envvar_name = "pci" + intf_info_st.pci_addr;
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
    envvar = std::getenv(pci_envvar_name.c_str());
    if (envvar) {
        intf_info_st.description = envvar;
    } else {
        intf_info_st.description = "Unknown";
    }
    // Disable features known to be broken/unsupported for specific NIC models
    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
        flag_is_link_change_supported = false;
    }
    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
        flag_is_fc_change_supported = false;
        flag_is_led_change_supported = false;
    }
    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
    }
}
1671
1672int DpdkTRexPortAttr::set_led(bool on){
1673    if (on) {
1674        return rte_eth_led_on(m_port_id);
1675    }else{
1676        return rte_eth_led_off(m_port_id);
1677    }
1678}
1679
1680int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1681    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1682    if (ret) {
1683        mode = -1;
1684        return ret;
1685    }
1686    mode = (int) fc_conf_tmp.mode;
1687    return 0;
1688}
1689
1690int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1691    if (!flag_is_fc_change_supported) {
1692        return -ENOTSUP;
1693    }
1694    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1695    if (ret) {
1696        return ret;
1697    }
1698    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1699    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1700}
1701
/* Reset the port's extended (xstats) counters to zero. */
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1705
1706int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
1707    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
1708    if (size < 0) {
1709        return size;
1710    }
1711    xstats_values_tmp.resize(size);
1712    xstats_values.resize(size);
1713    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
1714    if (size < 0) {
1715        return size;
1716    }
1717    for (int i=0; i<size; i++) {
1718        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
1719    }
1720    return 0;
1721}
1722
1723int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1724    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1725    if (size < 0) {
1726        return size;
1727    }
1728    xstats_names_tmp.resize(size);
1729    xstats_names.resize(size);
1730    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1731    if (size < 0) {
1732        return size;
1733    }
1734    for (int i=0; i<size; i++) {
1735        xstats_names[i] = xstats_names_tmp[i].name;
1736    }
1737    return 0;
1738}
1739
1740void DpdkTRexPortAttr::dump_link(FILE *fd){
1741    fprintf(fd,"port : %d \n",(int)m_port_id);
1742    fprintf(fd,"------------\n");
1743
1744    fprintf(fd,"link         : ");
1745    if (m_link.link_status) {
1746        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1747                (unsigned) m_link.link_speed,
1748                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1749                ("full-duplex") : ("half-duplex\n"));
1750    } else {
1751        fprintf(fd," Link Down\n");
1752    }
1753    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1754}
1755
/* Refresh the cached rte_eth_dev_info for this port from DPDK. */
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1759
1760void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1761    uint32_t speed_capa = dev_info.speed_capa;
1762    if (speed_capa & ETH_LINK_SPEED_1G)
1763        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1764    if (speed_capa & ETH_LINK_SPEED_10G)
1765        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1766    if (speed_capa & ETH_LINK_SPEED_40G)
1767        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1768    if (speed_capa & ETH_LINK_SPEED_100G)
1769        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1770}
1771
/* Refresh the cached link state; rte_eth_link_get() may block waiting
   for link resolution (unlike the _nowait variant below). */
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1775
1776bool DpdkTRexPortAttr::update_link_status_nowait(){
1777    rte_eth_link new_link;
1778    bool changed = false;
1779    rte_eth_link_get_nowait(m_port_id, &new_link);
1780
1781    if (new_link.link_speed != m_link.link_speed ||
1782                new_link.link_duplex != m_link.link_duplex ||
1783                    new_link.link_autoneg != m_link.link_autoneg ||
1784                        new_link.link_status != m_link.link_status) {
1785        changed = true;
1786
1787        /* in case of link status change - notify the dest object */
1788        if (new_link.link_status != m_link.link_status) {
1789            on_link_down();
1790        }
1791    }
1792
1793    m_link = new_link;
1794    return changed;
1795}
1796
1797int DpdkTRexPortAttr::add_mac(char * mac){
1798    struct ether_addr mac_addr;
1799    for (int i=0; i<6;i++) {
1800        mac_addr.addr_bytes[i] =mac[i];
1801    }
1802    return rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
1803}
1804
1805int DpdkTRexPortAttr::set_promiscuous(bool enable){
1806    if (enable) {
1807        rte_eth_promiscuous_enable(m_port_id);
1808    }else{
1809        rte_eth_promiscuous_disable(m_port_id);
1810    }
1811    return 0;
1812}
1813
1814int DpdkTRexPortAttr::set_link_up(bool up){
1815    if (up) {
1816        return rte_eth_dev_set_link_up(m_port_id);
1817    }else{
1818        return rte_eth_dev_set_link_down(m_port_id);
1819    }
1820}
1821
1822bool DpdkTRexPortAttr::get_promiscuous(){
1823    int ret=rte_eth_promiscuous_get(m_port_id);
1824    if (ret<0) {
1825        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1826                 "err=%d, port=%u\n",
1827                 ret, m_port_id);
1828
1829    }
1830    return ( ret?true:false);
1831}
1832
1833
/* Read the port's burned-in (default) MAC address into mac_addr. */
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1837
/* Delegate flow-director statistics dumping to the NIC-specific driver. */
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1841
/* Dump the non-zero fields of an ixgbe (82599) hardware statistics
   snapshot to fd. DP_A1 prints a scalar counter, DP_A2 prints each
   non-zero element of a counter array. Zero counters are skipped to
   keep the output compact; commented-out entries are counters the
   authors chose not to report. */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    /* error / drop counters */
    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    /* good rx/tx packet and byte counters */
    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    /* per-queue counters */
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    /* flow-director counters */
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    /* FCoE and misc counters */
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1928
/* Record the counters accumulated during pre-test (e.g. ARP resolution)
   so they can be subtracted from the real test results later. */
void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
    // reading m_stats, so drivers saving prev in m_stats will be updated.
    // Actually, we want m_stats to be cleared
    get_ex_drv()->get_extended_stats(this, &m_stats);

    /* snapshot pre-test traffic into the "ignore" baseline ... */
    m_ignore_stats.ipackets = m_stats.ipackets;
    m_ignore_stats.ibytes = m_stats.ibytes;
    m_ignore_stats.opackets = m_stats.opackets;
    m_ignore_stats.obytes = m_stats.obytes;
    /* ... and zero the live counters so the test starts clean */
    m_stats.ipackets = 0;
    m_stats.opackets = 0;
    m_stats.ibytes = 0;
    m_stats.obytes = 0;

    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;

    // verbose level >= 3: show what is being ignored
    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
        m_ignore_stats.dump(stdout);
    }
}
1951
1952void CPhyEthIF::dump_stats(FILE *fd){
1953
1954    update_counters();
1955
1956    fprintf(fd,"port : %d \n",(int)m_port_id);
1957    fprintf(fd,"------------\n");
1958    m_stats.DumpAll(fd);
1959    //m_stats.Dump(fd);
1960    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
1961    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
1962}
1963
/* Reset both the HW counters (via DPDK) and the SW mirror. */
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
1968
1969class CCorePerPort  {
1970public:
1971    CCorePerPort (){
1972        m_tx_queue_id=0;
1973        m_len=0;
1974        int i;
1975        for (i=0; i<MAX_PKT_BURST; i++) {
1976            m_table[i]=0;
1977        }
1978        m_port=0;
1979    }
1980    uint8_t                 m_tx_queue_id;
1981    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
1982    uint16_t                m_len;
1983    rte_mbuf_t *            m_table[MAX_PKT_BURST];
1984    CPhyEthIF  *            m_port;
1985};
1986
1987
1988#define MAX_MBUF_CACHE 100
1989
1990
/* per core/gbe queue port for transmit */
// Per-core transmit interface: each DP core owns one CCoreEthIF, which
// batches packets into per-direction (client/server) TX bursts.
class CCoreEthIF : public CVirtualIF {
public:
    enum {
     INVALID_Q_ID = 255   // sentinel: latency queue not assigned
    };

public:

    CCoreEthIF(){
        m_mbuf_cache=0;
    }

    // Wire this core to its two TX ports/queues; tx_q_id_lat is the
    // queue used for latency packets on both sides.
    bool Create(uint8_t             core_id,
                uint8_t            tx_client_queue_id,
                CPhyEthIF  *        tx_client_port,
                uint8_t            tx_server_queue_id,
                CPhyEthIF  *        tx_server_port,
                uint8_t             tx_q_id_lat);
    void Delete();

    virtual int open_file(std::string file_name){
        return (0);
    }

    // Closing flushes any staged packets.
    virtual int close_file(void){
        return (flush_tx_queue());
    }
    // Send a packet that carries flow-stat info (ip-id or latency payload).
    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
                                                       , CCorePerPort *  lp_port
                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
    virtual int flush_tx_queue(void);
    // Rarely-taken per-packet features (MAC override, client config).
    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);

    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);

    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);

    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);

    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
    void GetCoreCounters(CVirtualIFPerSideStats *stats);
    void DumpCoreStats(FILE *fd);
    void DumpIfStats(FILE *fd);
    static void DumpIfCfgHeader(FILE *fd);
    void DumpIfCfg(FILE *fd);

    // NUMA socket of the client-side port (both sides assumed co-located).
    socket_id_t get_socket_id(){
        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
    }

    const CCorePerPort * get_ports() {
        return m_ports;
    }

protected:

    int send_burst(CCorePerPort * lp_port,
                   uint16_t len,
                   CVirtualIFPerSideStats  * lp_stats);
    int send_pkt(CCorePerPort * lp_port,
                 rte_mbuf_t *m,
                 CVirtualIFPerSideStats  * lp_stats);
    // Immediate (non-batched) send on the latency queue.
    int send_pkt_lat(CCorePerPort * lp_port,
                 rte_mbuf_t *m,
                 CVirtualIFPerSideStats  * lp_stats);

    void add_vlan(rte_mbuf_t *m, uint16_t vlan_id);

protected:
    uint8_t      m_core_id;
    uint16_t     m_mbuf_cache;   // number of mbufs cached so far (OPT_REPEAT_MBUF)
    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
    CNodeRing *  m_ring_to_rx;   // DP -> RX core message ring

} __rte_cache_aligned; ;
2069
// Stateless-mode transmit interface: adds flow-stat handling and
// slow-path (PCAP replay) node support on top of CCoreEthIF.
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
protected:
    // Dispatch for nodes flagged as slow path (currently PCAP only).
    int handle_slow_path_node(CGenNode *node);
    int send_pcap_node(CGenNodePCAP *pcap_node);
};
2079
2080bool CCoreEthIF::Create(uint8_t             core_id,
2081                        uint8_t             tx_client_queue_id,
2082                        CPhyEthIF  *        tx_client_port,
2083                        uint8_t             tx_server_queue_id,
2084                        CPhyEthIF  *        tx_server_port,
2085                        uint8_t tx_q_id_lat ) {
2086    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
2087    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
2088    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2089    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
2090    m_ports[SERVER_SIDE].m_port        = tx_server_port;
2091    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2092    m_core_id = core_id;
2093
2094    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
2095    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
2096    assert( m_ring_to_rx);
2097    return (true);
2098}
2099
2100int CCoreEthIF::flush_tx_queue(void){
2101    /* flush both sides */
2102    pkt_dir_t dir;
2103    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2104        CCorePerPort * lp_port = &m_ports[dir];
2105        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2106        if ( likely(lp_port->m_len > 0) ) {
2107            send_burst(lp_port, lp_port->m_len, lp_stats);
2108            lp_port->m_len = 0;
2109        }
2110    }
2111
2112    return 0;
2113}
2114
2115void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2116    stats->Clear();
2117    pkt_dir_t   dir ;
2118    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2119        stats->Add(&m_stats[dir]);
2120    }
2121}
2122
2123void CCoreEthIF::DumpCoreStats(FILE *fd){
2124    fprintf (fd,"------------------------ \n");
2125    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2126    fprintf (fd,"------------------------ \n");
2127
2128    CVirtualIFPerSideStats stats;
2129    GetCoreCounters(&stats);
2130    stats.Dump(stdout);
2131}
2132
/* Print the column header matching DumpIfCfg()'s row format. */
void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
    fprintf (fd," ------------------------------------------\n");
}
2137
/* Print one row of the per-core interface configuration table
   (see DumpIfCfgHeader for the columns). */
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2147
2148
2149void CCoreEthIF::DumpIfStats(FILE *fd){
2150
2151    fprintf (fd,"------------------------ \n");
2152    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2153    fprintf (fd,"------------------------ \n");
2154
2155    const char * t[]={"client","server"};
2156    pkt_dir_t   dir ;
2157    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2158        CCorePerPort * lp=&m_ports[dir];
2159        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2160        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2161        fprintf (fd," ---------------------------- \n");
2162        lpstats->Dump(fd);
2163    }
2164}
2165
2166#define DELAY_IF_NEEDED
2167
/* Transmit 'len' packets staged in lp_port's burst table on the data
   tx queue. With DELAY_IF_NEEDED defined (the default here), busy-wait
   until the NIC accepts everything, counting each retry as a queue-full
   event; otherwise drop and free whatever did not fit. Always returns 0. */
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    /* retry the unsent tail until the queue drains */
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2196
2197
2198int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2199                         rte_mbuf_t      *m,
2200                         CVirtualIFPerSideStats  * lp_stats
2201                         ){
2202
2203    uint16_t len = lp_port->m_len;
2204    lp_port->m_table[len]=m;
2205    len++;
2206    /* enough pkts to be sent */
2207    if (unlikely(len == MAX_PKT_BURST)) {
2208        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2209        len = 0;
2210    }
2211    lp_port->m_len = len;
2212
2213    return (0);
2214}
2215
/* Send a single latency packet immediately (not batched) on the
   dedicated latency tx queue. Returns the number of packets sent (1),
   or 0 in the drop variant. */
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    /* busy-wait until the NIC accepts the packet */
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    /* drop variant: free the mbuf and count the drop */
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2240
2241void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2242                              rte_mbuf_t      *m){
2243    CCorePerPort *  lp_port=&m_ports[dir];
2244    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2245    send_pkt(lp_port,m,lp_stats);
2246    /* flush */
2247    send_burst(lp_port,lp_port->m_len,lp_stats);
2248    lp_port->m_len = 0;
2249}
2250
/* Send a packet that belongs to a flow-stat enabled stream.
   hw_id < MAX_FLOW_STATS        -> ip-id rule: count and send normally.
   hw_id >= MAX_FLOW_STATS       -> latency payload rule: append a
   flow_stat_payload_header (seq/hw_id/flow_seq/magic/timestamp) and send
   on the latency queue. Per-flow tx counters are updated in both cases. */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // NOTE(review): fsp_head is assumed non-NULL after this call -
        // alloc_flow_stat_mbuf is expected to set it; confirm it cannot fail.
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        /* deliberately corrupt the sequence numbers to exercise the
           error counters on the receive side */
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // timestamp as late as possible, right before transmit
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2304
2305int CCoreEthIFStateless::send_node(CGenNode * no) {
2306    /* if a node is marked as slow path - single IF to redirect it to slow path */
2307    if (no->get_is_slow_path()) {
2308        return handle_slow_path_node(no);
2309    }
2310
2311    CGenNodeStateless * node_sl=(CGenNodeStateless *) no;
2312
2313    /* check that we have mbuf  */
2314    rte_mbuf_t *    m;
2315
2316    pkt_dir_t dir=(pkt_dir_t)node_sl->get_mbuf_cache_dir();
2317    CCorePerPort *  lp_port=&m_ports[dir];
2318    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2319    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2320        m=node_sl->cache_mbuf_array_get_cur();
2321        rte_pktmbuf_refcnt_update(m,1);
2322    }else{
2323        m=node_sl->get_cache_mbuf();
2324
2325        if (m) {
2326            /* cache case */
2327            rte_pktmbuf_refcnt_update(m,1);
2328        }else{
2329            m=node_sl->alloc_node_with_vm();
2330            assert(m);
2331        }
2332    }
2333
2334    if (unlikely(node_sl->is_stat_needed())) {
2335        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2336            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2337            // assert here just to make sure.
2338            assert(1);
2339        }
2340        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2341    } else {
2342        send_pkt(lp_port,m,lp_stats);
2343    }
2344
2345    return (0);
2346};
2347
2348int CCoreEthIFStateless::send_pcap_node(CGenNodePCAP *pcap_node) {
2349    rte_mbuf_t *m = pcap_node->get_pkt();
2350    if (!m) {
2351        return (-1);
2352    }
2353
2354    pkt_dir_t dir = (pkt_dir_t)pcap_node->get_mbuf_dir();
2355    CCorePerPort *lp_port=&m_ports[dir];
2356    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2357
2358    send_pkt(lp_port, m, lp_stats);
2359
2360    return (0);
2361}
2362
2363/**
2364 * slow path code goes here
2365 *
2366 */
2367int CCoreEthIFStateless::handle_slow_path_node(CGenNode * no) {
2368
2369    if (no->m_type == CGenNode::PCAP_PKT) {
2370        return send_pcap_node((CGenNodePCAP *)no);
2371    }
2372
2373    return (-1);
2374}
2375
2376void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2377
2378    assert(cfg);
2379
2380    /* take the right direction config */
2381    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2382
2383    /* dst mac */
2384    if (cfg_dir.has_dst_mac_addr()) {
2385        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2386    }
2387
2388    /* src mac */
2389    if (cfg_dir.has_src_mac_addr()) {
2390        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2391    }
2392
2393    /* VLAN */
2394    if (cfg_dir.has_vlan()) {
2395        add_vlan(m, cfg_dir.get_vlan());
2396    }
2397}
2398
2399
/* Tag the mbuf for hardware VLAN insertion on transmit.
   NOTE(review): ol_flags is overwritten (=) rather than OR-ed (|=);
   callers in this file pass freshly generated mbufs so no offload flags
   are lost - confirm before reusing on mbufs that already carry flags. */
void CCoreEthIF::add_vlan(rte_mbuf_t *m, uint16_t vlan_id) {
    m->ol_flags = PKT_TX_VLAN_PKT;
    m->l2_len   = 14;   // ethernet header length, needed by the VLAN offload
    m->vlan_tci = vlan_id;
}
2405
2406/**
2407 * slow path features goes here (avoid multiple IFs)
2408 *
2409 */
void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {


    /* MAC override: embed the flow's src IP into the src MAC (bytes 6-9) */
    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
        /* client side */
        if ( node->is_initiator_pkt() ) {
            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
        }
    }

    /* flag is faster than checking the node pointer (another cacheline) */
    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
        apply_client_cfg(node->m_client_cfg, m, dir, p);
    }

}
2427
2428int CCoreEthIF::send_node(CGenNode * node) {
2429
2430#ifdef OPT_REPEAT_MBUF
2431
2432    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2433        pkt_dir_t       dir;
2434        rte_mbuf_t *    m=node->get_cache_mbuf();
2435        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2436        CCorePerPort *  lp_port=&m_ports[dir];
2437        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2438        rte_pktmbuf_refcnt_update(m,1);
2439        send_pkt(lp_port,m,lp_stats);
2440        return (0);
2441    }
2442#endif
2443
2444    CFlowPktInfo *  lp=node->m_pkt_info;
2445    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2446
2447    pkt_dir_t       dir;
2448    bool            single_port;
2449
2450    dir         = node->cur_interface_dir();
2451    single_port = node->get_is_all_flow_from_same_dir() ;
2452
2453
2454    if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2455        /* which vlan to choose 0 or 1*/
2456        uint8_t vlan_port = (node->m_src_ip &1);
2457        uint16_t vlan_id  = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2458
2459        if (likely( vlan_id >0 ) ) {
2460            dir = dir ^ vlan_port;
2461        }else{
2462            /* both from the same dir but with VLAN0 */
2463            vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2464            dir = dir ^ 0;
2465        }
2466
2467        add_vlan(m, vlan_id);
2468    }
2469
2470    CCorePerPort *lp_port = &m_ports[dir];
2471    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2472
2473    if (unlikely(m==0)) {
2474        lp_stats->m_tx_alloc_error++;
2475        return(0);
2476    }
2477
2478    /* update mac addr dest/src 12 bytes */
2479    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2480    uint8_t p_id = lp_port->m_port->get_port_id();
2481
2482    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2483
2484     /* when slowpath features are on */
2485    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2486        handle_slowpath_features(node, m, p, dir);
2487    }
2488
2489
2490    if ( unlikely( node->is_rx_check_enabled() ) ) {
2491        lp_stats->m_tx_rx_check_pkt++;
2492        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2493        lp_stats->m_template.inc_template( node->get_template_id( ));
2494    }else{
2495
2496#ifdef OPT_REPEAT_MBUF
2497        // cache only if it is not sample as this is more complex mbuf struct
2498        if ( unlikely( node->can_cache_mbuf() ) ) {
2499            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2500                m_mbuf_cache++;
2501                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2502                    /* limit the number of object to cache */
2503                    node->set_mbuf_cache_dir( dir);
2504                    node->set_cache_mbuf(m);
2505                    rte_pktmbuf_refcnt_update(m,1);
2506                }
2507            }
2508        }
2509#endif
2510
2511    }
2512
2513    /*printf("send packet -- \n");
2514      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2515
2516    /* send the packet */
2517    send_pkt(lp_port,m,lp_stats);
2518    return (0);
2519}
2520
2521
2522int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2523    assert(p);
2524    assert(dir<2);
2525
2526    CCorePerPort *  lp_port=&m_ports[dir];
2527    uint8_t p_id=lp_port->m_port->get_port_id();
2528    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2529    return (0);
2530}
2531
2532pkt_dir_t
2533CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2534
2535    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2536        if (m_ports[dir].m_port->get_port_id() == port_id) {
2537            return dir;
2538        }
2539    }
2540
2541    return (CS_INVALID);
2542}
2543
2544class CLatencyHWPort : public CPortLatencyHWBase {
2545public:
2546    void Create(CPhyEthIF  * p,
2547                uint8_t tx_queue,
2548                uint8_t rx_queue){
2549        m_port=p;
2550        m_tx_queue_id=tx_queue;
2551        m_rx_queue_id=rx_queue;
2552    }
2553
2554    virtual int tx(rte_mbuf_t *m) {
2555        rte_mbuf_t *tx_pkts[2];
2556
2557        tx_pkts[0] = m;
2558        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2559            /* vlan mode is the default */
2560            /* set the vlan */
2561            m->ol_flags = PKT_TX_VLAN_PKT;
2562            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
2563            m->l2_len   =14;
2564        }
2565        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
2566        if ( res == 0 ) {
2567            rte_pktmbuf_free(m);
2568            //printf(" queue is full for latency packet !!\n");
2569            return (-1);
2570
2571        }
2572#if 0
2573        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
2574        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
2575        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
2576        utl_DumpBuffer(stdout,p1,pkt_size1,0);
2577#endif
2578
2579        return (0);
2580    }
2581
2582
2583    /* nothing special with HW implementation */
2584    virtual int tx_latency(rte_mbuf_t *m) {
2585        return tx(m);
2586    }
2587
2588    virtual rte_mbuf_t * rx(){
2589        rte_mbuf_t * rx_pkts[1];
2590        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
2591        if (cnt) {
2592            return (rx_pkts[0]);
2593        }else{
2594            return (0);
2595        }
2596    }
2597
2598
2599    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
2600                              uint16_t nb_pkts){
2601        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
2602        return (cnt);
2603    }
2604
2605
2606private:
2607    CPhyEthIF  * m_port;
2608    uint8_t      m_tx_queue_id ;
2609    uint8_t      m_rx_queue_id;
2610};
2611
2612
2613class CLatencyVmPort : public CPortLatencyHWBase {
2614public:
2615    void Create(uint8_t port_index,
2616                CNodeRing *ring,
2617                CLatencyManager *mgr,
2618                CPhyEthIF  *p) {
2619
2620        m_dir        = (port_index % 2);
2621        m_ring_to_dp = ring;
2622        m_mgr        = mgr;
2623        m_port       = p;
2624    }
2625
2626
2627    virtual int tx(rte_mbuf_t *m) {
2628        return tx_common(m, false);
2629    }
2630
2631    virtual int tx_latency(rte_mbuf_t *m) {
2632        return tx_common(m, true);
2633    }
2634
2635    virtual rte_mbuf_t * rx() {
2636        rte_mbuf_t * rx_pkts[1];
2637        uint16_t cnt = m_port->rx_burst(0, rx_pkts, 1);
2638        if (cnt) {
2639            return (rx_pkts[0]);
2640        } else {
2641            return (0);
2642        }
2643    }
2644
2645    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts) {
2646        uint16_t cnt = m_port->rx_burst(0, rx_pkts, nb_pkts);
2647        return (cnt);
2648    }
2649
2650private:
2651      virtual int tx_common(rte_mbuf_t *m, bool fix_timestamp) {
2652
2653        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2654            /* vlan mode is the default */
2655            /* set the vlan */
2656            m->ol_flags = PKT_TX_VLAN_PKT;
2657            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
2658            m->l2_len   =14;
2659        }
2660
2661        /* allocate node */
2662        CGenNodeLatencyPktInfo *node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
2663        if (!node) {
2664            return (-1);
2665        }
2666
2667        node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
2668        node->m_dir      = m_dir;
2669        node->m_pkt      = m;
2670
2671        if (fix_timestamp) {
2672            node->m_latency_offset = m_mgr->get_latency_header_offset();
2673            node->m_update_ts = 1;
2674        } else {
2675            node->m_update_ts = 0;
2676        }
2677
2678        if ( m_ring_to_dp->Enqueue((CGenNode*)node) != 0 ){
2679            return (-1);
2680        }
2681
2682        return (0);
2683    }
2684
2685    CPhyEthIF  * m_port;
2686    uint8_t                          m_dir;
2687    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
2688    CLatencyManager *                m_mgr;
2689};
2690
2691
2692
/* Per-port counters, computed rates and link state; one instance per port
 * inside CGlobalStats, rendered by Dump/DumpAllPorts/dump_json. */
class CPerPortStats {
public:
    uint64_t opackets;   // TX packet count
    uint64_t obytes;     // TX byte count
    uint64_t ipackets;   // RX packet count
    uint64_t ibytes;     // RX byte count
    uint64_t ierrors;    // RX error count
    uint64_t oerrors;    // TX error count
    /* per flow-stat-id TX counters; "prev" presumably holds the previous
       poll sample for delta computation - confirm at the update site */
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;   // TX rate (bits/sec, per naming)
    float     m_total_tx_pps;   // TX rate (packets/sec, per naming)

    float     m_total_rx_bps;   // RX rate (bits/sec, per naming)
    float     m_total_rx_pps;   // RX rate (packets/sec, per naming)

    float     m_cpu_util;       // CPU utilization reported for this port
    bool      m_link_up = true;         // current link state (shown in dumps)
    bool      m_link_was_down = false;  // latched flag: link dropped at some point
};
2714
/* Aggregated statistics snapshot for the whole TRex instance (filled via
 * CGlobalTRex::get_stats). Can be rendered as plain text (Dump,
 * DumpAllPorts) or as a JSON event (dump_json). */
class CGlobalStats {
public:
    /* output layout for Dump(): one stanza per port, or a columnar table */
    enum DumpFormat {
        dmpSTANDARD,
        dmpTABLE
    };

    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    uint64_t  m_total_alloc_error;  // mbuf allocation failures
    uint64_t  m_total_queue_full;   // TX queue-full events
    uint64_t  m_total_queue_drop;   // packets dropped due to full queues

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    /* NAT/learn-mode counters; only printed when learn mode is active */
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;

    float     m_socket_util;

    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;       // average DP core utilization (%)
    float m_cpu_util_raw;
    float m_rx_cpu_util;    // RX core utilization (%)
    float m_bw_per_core;    // Gb/sec normalized per core (see DumpAllPorts)
    uint8_t m_threads;

    uint32_t      m_num_of_ports;              // valid entries in m_port[]
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    /* serialize the snapshot as a "trex-global" JSON event; when baseline
       is set a "baseline": true marker is included */
    void dump_json(std::string & json, bool baseline);
private:
    /* helpers emitting one `"name":value,` JSON pair (per-port variants
       suffix the key with the port index) */
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2778
2779std::string CGlobalStats::get_field(const char *name, float &f){
2780    char buff[200];
2781    if(f <= -10.0 or f >= 10.0)
2782        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2783    else
2784        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2785    return (std::string(buff));
2786}
2787
2788std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2789    char buff[200];
2790    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2791    return (std::string(buff));
2792}
2793
2794std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2795    char buff[200];
2796    if(f <= -10.0 or f >= 10.0)
2797        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2798    else
2799        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2800    return (std::string(buff));
2801}
2802
2803std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2804    char buff[200];
2805    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2806    return (std::string(buff));
2807}
2808
2809
/* Serialize the whole snapshot as one "trex-global" JSON event into `json`.
 * Field order is part of the emitted format; every helper appends a
 * trailing comma, which the closing `"unknown":0` field absorbs. */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    /* NOTE(review): "%lu" assumes 64-bit long for the HR tick/freq values;
       would mis-format on ILP32 targets - confirm target platforms */
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

/* the macros stringify the member name so it doubles as the JSON key */
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    /* per-port fields, keys suffixed with the port index */
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    /* dummy terminator field: absorbs the trailing comma and closes the object */
    json+="\"unknown\":0}}"  ;
}
2883
/* Human-readable dump of the aggregated (all-ports) counters and rates.
 * NAT columns are appended to some rows only when learn mode is active. */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    /* learn mode: append NAT timeout info on the same line */
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    /* error counters are printed only when non-zero to keep the dump short */
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
2968
2969
2970void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
2971    int i;
2972    int port_to_show=m_num_of_ports;
2973    if (port_to_show>4) {
2974        port_to_show=4;
2975        fprintf (fd," per port - limited to 4   \n");
2976    }
2977
2978
2979    if ( mode== dmpSTANDARD ){
2980        fprintf (fd," --------------- \n");
2981        for (i=0; i<(int)port_to_show; i++) {
2982            CPerPortStats * lp=&m_port[i];
2983            fprintf(fd,"port : %d ",(int)i);
2984            if ( ! lp->m_link_up ) {
2985                fprintf(fd," (link DOWN)");
2986            }
2987            fprintf(fd,"\n------------\n");
2988#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2989#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2990            GS_DP_A4(opackets);
2991            GS_DP_A4(obytes);
2992            GS_DP_A4(ipackets);
2993            GS_DP_A4(ibytes);
2994            GS_DP_A(ierrors);
2995            GS_DP_A(oerrors);
2996            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2997        }
2998    }else{
2999        fprintf(fd," %10s ","ports");
3000        for (i=0; i<(int)port_to_show; i++) {
3001            CPerPortStats * lp=&m_port[i];
3002            if ( lp->m_link_up ) {
3003                fprintf(fd,"| %15d ",i);
3004            } else {
3005                std::string port_with_state = "(link DOWN) " + std::to_string(i);
3006                fprintf(fd,"| %15s ",port_with_state.c_str());
3007            }
3008        }
3009        fprintf(fd,"\n");
3010        fprintf(fd," -----------------------------------------------------------------------------------------\n");
3011        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
3012        };
3013        for (i=0; i<7; i++) {
3014            fprintf(fd," %10s ",names[i].c_str());
3015            int j=0;
3016            for (j=0; j<port_to_show;j++) {
3017                CPerPortStats * lp=&m_port[j];
3018                uint64_t cnt;
3019                switch (i) {
3020                case 0:
3021                    cnt=lp->opackets;
3022                    fprintf(fd,"| %15lu ",cnt);
3023
3024                    break;
3025                case 1:
3026                    cnt=lp->obytes;
3027                    fprintf(fd,"| %15lu ",cnt);
3028
3029                    break;
3030                case 2:
3031                    cnt=lp->ipackets;
3032                    fprintf(fd,"| %15lu ",cnt);
3033
3034                    break;
3035                case 3:
3036                    cnt=lp->ibytes;
3037                    fprintf(fd,"| %15lu ",cnt);
3038
3039                    break;
3040                case 4:
3041                    cnt=lp->ierrors;
3042                    fprintf(fd,"| %15lu ",cnt);
3043
3044                    break;
3045                case 5:
3046                    cnt=lp->oerrors;
3047                    fprintf(fd,"| %15lu ",cnt);
3048
3049                    break;
3050                case 6:
3051                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
3052                    break;
3053                default:
3054                    cnt=0xffffff;
3055                }
3056            } /* ports */
3057            fprintf(fd, "\n");
3058        }/* fields*/
3059    }
3060
3061
3062}
3063
/* Top-level singleton-style object tying together ports, DP cores, the
 * RX/latency cores, statistics collection and the stateless RPC layer. */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;


    /* defaults only; real values are set during Create()/probe-init */
    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    bool Create();
    void Delete();
    int  ixgbe_prob_init();
    int  cores_prob_init();
    int  queues_prob_init();
    int  ixgbe_start();
    int  ixgbe_rx_queue_flush();
    void ixgbe_configure_mg();
    void rx_sl_configure();
    bool is_all_links_are_up(bool dump=false);
    void pre_test();

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        /* first cause wins; later requests are ignored */
        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    /* index of the RX core (last core), or -1 when no RX thread is used */
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    /* number of cores available for TX (datapath) work */
    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

    bool lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port; // Number of TX queues per port
    uint32_t    m_cores_to_dual_ports; /* number of TX cores allocated for each port pair */
    uint16_t    m_rx_core_tx_q_id; /* TX q used by rx core */
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];    /* active VIF per core: points into _sf or _sl */
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;    // publishes async stats/events over ZMQ
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock;          // serializes control-plane access

    TrexMonitor         m_monitor;

    shutdown_rc_e       m_mark_for_shutdown; // pending shutdown cause (SHUTDOWN_NONE = none)

public:
    TrexStateless       *m_trex_stateless;

};
3247
3248// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
3249void CGlobalTRex::pre_test() {
3250    CPretest pretest(m_max_ports);
3251    bool resolve_needed = false;
3252    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
3253    bool need_grat_arp[TREX_MAX_PORTS];
3254
3255    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
3256        std::vector<ClientCfgCompactEntry *> conf;
3257        m_fl.get_client_cfg_ip_list(conf);
3258
3259        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
3260        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
3261        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3262            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
3263        }
3264
3265        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
3266            uint8_t port = (*it)->get_port();
3267            uint16_t vlan = (*it)->get_vlan();
3268            uint32_t count = (*it)->get_count();
3269            uint32_t dst_ip = (*it)->get_dst_ip();
3270            uint32_t src_ip = (*it)->get_src_ip();
3271
3272            for (int i = 0; i < count; i++) {
3273                //??? handle ipv6;
3274                if ((*it)->is_ipv4()) {
3275                    pretest.add_next_hop(port, dst_ip + i, vlan);
3276                }
3277            }
3278            if (!src_ip) {
3279                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
3280                if (!src_ip) {
3281                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
3282                            , port, ip_to_str(dst_ip).c_str(), vlan);
3283                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
3284                    exit(1);
3285                }
3286            }
3287            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
3288            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
3289            m_mg.add_grat_arp_src(ipv4);
3290
3291            delete *it;
3292        }
3293        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3294            fprintf(stdout, "*******Pretest for client cfg********\n");
3295            pretest.dump(stdout);
3296            }
3297    } else {
3298        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3299            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
3300                resolve_needed = true;
3301            } else {
3302                resolve_needed = false;
3303            }
3304
3305            need_grat_arp[port_id] = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip() != 0;
3306
3307            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
3308                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
3309                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
3310
3311            if (resolve_needed) {
3312                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
3313                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
3314            }
3315        }
3316    }
3317
3318    for (int port_id = 0; port_id < m_max_ports; port_id++) {
3319        CPhyEthIF *pif = &m_ports[port_id];
3320        // Configure port to send all packets to software
3321        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
3322    }
3323
3324
3325    pretest.send_grat_arp_all();
3326    bool ret;
3327    int count = 0;
3328    bool resolve_failed = false;
3329    do {
3330        ret = pretest.resolve_all();
3331        count++;
3332    } while ((ret != true) && (count < 10));
3333    if (ret != true) {
3334        resolve_failed = true;
3335    }
3336
3337    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3338        fprintf(stdout, "*******Pretest after resolving ********\n");
3339        pretest.dump(stdout);
3340    }
3341
3342    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
3343        CManyIPInfo pretest_result;
3344        pretest.get_results(pretest_result);
3345        if (resolve_failed) {
3346            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
3347            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
3348                   ip = pretest_result.get_next()) {
3349                if (ip->resolve_needed()) {
3350                    ip->dump(stderr, "  ");
3351                }
3352            }
3353            exit(1);
3354        }
3355        m_fl.set_client_config_resolved_macs(pretest_result);
3356        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3357            m_fl.dump_client_config(stdout);
3358        }
3359
3360        bool port_found[TREX_MAX_PORTS];
3361        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3362            port_found[port_id] = false;
3363        }
3364        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
3365        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
3366        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
3367            // Use first MAC/vlan we see on each port
3368            uint8_t port_id = ip->get_port();
3369            uint16_t vlan = ip->get_vlan();
3370            if ( ! port_found[port_id]) {
3371                port_found[port_id] = true;
3372                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
3373                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
3374            }
3375        }
3376    } else {
3377        uint8_t mac[ETHER_ADDR_LEN];
3378        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3379            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
3380                // we don't have dest MAC. Get it from what we resolved.
3381                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
3382                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();
3383
3384                if (!pretest.get_mac(port_id, ip, vlan, mac)) {
3385                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
3386                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);
3387
3388                    if (get_is_stateless()) {
3389                        continue;
3390                    } else {
3391                        exit(1);
3392                    }
3393                }
3394
3395
3396
3397                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
3398                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
3399                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
3400                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
3401                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
3402                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
3403                                      , port_id);
3404                    m_mg.add_grat_arp_src(ipv4);
3405                }
3406            }
3407
3408            // update statistics baseline, so we can ignore what happened in pre test phase
3409            CPhyEthIF *pif = &m_ports[port_id];
3410            CPreTestStats pre_stats = pretest.get_stats(port_id);
3411            pif->set_ignore_stats_base(pre_stats);
3412
3413            // Configure port back to normal mode. Only relevant packets handled by software.
3414            CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, false);
3415
3416           }
3417        }
3418
3419    /* for stateless only - set port mode */
3420    if (get_is_stateless()) {
3421        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3422            uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip();
3423            uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
3424            const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;
3425
3426            /* L3 mode */
3427            if (src_ipv4 && dg) {
3428                if (memcmp(dst_mac, empty_mac, 6) == 0) {
3429                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg);
3430                } else {
3431                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg, dst_mac);
3432                }
3433
3434            /* L2 mode */
3435            } else if (CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.is_set) {
3436                m_trex_stateless->get_port_by_id(port_id)->set_l2_mode(dst_mac);
3437            }
3438        }
3439    }
3440
3441
3442}
3443
3444/**
3445 * check for a single core
3446 *
3447 * @author imarom (19-Nov-15)
3448 *
3449 * @param thread_id
3450 */
3451void
3452CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3453
3454    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3455
3456    /* fast path check */
3457    if ( likely ( ring->isEmpty() ) ) {
3458        return;
3459    }
3460
3461    while ( true ) {
3462        CGenNode * node = NULL;
3463        if (ring->Dequeue(node) != 0) {
3464            break;
3465        }
3466        assert(node);
3467
3468        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3469        msg->handle();
3470        delete msg;
3471    }
3472
3473}
3474
3475/**
3476 * check for messages that arrived from DP to CP
3477 *
3478 */
3479void
3480CGlobalTRex::check_for_dp_messages() {
3481
3482    /* for all the cores - check for a new message */
3483    for (int i = 0; i < get_cores_tx(); i++) {
3484        check_for_dp_message_from_core(i);
3485    }
3486}
3487
3488bool CGlobalTRex::is_all_links_are_up(bool dump){
3489    bool all_link_are=true;
3490    int i;
3491    for (i=0; i<m_max_ports; i++) {
3492        CPhyEthIF * _if=&m_ports[i];
3493        _if->get_port_attr()->update_link_status();
3494        if ( dump ){
3495            _if->dump_stats(stdout);
3496        }
3497        if ( _if->get_port_attr()->is_link_up() == false){
3498            all_link_are=false;
3499            break;
3500        }
3501    }
3502    return (all_link_are);
3503}
3504
3505void CGlobalTRex::try_stop_all_cores(){
3506
3507    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3508    send_message_all_dp(dp_msg);
3509    delete dp_msg;
3510
3511    if (get_is_stateless()) {
3512        TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3513        send_message_to_rx(rx_msg);
3514    }
3515
3516    // no need to delete rx_msg. Deleted by receiver
3517    bool all_core_finished = false;
3518    int i;
3519    for (i=0; i<20; i++) {
3520        if ( is_all_cores_finished() ){
3521            all_core_finished =true;
3522            break;
3523        }
3524        delay(100);
3525    }
3526    if ( all_core_finished ){
3527        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3528        printf(" All cores stopped !! \n");
3529    }else{
3530        printf(" ERROR one of the DP core is stucked !\n");
3531    }
3532}
3533
3534
3535int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3536
3537    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3538    int i;
3539
3540    for (i=0; i<max_threads; i++) {
3541        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3542        ring->Enqueue((CGenNode*)msg->clone());
3543    }
3544    return (0);
3545}
3546
3547int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3548    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3549    ring->Enqueue((CGenNode *) msg);
3550
3551    return (0);
3552}
3553
3554
3555int  CGlobalTRex::ixgbe_rx_queue_flush(){
3556    int i;
3557    for (i=0; i<m_max_ports; i++) {
3558        CPhyEthIF * _if=&m_ports[i];
3559        _if->flush_rx_queue();
3560    }
3561    return (0);
3562}
3563
3564
3565// init stateful rx core
3566void CGlobalTRex::ixgbe_configure_mg(void) {
3567    int i;
3568    CLatencyManagerCfg mg_cfg;
3569    mg_cfg.m_max_ports = m_max_ports;
3570
3571    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;
3572
3573    if ( latency_rate ) {
3574        mg_cfg.m_cps = (double)latency_rate ;
3575    } else {
3576        // If RX core needed, we need something to make the scheduler running.
3577        // If nothing configured, send 1 CPS latency measurement packets.
3578        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
3579            mg_cfg.m_cps = 1.0;
3580        } else {
3581            mg_cfg.m_cps = 0;
3582        }
3583    }
3584
3585    if ( get_vm_one_queue_enable() ) {
3586        /* vm mode, indirect queues  */
3587        for (i=0; i<m_max_ports; i++) {
3588            CPhyEthIF * _if = &m_ports[i];
3589            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
3590
3591            uint8_t thread_id = (i>>1);
3592
3593            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3594            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg, _if);
3595
3596            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
3597        }
3598
3599    }else{
3600        for (i=0; i<m_max_ports; i++) {
3601            CPhyEthIF * _if=&m_ports[i];
3602            _if->dump_stats(stdout);
3603            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3604
3605            mg_cfg.m_ports[i] =&m_latency_vports[i];
3606        }
3607    }
3608
3609
3610    m_mg.Create(&mg_cfg);
3611    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
3612}
3613
3614// init m_rx_sl object for stateless rx core
3615void CGlobalTRex::rx_sl_configure(void) {
3616    CRxSlCfg rx_sl_cfg;
3617    int i;
3618
3619    rx_sl_cfg.m_max_ports = m_max_ports;
3620    rx_sl_cfg.m_num_crc_fix_bytes = get_ex_drv()->get_num_crc_fix_bytes();
3621
3622    if ( get_vm_one_queue_enable() ) {
3623        /* vm mode, indirect queues  */
3624        for (i=0; i < m_max_ports; i++) {
3625            CPhyEthIF * _if = &m_ports[i];
3626            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3627            uint8_t thread_id = (i >> 1);
3628            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3629            m_latency_vm_vports[i].Create(i, r, &m_mg, _if);
3630            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3631        }
3632    } else {
3633        for (i = 0; i < m_max_ports; i++) {
3634            CPhyEthIF * _if = &m_ports[i];
3635            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3636            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3637        }
3638    }
3639
3640    m_rx_sl.create(rx_sl_cfg);
3641}
3642
/**
 * bring up all DPDK ports: configure RX/TX queues per mode (VM single-queue
 * vs bare-metal), start the ports, wait for link, flush stale RX packets,
 * init the stateful latency manager when needed, and wire each TX core to
 * its dual-port pair.
 *
 * @return 0 on success; exits the process if a link stays down and the
 *         driver cannot auto-drop packets on link-down
 */
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {
        /* mbufs for this port must come from the NUMA node the port sits on */
        socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
        assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        /* non-zero when the driver wants extra RSS queues as a drop workaround */
        uint16_t rx_rss = get_ex_drv()->enable_rss_drop_workaround();

        if ( get_vm_one_queue_enable() ) {
            /* VMXNET3 does claim to support 16K but somehow does not work */
            /* reduce to 2000 */
            m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000;
            /* In VM case, there is one tx q and one rx q */
            _if->configure(1, 1, &m_port_cfg.m_port_conf);
            // Only 1 rx queue, so use it for everything
            m_rx_core_tx_q_id = 0;
            _if->set_rx_queue(0);
            _if->rx_queue_setup(0, RTE_TEST_RX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
            // 1 TX queue in VM case
            _if->tx_queue_setup(0, RTE_TEST_TX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_tx_conf);
        } else {
            // 2 rx queues.
            // TX queues: 1 for each core handling the port pair + 1 for latency pkts + 1 for use by RX core

            uint16_t rx_queues;

            if (rx_rss==0) {
                rx_queues=2;
            }else{
                rx_queues=rx_rss;
            }

            _if->configure(rx_queues, m_cores_to_dual_ports + 2, &m_port_cfg.m_port_conf);
            /* the RX core transmits on the queue right after the DP cores' queues */
            m_rx_core_tx_q_id = m_cores_to_dual_ports;

            if ( rx_rss ) {
                int j=0;
                for (j=0;j<rx_rss; j++) {
                        if (j==MAIN_DPDK_RX_Q){
                            continue;
                        }
                        /* drop queue */
                        _if->rx_queue_setup(j,
                                        RTE_TEST_RX_DESC_DEFAULT_MLX,
                                        socket_id,
                                        &m_port_cfg.m_rx_conf,
                                        CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


                }
            }else{
                 // setup RX drop queue
                _if->rx_queue_setup(MAIN_DPDK_DATA_Q,
                                    RTE_TEST_RX_DESC_DEFAULT,
                                    socket_id,
                                    &m_port_cfg.m_rx_conf,
                                    CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
                // setup RX filter queue
                _if->set_rx_queue(MAIN_DPDK_RX_Q);
            }

            /* filter/latency queue draws from the 9k pool so large packets fit */
            _if->rx_queue_setup(MAIN_DPDK_RX_Q,
                                RTE_TEST_RX_LATENCY_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);

            for (int qid = 0; qid < m_max_queues_per_port; qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);
            }
        }

        if ( rx_rss ){
            /* steer all RSS buckets to the filter queue */
            _if->configure_rss_redirect_table(rx_rss,MAIN_DPDK_RX_Q);
        }

        _if->stats_clear();
        _if->start();
        _if->configure_rx_duplicate_rules();

        /* flow control off by default (unless explicitly disabled or unsupported) */
        if ( ! get_vm_one_queue_enable()  && ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " One of the links is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    /* stateful mode needs the latency manager (RX core) */
    if (! get_is_stateless()) {
        ixgbe_configure_mg();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if ( get_vm_one_queue_enable() ) {
        lat_q_id = 0;
    } else {
        /* latency packets go on the queue following the per-core data queues */
        lat_q_id = get_cores_tx() / get_base_num_cores() + 1;
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    fprintf(stdout, "RX core uses TX queue number %d on all ports\n", m_rx_core_tx_q_id);
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3812
3813static void trex_termination_handler(int signum);
3814
3815void CGlobalTRex::register_signals() {
3816    struct sigaction action;
3817
3818    /* handler */
3819    action.sa_handler = trex_termination_handler;
3820
3821    /* blocked signals during handling */
3822    sigemptyset(&action.sa_mask);
3823    sigaddset(&action.sa_mask, SIGINT);
3824    sigaddset(&action.sa_mask, SIGTERM);
3825
3826    /* no flags */
3827    action.sa_flags = 0;
3828
3829    /* register */
3830    sigaction(SIGINT,  &action, NULL);
3831    sigaction(SIGTERM, &action, NULL);
3832}
3833
/**
 * one-time global initialization: signal handlers, ZMQ publisher, port/core/
 * queue probing, CP<->DP message rings, mbuf pools, port bring-up and
 * (in stateless mode) the TrexStateless object plus its RX configuration.
 *
 * @return true on success, false if the ZMQ publisher could not be created
 */
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    if (!get_is_stateless()) {
        /* stateful mode - the traffic profile comes from a YAML file */
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            CGlobalInfo::m_options.dump(stdout);
            CGlobalInfo::m_memory_cfg.Dump(stdout);
        }
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    /* propagate the VLAN flag from the YAML before port probing uses it */
    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
    }
    /* End update pre flags */

    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    /* all node variants live in the same pools, so their sizes must match */
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */

    uint32_t rx_mbuf = 0 ;

    /* size the RX mbuf pool to cover every RX descriptor on every port */
    if ( get_vm_one_queue_enable() ) {
        rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
    }else{
        rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
    }

    CGlobalInfo::init_pools(rx_mbuf);
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);

        rx_sl_configure();
    }

    return (true);

}
3912void CGlobalTRex::Delete(){
3913
3914    m_zmq_publisher.Delete();
3915    m_fl.Delete();
3916
3917    if (m_trex_stateless) {
3918        delete m_trex_stateless;
3919        m_trex_stateless = NULL;
3920    }
3921}
3922
3923
3924
/**
 * probe the DPDK ethernet devices: validate the port count against the
 * configuration, verify every port uses the same supported driver, check
 * firmware versions, and apply per-driver global port configuration.
 *
 * @return 0 on success; exits the process on any fatal inconsistency
 */
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    printf(" Number of ports found: %d \n",m_max_ports);

    /* ports are always driven as dual-port pairs */
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports %d should be even, mask the one port in the configuration file  \n, ",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum ports supported are %d, use the configuration file to set the expected number of ports   \n",TREX_MAX_PORTS);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports you expected more %d,use the configuration file to set the expected number of ports   \n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    /* query port 0 and use it as the reference device for all checks below */
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
    }



    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
        printf(" Error: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
               ,dev_info.driver_name);
        exit(1);
    }

    /* mixed NIC types are not supported - all ports must match port 0 */
    int i;
    struct rte_eth_dev_info dev_info1;

    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if ( get_vm_one_queue_enable() ) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf(" ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue \n");
            exit(1);
        }
    }
    return (0);
}
4015
4016int  CGlobalTRex::cores_prob_init(){
4017    m_max_cores = rte_lcore_count();
4018    assert(m_max_cores>0);
4019    return (0);
4020}
4021
4022int  CGlobalTRex::queues_prob_init(){
4023
4024    if (m_max_cores < 2) {
4025        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
4026    }
4027
4028    assert((m_max_ports>>1) <= get_cores_tx() );
4029
4030    m_cores_mul = CGlobalInfo::m_options.preview.getCores();
4031
4032    m_cores_to_dual_ports  = m_cores_mul;
4033
4034    /* core 0 - control
4035       -core 1 - port 0/1
4036       -core 2 - port 2/3
4037       -core 3 - port 0/1
4038       -core 4 - port 2/3
4039
4040       m_cores_to_dual_ports = 2;
4041    */
4042
4043    // One q for each core allowed to send on this port + 1 for latency q (Used in stateless) + 1 for RX core.
4044    m_max_queues_per_port  = m_cores_to_dual_ports + 2;
4045
4046    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
4047        rte_exit(EXIT_FAILURE,
4048                 "Error: Number of TX queues exceeds %d. Try running with lower -c <val> \n",BP_MAX_TX_QUEUE);
4049    }
4050
4051    assert(m_max_queues_per_port>0);
4052    return (0);
4053}
4054
4055
/* print the derived port/core/queue configuration summary to fd */
void CGlobalTRex::dump_config(FILE *fd){
    fprintf(fd," number of ports         : %u \n",m_max_ports);
    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
}
4061
4062
4063void CGlobalTRex::dump_links_status(FILE *fd){
4064    for (int i=0; i<m_max_ports; i++) {
4065        m_ports[i].get_port_attr()->update_link_status_nowait();
4066        m_ports[i].get_port_attr()->dump_link(fd);
4067    }
4068}
4069
4070bool CGlobalTRex::lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id) {
4071    for (int i = 0; i < m_max_ports; i++) {
4072        if (memcmp(m_ports[i].get_port_attr()->get_layer_cfg().get_ether().get_src(), mac, 6) == 0) {
4073            port_id = i;
4074            return true;
4075        }
4076    }
4077
4078    return false;
4079}
4080
/**
 * print the end-of-run summary: aggregated SW (per-core) and HW (per-port)
 * TX/RX counters, drop estimation, ARP counts and - when latency
 * measurement is enabled - the latency summary.
 *
 * @param fd  stream the report is written to
 */
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    /* fold the software counters of every TX core */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* fold the hardware counters of every port */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency packets are sent by the RX core, not counted per TX core */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* rx > tx can legitimately happen (e.g. external traffic); warn above 101% */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    for (i=0; i<m_max_ports; i++) {
        if ( m_stats.m_port[i].m_link_was_down ) {
            fprintf (fd, " WARNING: Link was down at port %d during test (at least for some time)!\n", i);
        }
    }
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
4156
4157
4158void CGlobalTRex::update_stats(){
4159
4160    int i;
4161    for (i=0; i<m_max_ports; i++) {
4162        CPhyEthIF * _if=&m_ports[i];
4163        _if->update_counters();
4164    }
4165    uint64_t total_open_flows=0;
4166
4167
4168    CFlowGenListPerThread   * lpt;
4169    for (i=0; i<get_cores_tx(); i++) {
4170        lpt = m_fl.m_threads_info[i];
4171        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4172    }
4173    m_last_total_cps = m_cps.add(total_open_flows);
4174
4175}
4176
4177tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
4178    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4179}
4180
// read stats. Return read value, and clear.
// Aggregates the per-flow TX counters from every DP thread that drives this
// port into the global per-port counter, returns the delta since the previous
// read, then advances the baseline so the next read starts from zero.
// @param port   physical port index
// @param index  flow-stat slot; slots >= MAX_FLOW_STATS carry latency data
// @param is_lat when true, also reset the per-thread latency record of the slot
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    m_stats.m_port[port].m_tx_per_flow[index].clear();

    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        port0 = lpt->getDualPortId() * 2;  /* first port of this thread's dual-port pair */
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
4207
/* Aggregate all statistics into 'stats': per-port HW counters and last
 * measured rates, per-core SW counters (queue drops, templates, per-flow
 * TX), and NAT / socket / flow bookkeeping. Absolute rates are scaled by
 * the configured platform factor at the end. */
void CGlobalTRex::get_stats(CGlobalStats & stats){

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    /* reset global accumulators before summing over ports and cores */
    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port snapshot: copy HW counters and the last sampled rates */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();
        stp->m_link_up        = _if->get_port_attr()->is_link_up();
        /* sticky flag: latches any link-down observation */
        stp->m_link_was_down |= ! _if->get_port_attr()->is_link_up();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* clear per-flow TX counters here; they are re-summed from the
           per-core stats in the core loop below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* per-core accumulation: each core carries stats for both sides
       (index 0/1) of its dual-port pair */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        uint8_t port0 = lpt->getDualPortId() *2;
        /* re-sum per-flow TX counters (cleared in the port loop above) */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* treat a small (< 10% of TX) or negative gap as measurement noise,
       not as real drop */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* avoid divide-by-(near)-zero when CPU utilization is negligible */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4385
4386float
4387CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4388    CPhyEthIF * _if = &m_ports[port_id];
4389
4390    float    tmp = 0;
4391    uint8_t  cnt = 0;
4392    for (const auto &p : _if->get_core_list()) {
4393        uint8_t core_id = p.first;
4394        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4395        if (lp->is_port_active(port_id)) {
4396            tmp += lp->m_cpu_cp_u.GetVal();
4397            cnt++;
4398        }
4399    }
4400
4401    return ( (cnt > 0) ? (tmp / cnt) : 0);
4402
4403}
4404
4405bool CGlobalTRex::sanity_check(){
4406
4407    CFlowGenListPerThread   * lpt;
4408    uint32_t errors=0;
4409    int i;
4410    for (i=0; i<get_cores_tx(); i++) {
4411        lpt = m_fl.m_threads_info[i];
4412        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4413    }
4414
4415    if ( errors ) {
4416        printf(" ERRORs sockets allocation errors! \n");
4417        printf(" you should allocate more clients in the pool \n");
4418        return(true);
4419    }
4420    return ( false);
4421}
4422
4423
4424/* dump the template info */
4425void CGlobalTRex::dump_template_info(std::string & json){
4426    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4427    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4428
4429    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4430    int i;
4431    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4432        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4433        json+="\""+ r->m_name+"\"";
4434        json+=",";
4435    }
4436    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4437    json+="]}" ;
4438}
4439
4440void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){
4441
4442    update_stats();
4443    get_stats(m_stats);
4444
4445    if (format==CGlobalStats::dmpTABLE) {
4446        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
4447            switch (m_io_modes.m_pp_mode ){
4448            case CTrexGlobalIoMode::ppDISABLE:
4449                fprintf(fd,"\n+Per port stats disabled \n");
4450                break;
4451            case CTrexGlobalIoMode::ppTABLE:
4452                fprintf(fd,"\n-Per port stats table \n");
4453                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
4454                break;
4455            case CTrexGlobalIoMode::ppSTANDARD:
4456                fprintf(fd,"\n-Per port stats - standard\n");
4457                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
4458                break;
4459            };
4460
4461            switch (m_io_modes.m_ap_mode ){
4462            case   CTrexGlobalIoMode::apDISABLE:
4463                fprintf(fd,"\n+Global stats disabled \n");
4464                break;
4465            case   CTrexGlobalIoMode::apENABLE:
4466                fprintf(fd,"\n-Global stats enabled \n");
4467                m_stats.DumpAllPorts(fd);
4468                break;
4469            };
4470        }
4471    }else{
4472        /* at exit , always need to dump it in standartd mode for scripts*/
4473        m_stats.Dump(fd,format);
4474        m_stats.DumpAllPorts(fd);
4475    }
4476
4477}
4478
4479void
4480CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
4481    std::string json;
4482
4483    /* refactor to update, dump, and etc. */
4484    if (sync_now) {
4485        update_stats();
4486        get_stats(m_stats);
4487    }
4488
4489    m_stats.dump_json(json, baseline);
4490    m_zmq_publisher.publish_json(json);
4491
4492    /* generator json , all cores are the same just sample the first one */
4493    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
4494    m_zmq_publisher.publish_json(json);
4495
4496
4497    if ( !get_is_stateless() ){
4498        dump_template_info(json);
4499        m_zmq_publisher.publish_json(json);
4500    }
4501
4502    if ( get_is_rx_check_mode() ) {
4503        m_mg.rx_check_dump_json(json );
4504        m_zmq_publisher.publish_json(json);
4505    }
4506
4507    /* backward compatible */
4508    m_mg.dump_json(json );
4509    m_zmq_publisher.publish_json(json);
4510
4511    /* more info */
4512    m_mg.dump_json_v2(json );
4513    m_zmq_publisher.publish_json(json);
4514
4515    if (get_is_stateless()) {
4516        std::string stat_json;
4517        std::string latency_json;
4518        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
4519            m_zmq_publisher.publish_json(stat_json);
4520            m_zmq_publisher.publish_json(latency_json);
4521        }
4522    }
4523}
4524
/* Publish a barrier message carrying 'key' on the async ZMQ channel. */
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    m_zmq_publisher.publish_barrier(key);
}
4529
4530void
4531CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4532    Json::Value data;
4533    data["port_id"] = port_id;
4534    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4535
4536    _attr->to_json(data["attr"]);
4537
4538    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4539}
4540
/* Periodic slow-path tick on the master core: refresh link state,
 * service keyboard/IO modes, run sanity checks, redraw the console
 * stats screen according to the interactive display mode, and publish
 * the async data set. */
void
CGlobalTRex::handle_slow_path() {
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard handling; a true return means the user requested quit */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* clear the terminal (ANSI escape codes) unless display is disabled;
       when disabled, clear once on the first disabled tick */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    /* memory-pool view: dump pool state every 4th tick */
    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* stateful latency / rx-check display (RX thread owned by m_mg) */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            if (CGlobalInfo::m_options.m_latency_rate != 0) {
                switch (m_io_modes.m_l_mode) {
                case CTrexGlobalIoMode::lDISABLE:
                    fprintf(stdout, "\n+Latency stats disabled \n");
                    break;
                case CTrexGlobalIoMode::lENABLE:
                    fprintf(stdout, "\n-Latency stats enabled \n");
                    m_mg.DumpShort(stdout);
                    break;
                case CTrexGlobalIoMode::lENABLE_Extended:
                    fprintf(stdout, "\n-Latency stats extended \n");
                    m_mg.Dump(stdout);
                    break;
                }
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    /* NAT flow-table view (only meaningful in TCP-ACK learn mode) */
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4656
4657
4658void
4659CGlobalTRex::handle_fast_path() {
4660    /* check from messages from DP */
4661    check_for_dp_messages();
4662
4663    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4664    for (int i = 0; i < 1000; i++) {
4665        m_fl.UpdateFast();
4666
4667        if (get_is_stateless()) {
4668            m_rx_sl.update_cpu_util();
4669        }else{
4670            m_mg.update_fast();
4671        }
4672
4673        rte_pause();
4674    }
4675
4676
4677    if ( is_all_cores_finished() ) {
4678        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4679    }
4680}
4681
4682
4683/**
4684 * shutdown sequence
4685 *
4686 */
4687void CGlobalTRex::shutdown() {
4688    std::stringstream ss;
4689    ss << " *** TRex is shutting down - cause: '";
4690
4691    switch (m_mark_for_shutdown) {
4692
4693    case SHUTDOWN_TEST_ENDED:
4694        ss << "test has ended'";
4695        break;
4696
4697    case SHUTDOWN_CTRL_C:
4698        ss << "CTRL + C detected'";
4699        break;
4700
4701    case SHUTDOWN_SIGINT:
4702        ss << "received signal SIGINT'";
4703        break;
4704
4705    case SHUTDOWN_SIGTERM:
4706        ss << "received signal SIGTERM'";
4707        break;
4708
4709    case SHUTDOWN_RPC_REQ:
4710        ss << "server received RPC 'shutdown' request'";
4711        break;
4712
4713    default:
4714        assert(0);
4715    }
4716
4717    /* report */
4718    std::cout << ss.str() << "\n";
4719
4720    /* first stop the WD */
4721    TrexWatchDog::getInstance().stop();
4722
4723    /* stateless shutdown */
4724    if (get_is_stateless()) {
4725        m_trex_stateless->shutdown();
4726    }
4727
4728    if (!is_all_cores_finished()) {
4729        try_stop_all_cores();
4730    }
4731
4732    m_mg.stop();
4733
4734    delay(1000);
4735
4736    /* shutdown drivers */
4737    for (int i = 0; i < m_max_ports; i++) {
4738        m_ports[i].stop();
4739    }
4740
4741    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4742        /* we should stop latency and exit to stop agents */
4743        Delete();
4744        utl_termio_reset();
4745        exit(-1);
4746    }
4747}
4748
4749
/* Master (control-plane) core main loop. Runs the fast path every
 * FASTPATH_DELAY_MS and the slow path every SLOWPATH_DELAY_MS while
 * holding m_cp_lock (released only around the sleep), until a shutdown
 * cause is marked. Returns 0. */
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }

        m_monitor.disable(30); //assume we will wake up

        /* sleep without holding the CP lock so RPC handlers can run */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.enable();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4800
4801
4802
4803int CGlobalTRex::run_in_rx_core(void){
4804
4805    rte_thread_setname(pthread_self(), "TRex RX");
4806
4807    if (get_is_stateless()) {
4808        m_sl_rx_running = true;
4809        m_rx_sl.start();
4810        m_sl_rx_running = false;
4811    } else {
4812        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4813            m_sl_rx_running = false;
4814            m_mg.start(0, true);
4815        }
4816    }
4817
4818    return (0);
4819}
4820
4821int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
4822    std::stringstream ss;
4823
4824    ss << "Trex DP core " << int(virt_core_id);
4825    rte_thread_setname(pthread_self(), ss.str().c_str());
4826
4827    CPreviewMode *lp=&CGlobalInfo::m_options.preview;
4828    if ( lp->getSingleCore() &&
4829         (virt_core_id==2 ) &&
4830         (lp-> getCores() ==1) ){
4831        printf(" bypass this core \n");
4832        m_signal[virt_core_id]=1;
4833        return (0);
4834    }
4835
4836
4837    assert(m_fl_was_init);
4838    CFlowGenListPerThread   * lpt;
4839
4840    lpt = m_fl.m_threads_info[virt_core_id-1];
4841
4842    /* register a watchdog handle on current core */
4843    lpt->m_monitor.create(ss.str(), 1);
4844    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);
4845
4846    if (get_is_stateless()) {
4847        lpt->start_stateless_daemon(*lp);
4848    }else{
4849        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
4850    }
4851
4852    /* done - remove this from the watchdog (we might wait on join for a long time) */
4853    lpt->m_monitor.disable();
4854
4855    m_signal[virt_core_id]=1;
4856    return (0);
4857}
4858
4859
4860int CGlobalTRex::stop_master(){
4861
4862    delay(1000);
4863    fprintf(stdout," ==================\n");
4864    fprintf(stdout," interface sum \n");
4865    fprintf(stdout," ==================\n");
4866    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4867    fprintf(stdout," ==================\n");
4868    fprintf(stdout," \n\n");
4869
4870    fprintf(stdout," ==================\n");
4871    fprintf(stdout," interface sum \n");
4872    fprintf(stdout," ==================\n");
4873
4874    CFlowGenListPerThread   * lpt;
4875    uint64_t total_tx_rx_check=0;
4876
4877    int i;
4878    for (i=0; i<get_cores_tx(); i++) {
4879        lpt = m_fl.m_threads_info[i];
4880        CCoreEthIF * erf_vif = m_cores_vif[i+1];
4881
4882        erf_vif->DumpCoreStats(stdout);
4883        erf_vif->DumpIfStats(stdout);
4884        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
4885            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
4886    }
4887
4888    fprintf(stdout," ==================\n");
4889    fprintf(stdout," generators \n");
4890    fprintf(stdout," ==================\n");
4891    for (i=0; i<get_cores_tx(); i++) {
4892        lpt = m_fl.m_threads_info[i];
4893        lpt->m_node_gen.DumpHist(stdout);
4894        lpt->DumpStats(stdout);
4895    }
4896    if ( CGlobalInfo::m_options.is_latency_enabled() ){
4897        fprintf(stdout," ==================\n");
4898        fprintf(stdout," latency \n");
4899        fprintf(stdout," ==================\n");
4900        m_mg.DumpShort(stdout);
4901        m_mg.Dump(stdout);
4902        m_mg.DumpShortRxCheck(stdout);
4903        m_mg.DumpRxCheck(stdout);
4904        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
4905    }
4906
4907    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4908    dump_post_test_stats(stdout);
4909
4910    return (0);
4911}
4912
4913bool CGlobalTRex::is_all_cores_finished() {
4914    int i;
4915    for (i=0; i<get_cores_tx(); i++) {
4916        if ( m_signal[i+1]==0){
4917            return false;
4918        }
4919    }
4920    if (m_sl_rx_running)
4921        return false;
4922
4923    return true;
4924}
4925
4926
4927int CGlobalTRex::start_master_stateless(){
4928    int i;
4929    for (i=0; i<BP_MAX_CORES; i++) {
4930        m_signal[i]=0;
4931    }
4932    m_fl.Create();
4933    m_expected_pps = 0;
4934    m_expected_cps = 0;
4935    m_expected_bps = 0;
4936
4937    m_fl.generate_p_thread_info(get_cores_tx());
4938    CFlowGenListPerThread   * lpt;
4939
4940    for (i=0; i<get_cores_tx(); i++) {
4941        lpt = m_fl.m_threads_info[i];
4942        CVirtualIF * erf_vif = m_cores_vif[i+1];
4943        lpt->set_vif(erf_vif);
4944        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
4945    }
4946    m_fl_was_init=true;
4947
4948    return (0);
4949}
4950
/* Stateful master init: load the traffic YAML, apply active-flow and
 * client-config overrides, validate options, compute expected rates,
 * seed the latency manager IP ranges, then create the per-thread info
 * and bind each DP thread to its virtual interface / NUMA socket.
 * Exits the process on config errors. Returns 0. */
int CGlobalTRex::start_master_statefull() {
    int i;
    /* clear per-core completion signals */
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    if ( CGlobalInfo::m_options.m_active_flows>0 ) {
        m_fl.update_active_flows(CGlobalInfo::m_options.m_active_flows);
    }

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* seed the latency manager with the generator IP ranges */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode: dump the parsed traffic profile */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
5024
5025
5026////////////////////////////////////////////
5027static CGlobalTRex g_trex;
5028
5029
/* Program the NIC RSS redirection table so RX traffic is distributed
 * round-robin across 'numer_of_queues' queues while never mapping any
 * table entry to 'skip_queue' (reserved — presumably for out-of-band /
 * service traffic; TODO confirm against callers).
 * NOTE(review): if numer_of_queues == 1 and skip_queue == 0, the inner
 * while loop below never terminates — callers must not pass that. */
void CPhyEthIF::configure_rss_redirect_table(uint16_t numer_of_queues,
                                             uint16_t skip_queue){


     struct rte_eth_dev_info dev_info;

     rte_eth_dev_info_get(m_port_id,&dev_info);
     assert(dev_info.reta_size>0);

     /* the RETA is programmed in groups of RTE_RETA_GROUP_SIZE entries */
     int reta_conf_size =
          std::max(1, dev_info.reta_size / RTE_RETA_GROUP_SIZE);

     struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];

     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);

     int i,j;

     for (j=0; j<reta_conf_size; j++) {
         uint16_t skip=0;
         reta_conf[j].mask = ~0ULL;
         for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
             uint16_t q;
             /* pick the next queue round-robin, stepping over skip_queue */
             while (true) {
                 q=(i+skip)%numer_of_queues;
                 if (q!=skip_queue) {
                     break;
                 }
                 skip+=1;
             }
             reta_conf[j].reta[i]=q;
           //  printf(" %d %d %d \n",j,i,q);
         }
     }
     rte_eth_dev_rss_reta_update(m_port_id,&reta_conf[0],dev_info.reta_size);

     /* read back (result unused; kept for debug verification below) */
     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);

     #if 0
     /* verification */
     for (j=0; j<reta_conf_size; j++) {
         for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
             printf(" R  %d %d %d \n",j,i,reta_conf[j].reta[i]);
         }
     }
     #endif

}
5078
5079
5080void CPhyEthIF::update_counters() {
5081    get_ex_drv()->get_extended_stats(this, &m_stats);
5082    CRXCoreIgnoreStat ign_stats;
5083
5084    if (get_is_stateless()) {
5085        g_trex.m_rx_sl.get_ignore_stats(m_port_id, ign_stats, true);
5086    } else {
5087        g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
5088    }
5089
5090    m_stats.obytes -= ign_stats.get_tx_bytes();
5091    m_stats.opackets -= ign_stats.get_tx_pkts();
5092    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
5093    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
5094    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();
5095
5096    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
5097    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
5098    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
5099    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
5100}
5101
5102bool CPhyEthIF::Create(uint8_t portid) {
5103    m_port_id      = portid;
5104    m_last_rx_rate = 0.0;
5105    m_last_tx_rate = 0.0;
5106    m_last_tx_pps  = 0.0;
5107    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
5108
5109    /* set src MAC addr */
5110    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
5111    if (! memcmp( CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
5112        rte_eth_macaddr_get(m_port_id,
5113                            (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src);
5114    }
5115
5116    return true;
5117}
5118
5119const std::vector<std::pair<uint8_t, uint8_t>> &
5120CPhyEthIF::get_core_list() {
5121
5122    /* lazy find */
5123    if (m_core_id_list.size() == 0) {
5124
5125        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
5126
5127            /* iterate over all the directions*/
5128            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
5129                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
5130                    m_core_id_list.push_back(std::make_pair(core_id, dir));
5131                }
5132            }
5133        }
5134    }
5135
5136    return m_core_id_list;
5137
5138}
5139
5140int CPhyEthIF::reset_hw_flow_stats() {
5141    if (get_ex_drv()->hw_rx_stat_supported()) {
5142        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
5143    } else {
5144        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
5145    }
5146    return 0;
5147}
5148
// get/reset flow director (IP-ID rule) counters
// return 0 if OK. -1 if operation not supported.
// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
// min, max - minimum, maximum counters range to get
// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    /* RX side: HW-capable NICs report deltas since the driver's stored
       previous values; otherwise the RX core supplies the counters */
    if (hw_rx_stat_supported) {
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* re-baseline the driver's stored previous value for this id */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            /* accumulate deltas into the running totals and report them */
            if (hw_rx_stat_supported) {
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
5201
5202int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
5203    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
5204    for (int i = min; i <= max; i++) {
5205        if ( reset ) {
5206            if (tx_stats != NULL) {
5207                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
5208            }
5209        } else {
5210            if (tx_stats != NULL) {
5211                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
5212            }
5213        }
5214    }
5215
5216    return 0;
5217}
5218
// If needed, send packets to rx core for processing.
// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
// Returns true if the mbuf was handed off to the RX core (ownership transferred);
// false if the caller still owns (and must free/forward) the mbuf.
bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir, rte_mbuf_t * m) {
    CFlowStatParser parser;
    uint32_t ip_id;

    // unparsable packet: not a latency/flow-stat candidate
    if (parser.parse(rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m)) != 0) {
        return false;
    }
    bool send=false;

    // e1000 on ESXI hands us the packet with the ethernet FCS
    if (parser.get_pkt_size() < rte_pktmbuf_pkt_len(m)) {
        rte_pktmbuf_trim(m, rte_pktmbuf_pkt_len(m) - parser.get_pkt_size());
    }

    if ( get_is_stateless() ) {
        // In stateless RX, we only care about flow stat packets
        if ((parser.get_ip_id(ip_id) == 0) && ((ip_id & 0xff00) == IP_ID_RESERVE_BASE)) {
            send = true;
        }
    } else {
        CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
        // note: bitwise '&' (not '&&') so both checks always evaluate
        bool is_lateancy_pkt =  c_l_pkt_mode->IsLatencyPkt((IPHeader *)parser.get_l4()) &
            CCPortLatency::IsLatencyPkt(parser.get_l4() + c_l_pkt_mode->l4_header_len());

        if (is_lateancy_pkt) {
            send = true;
        } else {
            if ( get_is_rx_filter_enable() ) {
                // assumes rx-check packets are emitted with TTL near 0xff,
                // reduced by up to get_rx_check_hops() - TODO confirm
                uint8_t max_ttl = 0xff - get_rx_check_hops();
                uint8_t pkt_ttl = parser.get_ttl();
                if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
                    send=true;
                }
            }
        }
    }


    if (send) {
        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if ( node ) {
            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
            node->m_dir      = dir;
            node->m_latency_offset = 0xdead;
            node->m_pkt      = m;
            if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
            }else{
                // ring full: release the node; mbuf stays with the caller
                CGlobalInfo::free_node((CGenNode *)node);
                send=false;
            }

#ifdef LATENCY_QUEUE_TRACE_
            printf("rx to cp --\n");
            rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
#endif
        }else{
            // node pool exhausted: cannot forward
            send=false;
        }
    }
    return (send);
}
5282
// Accessor for the global stateless object (may be NULL before creation).
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
5286
// Accessor for the global stateless RX core object.
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
5290
5291static int latency_one_lcore(__attribute__((unused)) void *dummy)
5292{
5293    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5294    physical_thread_id_t  phy_id =rte_lcore_id();
5295
5296    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5297        g_trex.run_in_rx_core();
5298    }else{
5299
5300        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5301            g_trex.run_in_master();
5302            delay(1);
5303        }else{
5304            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5305            /* this core has stopped */
5306            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5307        }
5308    }
5309    return 0;
5310}
5311
5312
5313
5314static int slave_one_lcore(__attribute__((unused)) void *dummy)
5315{
5316    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5317    physical_thread_id_t  phy_id =rte_lcore_id();
5318
5319    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5320        g_trex.run_in_rx_core();
5321    }else{
5322        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5323            g_trex.run_in_master();
5324            delay(1);
5325        }else{
5326            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5327        }
5328    }
5329    return 0;
5330}
5331
5332
5333
// Build a core bitmask: bit 0 (master) is always set, plus (cores-1)
// consecutive bits starting at bit (offset+1) for the worker cores.
// Example: cores=3, offset=1 -> 0b1101.
uint32_t get_cores_mask(uint32_t cores,int offset){
    uint32_t res = 1; /* master core (bit 0) always included */

    // Bug fix: with cores==0 the old loop condition i<(cores-1) compared a
    // signed int against (uint32_t)-1, running ~2^32 iterations and shifting
    // the mask far past 32 bits (undefined behavior). Return just the master
    // bit in that case.
    if (cores == 0) {
        return res;
    }

    uint32_t mask = (1u << (offset + 1));
    for (uint32_t i = 0; i < (cores - 1); i++) {
        res |= mask;
        mask <<= 1;
    }
    return (res);
}
5346
5347
// Saved argv[0], set once in main(); exposed for diagnostics/usage messages.
static char *g_exe_name;
const char *get_exe_name() {
    return g_exe_name;
}
5352
5353
// Process entry point: remember the executable name, then delegate all the
// real work to main_test().
int main(int argc , char * argv[]){
    g_exe_name = argv[0];

    return ( main_test(argc , argv));
}
5359
5360
// Apply settings from the parsed platform YAML (global_platform_cfg_info) to
// the global option/memory singletons: socket layout, port limit, ZMQ/telnet
// ports, per-port MAC/IP config, and a memory-pool size multiplier derived
// from port bandwidth and count. Always returns 0.
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the file info */

        int port_size=cg->m_mac_info.size();

        // clamp to the compiled-in port limit
        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.is_set = 1;

            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
        }
    }

    /* mul by interface type */
    float mul=1.0;
    // memory scaling baseline is a 10Gb port; slower ports are rounded up
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5429
5430extern "C" int eal_cpu_detected(unsigned lcore_id);
5431// return mask representing available cores
5432int core_mask_calc() {
5433    uint32_t mask = 0;
5434    int lcore_id;
5435
5436    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5437        if (eal_cpu_detected(lcore_id)) {
5438            mask |= (1 << lcore_id);
5439        }
5440    }
5441
5442    return mask;
5443}
5444
5445// Return number of set bits in i
// Population count: number of set bits in i.
uint32_t num_set_bits(uint32_t i)
{
    uint32_t count = 0;
    // Kernighan's method: each iteration clears the lowest set bit,
    // so the loop runs once per set bit.
    while (i != 0) {
        i &= (i - 1);
        count++;
    }
    return count;
}
5452
5453// sanity check if the cores we want to use really exist
5454int core_mask_sanity(uint32_t wanted_core_mask) {
5455    uint32_t calc_core_mask = core_mask_calc();
5456    uint32_t wanted_core_num, calc_core_num;
5457
5458    wanted_core_num = num_set_bits(wanted_core_mask);
5459    calc_core_num = num_set_bits(calc_core_mask);
5460
5461    if (calc_core_num == 1) {
5462        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
5463        printf("        If you are running on VM, consider adding more cores if possible\n");
5464        return -1;
5465    }
5466    if (wanted_core_num > calc_core_num) {
5467        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
5468        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
5469               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
5470               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
5471        if (CGlobalInfo::m_options.preview.getCores() > 1)
5472            printf("       Maybe try smaller -c <num>.\n");
5473        printf("       If you are running on VM, consider adding more cores if possible\n");
5474        return -1;
5475    }
5476
5477    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
5478        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
5479        return -1;
5480    }
5481
5482    return 0;
5483}
5484
// Build the DPDK EAL argument vector (global_dpdk_args/global_dpdk_args_num)
// from the TRex configuration: core mask, log level, master lcore, PCI
// whitelist, file prefix and memory limit. Also validates the socket/core
// layout. Returns 0 on success, -1 on configuration error.
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    // core mask is rendered into a static buffer because the EAL argv
    // pointers must stay valid until rte_eal_init() is called
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    // argv[0] placeholder - EAL ignores the program name
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    // DPDK log level: quiet (4) by default, more verbose with -v
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    // NOTE(review): the c_str() pointers below must remain valid through
    // rte_eal_init(); they do, because the backing strings are globals.
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    // file prefix enables multiple TRex instances on the same host; a
    // memory limit (-m) is mandatory in that case
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5569
5570
5571int sim_load_list_of_cap_files(CParserOption * op){
5572
5573    CFlowGenList fl;
5574    fl.Create();
5575    fl.load_from_yaml(op->cfg_file,1);
5576    if ( op->preview.getVMode() >0 ) {
5577        fl.DumpCsv(stdout);
5578    }
5579    uint32_t start=    os_get_time_msec();
5580
5581    CErfIF erf_vif;
5582
5583    fl.generate_p_thread_info(1);
5584    CFlowGenListPerThread   * lpt;
5585    lpt=fl.m_threads_info[0];
5586    lpt->set_vif(&erf_vif);
5587
5588    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5589        lpt->start_generate_stateful(op->out_file,op->preview);
5590    }
5591
5592    lpt->m_node_gen.DumpHist(stdout);
5593
5594    uint32_t stop=    os_get_time_msec();
5595    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5596    fl.Delete();
5597    return (0);
5598}
5599
// Print PCI address, MAC and driver name for every DPDK-detected port.
// Used by the RUN_MODE_DUMP_INFO path; the process exits right after.
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        pci_addr = rte_eth_devices[port_id].pci_dev->addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].pci_dev->driver->name);
    }
}
5617
// Real program entry point (called from main). Orchestrates the whole run:
// option parsing, platform-file merge, EAL init, port reordering, optional
// simulation / debug-packet / dump-info modes, warm-up, and finally the
// per-lcore launch of either latency-only or full traffic generation.
// Returns 0 on clean shutdown, -1/exit() on error paths.
int main_test(int argc , char * argv[]){


    utl_termio_init();

    int ret;
    unsigned lcore_id;
    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);

    CGlobalInfo::m_options.preview.clean();

    // first pass: parse CLI to learn which platform file to load
    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
        exit(-1);
    }

    /* enable core dump if requested */
    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
        utl_set_coredump_size(-1);
    }
    else {
        utl_set_coredump_size(0);
    }


    update_global_info_from_platform_file();

    /* It is not a mistake. Give the user higher priority over the configuration file */
    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
        exit(-1);
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        CGlobalInfo::m_options.dump(stdout);
        CGlobalInfo::m_memory_cfg.Dump(stdout);
    }


    if (update_dpdk_args() < 0) {
        return -1;
    }

    CParserOption * po=&CGlobalInfo::m_options;


    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        rte_set_log_level(1);

    }
    // DPDK requires root privileges (hugepages, PCI access)
    uid_t uid;
    uid = geteuid ();
    if ( uid != 0 ) {
        printf("ERROR you must run with superuser priviliges \n");
        printf("User id   : %d \n",uid);
        printf("try 'sudo' %s \n",argv[0]);
        return (-1);
    }

    /* set affinity to the master core as default */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
    if (ret < 0){
        printf(" You might need to run ./trex-cfg  once  \n");
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    }
    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
        dump_interfaces_info();
        exit(0);
    }
    // align DPDK port ids with the order in /etc/trex_cfg.yaml
    reorder_dpdk_ports();
    time_init();

    /* check if we are in simulation mode */
    if ( CGlobalInfo::m_options.out_file != "" ){
        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
    }

    if ( !g_trex.Create() ){
        exit(1);
    }

    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
        po->m_rx_check_sample = get_min_sample_rate();
        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
    }

    /* set dump mode */
    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);

    /* disable WD if needed */
    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
    TrexWatchDog::getInstance().init(wd_enable);

    g_trex.m_sl_rx_running = false;
    if ( get_is_stateless() ) {
        g_trex.start_master_stateless();

    }else{
        g_trex.start_master_statefull();
    }

    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports);
        int ret;  // note: shadows the outer `ret` - intentional local scope

        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
            // Unit test: toggle many times between receive all and stateless/stateful modes,
            // to test resiliency of add/delete fdir filters
            printf("Starting receive all/normal mode toggle unit test\n");
            for (int i = 0; i < 100; i++) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
                if (ret != 0) {
                    printf("Iteration %d: Receive all mode failed\n", i);
                    exit(ret);
                }

                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
                }

                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
                if (ret != 0) {
                    printf("Iteration %d: Normal mode failed\n", i);
                    exit(ret);
                }

                printf("Iteration %d OK\n", i);
            }
            exit(0);
        } else {
            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
            }
            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
            exit(ret);
        }
    }

    // in case of client config, we already run pretest
    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        g_trex.pre_test();
    }

    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
    g_trex.ixgbe_rx_queue_flush();
    for (int i = 0; i < g_trex.m_max_ports; i++) {
        CPhyEthIF *_if = &g_trex.m_ports[i];
        _if->stop_rx_drop_queue();
    }

    // optional latency warm-up phase before the measured run
    if ( CGlobalInfo::m_options.is_latency_enabled()
         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
            CGlobalInfo::m_options.m_latency_rate;
        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
        g_trex.m_mg.start(pkts, NULL);
        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
        printf("Finished \n");
        g_trex.m_mg.reset();
    }

    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
        g_trex.stop_master();

        return (0);
    }

    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
        g_trex.run_in_core(1);
        g_trex.stop_master();
        return (0);
    }

    // normal mode: launch slave_one_lcore on every lcore and wait
    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    g_trex.stop_master();
    g_trex.Delete();
    utl_termio_reset();

    return (0);
}
5822
5823void wait_x_sec(int sec) {
5824    int i;
5825    printf(" wait %d sec ", sec);
5826    fflush(stdout);
5827    for (i=0; i<sec; i++) {
5828        delay(1000);
5829        printf(".");
5830        fflush(stdout);
5831    }
5832    printf("\n");
5833    fflush(stdout);
5834}
5835
5836/*
5837Changes the order of rte_eth_devices array elements
5838to be consistent with our /etc/trex_cfg.yaml
5839*/
// Reorder the global rte_eth_devices[] array so DPDK port ids match the
// interface order from the TRex config (global_platform_cfg_info.m_if_list).
// Exits the process if a configured PCI address cannot be parsed.
void reorder_dpdk_ports() {
    rte_eth_dev rte_eth_devices_temp[RTE_MAX_ETHPORTS];
    uint8_t m_port_map[RTE_MAX_ETHPORTS];
    struct rte_pci_addr addr;
    uint8_t port_id;

    // gather port relation information and save current array to temp
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices_temp[i], &rte_eth_devices[i], sizeof rte_eth_devices[i]);
        // accept both BDF (bus:dev.fn) and domain-qualified BDF notation
        if (eal_parse_pci_BDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0 && eal_parse_pci_DomBDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0) {
            printf("Failed mapping TRex port id to DPDK id: %d\n", i);
            exit(1);
        }
        rte_eth_dev_get_port_by_addr(&addr, &port_id);
        // m_port_map[dpdk_id] = trex_cfg_id
        // NOTE(review): assumes every detected DPDK port appears in m_if_list;
        // entries for unlisted ports stay uninitialized - verify.
        m_port_map[port_id] = i;
        // print the relation in verbose mode
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            printf("TRex cfg port id: %d <-> DPDK port id: %d\n", i, port_id);
        }
    }

    // actual reorder
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices[m_port_map[i]], &rte_eth_devices_temp[i], sizeof rte_eth_devices_temp[i]);
    }
}
5866
5867//////////////////////////////////////////////////////////////////////////////////////////////
5868//////////////////////////////////////////////////////////////////////////////////////////////
5869// driver section
5870//////////////////////////////////////////////////////////////////////////////////////////////
// Default queue-stop implementation: use the generic DPDK RX queue stop API.
// Returns the rte_eth_dev_rx_queue_stop() result (0 on success).
int CTRexExtendedDriverBase::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
    uint8_t port_id=_if->get_rte_port_id();
    return (rte_eth_dev_rx_queue_stop(port_id, q_num));
}
5875
// Default link-stabilization wait (configurable via --wait-before-traffic).
int CTRexExtendedDriverBase::wait_for_stable_link() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
    return 0;
}
5880
// Default post-link-up settle time.
void CTRexExtendedDriverBase::wait_after_link_up() {
    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
}
5884
// Allocate a fresh generic flow-stat parser. Caller owns the returned object.
CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
    CFlowStatParser *parser = new CFlowStatParser();
    // note: with a throwing operator new this assert can never fire;
    // kept as defensive style
    assert (parser);
    return parser;
}
5890
5891// in 1G we need to wait if links became ready to soon
// 1G NICs report link-up before the link is actually usable; pad the wait.
void CTRexExtendedDriverBase1G::wait_after_link_up(){
    wait_x_sec(6 + CGlobalInfo::m_options.m_wait_before_traffic);
}
5895
// 1G links need extra time to stabilize beyond the configured wait.
int CTRexExtendedDriverBase1G::wait_for_stable_link(){
    wait_x_sec(9 + CGlobalInfo::m_options.m_wait_before_traffic);
    return(0);
}
5900
// Set 1G-specific TX ring thresholds (prefetch/host/write-back).
void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){

    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = 0;
}
5907
// Intentionally empty for 1G devices.
void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
    // Configuration is done in configure_rx_filter_rules by writing to registers
}
5911
5912#define E1000_RXDCTL_QUEUE_ENABLE	0x02000000
5913// e1000 driver does not support the generic stop/start queue API, so we need to implement ourselves
// Stop RX queue q_num by clearing the queue-enable bit in RXDCTL directly
// (the e1000 driver lacks the generic stop/start queue API). Returns 0.
int CTRexExtendedDriverBase1G::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
    uint32_t reg_val = _if->pci_reg_read( E1000_RXDCTL(q_num));
    reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
    _if->pci_reg_write( E1000_RXDCTL(q_num), reg_val);
    return 0;
}
5920
5921int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
5922    if ( get_is_stateless() ) {
5923        return configure_rx_filter_rules_stateless(_if);
5924    } else {
5925        return configure_rx_filter_rules_statefull(_if);
5926    }
5927
5928    return 0;
5929}
5930
// Program e1000/i350 flexible host filters (FHFT) so stateful latency and
// rx-check packets (identified by TTL/protocol or IPv6 NextHdr/HopLimit
// patterns) are steered to RX queue 1. Always returns 0.
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
    uint16_t hops = get_rx_check_hops();
    // v4 rules compare {TTL, Protocol}; pre-subtract the hop budget from TTL
    uint16_t v4_hops = (hops << 8)&0xff00;
    uint8_t protocol;

    // latency packet protocol depends on the configured latency packet mode
    if (CGlobalInfo::m_options.m_l_pkt_mode == 0) {
        protocol = IPPROTO_SCTP;
    } else {
        protocol = IPPROTO_ICMP;
    }
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
    _if->pci_reg_write( E1000_TTQF(0),   protocol
                        | 0x00008100 /* enable */
                        | 0xE0010000 /* RX queue is 1 */
                        );


    /* 16  :   12 MAC , (2)0x0800,2      | DW0 , DW1
       6 bytes , TTL , PROTO     | DW2=0 , DW3=0x0000FF06
    */
    int i;
    // IPv4: bytes being compared are {TTL, Protocol}
    // (TCP/UDP/ICMP with TTL 0xFF or 0xFE, minus the allowed hop count)
    uint16_t ff_rules_v4[6]={
        (uint16_t)(0xFF06 - v4_hops),
        (uint16_t)(0xFE11 - v4_hops),
        (uint16_t)(0xFF11 - v4_hops),
        (uint16_t)(0xFE06 - v4_hops),
        (uint16_t)(0xFF01 - v4_hops),
        (uint16_t)(0xFE01 - v4_hops),
    }  ;
    // IPv6: bytes being compared are {NextHdr, HopLimit}
    uint16_t ff_rules_v6[2]={
        (uint16_t)(0x3CFF - hops),
        (uint16_t)(0x3CFE - hops),
    }  ;
    uint16_t *ff_rules;
    uint16_t num_rules;
    uint32_t mask=0;
    int  rule_id;

    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
        ff_rules = &ff_rules_v6[0];
        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
    }else{
        ff_rules = &ff_rules_v4[0];
        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
    }

    clear_rx_filter_rules(_if);

    uint8_t len = 24;
    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
        /* clear rule all */
        for (i=0; i<0xff; i+=4) {
            _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
        }

        // VLAN shifts all header offsets by 4 bytes; the filter offsets and
        // byte-lane masks below differ accordingly
        if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
            len += 8;
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
            }else{
                // IPv4 VLAN: TTL/Protocol offset = 0x1A
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
            }
        }else{
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6: NextHdr/HopLimit offset = 0x14
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
            }else{
                // IPv4: TTL/Protocol offset = 0x16
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
            }
        }

        // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
        _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8)  | len);

        mask |=(1<<rule_id);
    }

    /* enable all rules */
    _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );

    return (0);
}
6024
6025// Sadly, DPDK has no support for i350 filters, so we need to implement by writing to registers.
6026int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
6027    /* enable filter to pass packet to rx queue 1 */
6028    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
6029    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
6030
6031    uint8_t len = 24;
6032    uint32_t mask = 0;
6033    int rule_id;
6034
6035    clear_rx_filter_rules(_if);
6036
6037    rule_id = 0;
6038    mask |= 0x1 << rule_id;
6039    // filter for byte 18 of packet (msb of IP ID) should equal ff
6040    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x00ff0000);
6041    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
6042    // + bytes 12 + 13 (ether type) should indicate IP.
6043    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000008);
6044    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
6045    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
6046    _if->