// main_dpdk.cpp revision c25e1862
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2016 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
/* rx-check sampling rates: sample one of every N packets (per NIC speed class) */
#define RX_CHECK_MIX_SAMPLE_RATE 8
#define RX_CHECK_MIX_SAMPLE_RATE_1G 2


#define SOCKET0         0

/* maximum number of packets fetched in a single rx burst */
#define MAX_PKT_BURST   32

#define BP_MAX_CORES 32
#define BP_MAX_TX_QUEUE 16
/* cores reserved for the master and the latency/rx core (hence 2) */
#define BP_MASTER_AND_LATENCY 2

/* default rx/tx descriptor ring sizes per NIC flavor */
#define RTE_TEST_RX_DESC_DEFAULT 64
#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)
#define RTE_TEST_RX_DESC_DEFAULT_MLX 8

#define RTE_TEST_RX_DESC_VM_DEFAULT 512
#define RTE_TEST_TX_DESC_VM_DEFAULT 512

typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
void reorder_dpdk_ports();

#define RTE_TEST_TX_DESC_DEFAULT 512
#define RTE_TEST_RX_DESC_DROP    0

/* highest flow-stat hardware counter ids seen so far (ip-id rules / payload rules) */
static int max_stat_hw_id_seen = 0;
static int max_stat_hw_id_seen_payload = 0;
115static inline int get_vm_one_queue_enable(){
116    return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
117}
118
119static inline int get_is_rx_thread_enabled() {
120    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
121}
122
struct port_cfg_t;

/* upper bound on argv entries built for DPDK EAL initialization */
#define MAX_DPDK_ARGS 40
static CPlatformYamlInfo global_platform_cfg_info;
static int global_dpdk_args_num ;
static char * global_dpdk_args[MAX_DPDK_ARGS];
/* scratch string buffers whose addresses are placed into global_dpdk_args
   (presumably cores mask, file prefix, log level, master lcore id - confirm
   against the EAL argument builder) */
static char global_cores_str[100];
static char global_prefix_str[100];
static char global_loglevel_str[20];
static char global_master_id_str[10];
133
134class CTRexExtendedDriverBase {
135public:
136
137    /* by default NIC driver adds CRC */
138    virtual bool has_crc_added() {
139        return true;
140    }
141
142    virtual int get_min_sample_rate(void)=0;
143    virtual void update_configuration(port_cfg_t * cfg)=0;
144    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;
145
146    virtual bool is_hardware_filter_is_supported(){
147        return(false);
148    }
149    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
150    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
151                                          , uint8_t ipv6_next_h, uint16_t id) {return 0;}
152    virtual bool is_hardware_support_drop_queue(){
153        return(false);
154    }
155
156    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
157    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
158    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
159    virtual int  wait_for_stable_link();
160    virtual void wait_after_link_up();
161    virtual bool hw_rx_stat_supported(){return false;}
162    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
163                             , int min, int max) {return -1;}
164    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
165    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
166    virtual int get_stat_counters_num() {return 0;}
167    virtual int get_rx_stat_capabilities() {return 0;}
168    virtual int verify_fw_ver(int i) {return 0;}
169    virtual CFlowStatParser *get_flow_stat_parser();
170    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
171    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;
172    virtual uint8_t get_num_crc_fix_bytes() {return 0;}
173
174    /* Does this NIC type support automatic packet dropping in case of a link down?
175       in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
176       this interface is used as a workaround to let TRex work without link in stateless mode, driver that
177       does not support that will be failed at init time because it will cause watchdog due to watchdog hang */
178    virtual bool drop_packets_incase_of_linkdown() {
179        return (false);
180    }
181
182    /* Mellanox ConnectX-4 can drop only 35MPPS per Rx queue. to workaround this issue we will create multi rx queue and enable RSS. for Queue1 we will disable  RSS
183       return  zero for disable patch and rx queues number for enable
184    */
185
186    virtual uint16_t enable_rss_drop_workaround(void) {
187        return (0);
188    }
189
190};
191
192
/* Driver extension for Intel 1G NICs (registered for PMD "rte_igb_pmd"). */
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // flags after port_id: presumably (is_virtual, fc_change_allowed) - confirm against DpdkTRexPortAttr
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
239
/* Driver extension for paravirtual VM NICs (registered for "rte_vmxnet3_pmd"
   and "rte_virtio_pmd"); forces single rx/tx queue mode. */
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1GVm(){
        /* we are working in mode that we have 1 queue for rx and one queue for tx*/
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }

    /* VM drivers deliver packets without the ethernet CRC */
    virtual bool has_crc_added() {
        return false;
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1GVm() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){

    }

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);

    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
290
/* e1000 (em) driver extension (registered for "rte_em_pmd"); inherits the
   single-queue VM behavior from CTRexExtendedDriverBase1GVm. */
class CTRexExtendedDriverBaseE1000 : public CTRexExtendedDriverBase1GVm {
    CTRexExtendedDriverBaseE1000() {
        // E1000 driver is only relevant in VM in our case
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }
public:
    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverBaseE1000() );
    }
    // e1000 driver handing us packets with ethernet CRC, so we need to chop them
    virtual uint8_t get_num_crc_fix_bytes() {return 4;}
};
303
/* Driver extension for Intel 10G NICs (registered for PMD "rte_ixgbe_pmd"). */
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    /* add or remove an ethertype-based filter rule on the interface */
    int add_del_eth_filter(CPhyEthIF * _if, bool is_add, uint16_t ethertype);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
345
/* Driver extension for Intel 40G NICs (registered for PMD "rte_i40e_pmd").
   Rx flow-stat counters are maintained in hardware on this NIC family. */
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase40G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    /* rx flow-stats are read from hardware counters on this NIC */
    virtual bool hw_rx_stat_supported(){return true;}
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

    /* 40G hardware drops tx packets on link down, avoiding back pressure */
    virtual bool drop_packets_incase_of_linkdown() {
        return (true);
    }

private:
    uint8_t m_if_per_card;
};
408
/* Driver extension for Cisco VIC NICs (registered for PMD "rte_enic_pmd"). */
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }


    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    void clear_extended_stats(CPhyEthIF * _if);

    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);


    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t id
                               , uint8_t l4_proto, uint8_t tos, int queue);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

};
465
466
/* Driver extension for Mellanox ConnectX NICs (registered for PMD "librte_pmd_mlx5"). */
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseMlnx5G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control via the DPDK API causes the interface to malfunction
        // (comment carried over from the 40G driver; presumably applies here too - confirm)
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    /* factory function used by the driver registry */
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    // disabling flow control via DPDK API causes the interface to malfunction (see create_port_attr)
    virtual bool flow_control_disable_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

    /* multi-queue RSS workaround for ConnectX-4 per-queue drop limit
       (see comment on the base-class method); 5 = number of rx queues */
    virtual uint16_t enable_rss_drop_workaround(void) {
        return (5);
    }

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
                               , int queue);
    virtual int add_del_rx_filter_rules(CPhyEthIF * _if, bool set_on);
};
521
/* factory function signature used to instantiate a driver-extension object */
typedef CTRexExtendedDriverBase * (*create_object_t) (void);


/* one registry entry: PMD driver name mapped to its factory function */
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;
    create_object_t     m_constructor;
};
530
/* Singleton registry mapping DPDK PMD driver names to their TRex
   driver-extension factories. The active driver is selected once at init
   via set_driver_name() and retrieved with get_drv(). */
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    /* true when 'name' was registered in the constructor below */
    bool is_driver_exists(std::string name);



    /* select the active driver by PMD name; asserts when the name is unknown */
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    /* return the active driver object; must be called after set_driver_name() */
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    /* singleton accessor */
    static CTRexExtendedDriverDb * Ins();

private:
    CTRexExtendedDriverBase * create_driver(std::string name);

    CTRexExtendedDriverDb(){
        /* physical NICs */
        register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("librte_pmd_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);


        /* virtual devices */
        register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBaseE1000::create);
        register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);




        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;   // singleton instance
    bool        m_driver_was_set;           // guards get_drv() against use before init
    std::string m_driver_name;              // PMD name of the active driver
    CTRexExtendedDriverBase * m_drv;        // active driver object (owned, never freed - process lifetime)
    std::vector <CTRexExtendedDriverRec*>     m_list;   // all registered drivers

};
595
CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins; /* singleton instance, created lazily by Ins() */
597
598
599void CTRexExtendedDriverDb::register_driver(std::string name,
600                                            create_object_t func){
601    CTRexExtendedDriverRec * rec;
602    rec = new CTRexExtendedDriverRec();
603    rec->m_driver_name=name;
604    rec->m_constructor=func;
605    m_list.push_back(rec);
606}
607
608
609bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
610    int i;
611    for (i=0; i<(int)m_list.size(); i++) {
612        if (m_list[i]->m_driver_name == name) {
613            return (true);
614        }
615    }
616    return (false);
617}
618
619
620CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
621    int i;
622    for (i=0; i<(int)m_list.size(); i++) {
623        if (m_list[i]->m_driver_name == name) {
624            return ( m_list[i]->m_constructor() );
625        }
626    }
627    return( (CTRexExtendedDriverBase *)0);
628}
629
630
631
632CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
633    if (!m_ins) {
634        m_ins = new CTRexExtendedDriverDb();
635    }
636    return (m_ins);
637}
638
639static CTRexExtendedDriverBase *  get_ex_drv(){
640
641    return ( CTRexExtendedDriverDb::Ins()->get_drv());
642}
643
644static inline int get_min_sample_rate(void){
645    return ( get_ex_drv()->get_min_sample_rate());
646}
647
// cores =0==1,1*2,2,3,4,5,6
// Identifiers for every command line option; each id is bound to its
// option string(s) in parser_options[] below.
enum { OPT_HELP,
       OPT_MODE_BATCH,
       OPT_MODE_INTERACTIVE,
       OPT_NODE_DUMP,
       OPT_DUMP_INTERFACES,
       OPT_UT,
       OPT_CORES,
       OPT_SINGLE_CORE,
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       OPT_RATE_MULT,
       OPT_DURATION,
       OPT_PLATFORM_FACTOR,
       OPT_PUB_DISABLE,
       OPT_LIMT_NUM_OF_PORTS,
       OPT_PLAT_CFG_FILE,
       OPT_MBUF_FACTOR,
       OPT_LATENCY,
       OPT_NO_CLEAN_FLOW_CLOSE,
       OPT_LATENCY_MASK,
       OPT_ONLY_LATENCY,
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC,
       OPT_PCAP,
       OPT_RX_CHECK,
       OPT_IO_MODE,
       OPT_IPV6,
       OPT_LEARN,
       OPT_LEARN_MODE,
       OPT_LEARN_VERIFY,
       OPT_L_PKT_MODE,
       OPT_NO_FLOW_CONTROL,
       OPT_VLAN,
       OPT_RX_CHECK_HOPS,
       OPT_CLIENT_CFG_FILE,
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
       OPT_NO_OFED_CHECK,
};
697
698/* these are the argument types:
699   SO_NONE --    no argument needed
700   SO_REQ_SEP -- single required argument
701   SO_MULTI --   multiple arguments needed
702*/
703static CSimpleOpt::SOption parser_options[] =
704    {
705        { OPT_HELP,                   "-?",                SO_NONE    },
706        { OPT_HELP,                   "-h",                SO_NONE    },
707        { OPT_HELP,                   "--help",            SO_NONE    },
708        { OPT_UT,                     "--ut",              SO_NONE    },
709        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP },
710        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE    },
711        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP },
712        { OPT_SINGLE_CORE,            "-s",                SO_NONE    },
713        { OPT_FLIP_CLIENT_SERVER,     "--flip",            SO_NONE    },
714        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",                SO_NONE    },
715        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE, "-e",          SO_NONE    },
716        { OPT_NO_CLEAN_FLOW_CLOSE,    "--nc",              SO_NONE    },
717        { OPT_LIMT_NUM_OF_PORTS,      "--limit-ports",     SO_REQ_SEP },
718        { OPT_CORES,                  "-c",                SO_REQ_SEP },
719        { OPT_NODE_DUMP,              "-v",                SO_REQ_SEP },
720        { OPT_DUMP_INTERFACES,        "--dump-interfaces", SO_MULTI   },
721        { OPT_LATENCY,                "-l",                SO_REQ_SEP },
722        { OPT_DURATION,               "-d",                SO_REQ_SEP },
723        { OPT_PLATFORM_FACTOR,        "-pm",               SO_REQ_SEP },
724        { OPT_PUB_DISABLE,            "-pubd",             SO_NONE    },
725        { OPT_RATE_MULT,              "-m",                SO_REQ_SEP },
726        { OPT_LATENCY_MASK,           "--lm",              SO_REQ_SEP },
727        { OPT_ONLY_LATENCY,           "--lo",              SO_NONE    },
728        { OPT_LATENCY_PREVIEW,        "-k",                SO_REQ_SEP },
729        { OPT_WAIT_BEFORE_TRAFFIC,    "-w",                SO_REQ_SEP },
730        { OPT_PCAP,                   "--pcap",            SO_NONE    },
731        { OPT_RX_CHECK,               "--rx-check",        SO_REQ_SEP },
732        { OPT_IO_MODE,                "--iom",             SO_REQ_SEP },
733        { OPT_RX_CHECK_HOPS,          "--hops",            SO_REQ_SEP },
734        { OPT_IPV6,                   "--ipv6",            SO_NONE    },
735        { OPT_LEARN,                  "--learn",           SO_NONE    },
736        { OPT_LEARN_MODE,             "--learn-mode",      SO_REQ_SEP },
737        { OPT_LEARN_VERIFY,           "--learn-verify",    SO_NONE    },
738        { OPT_L_PKT_MODE,             "--l-pkt-mode",      SO_REQ_SEP },
739        { OPT_NO_FLOW_CONTROL,        "--no-flow-control-change", SO_NONE },
740        { OPT_VLAN,                   "--vlan",            SO_NONE    },
741        { OPT_CLIENT_CFG_FILE,        "--client_cfg",      SO_REQ_SEP },
742        { OPT_CLIENT_CFG_FILE,        "--client-cfg",      SO_REQ_SEP },
743        { OPT_NO_KEYBOARD_INPUT,      "--no-key",          SO_NONE    },
744        { OPT_VIRT_ONE_TX_RX_QUEUE,   "--vm-sim",          SO_NONE    },
745        { OPT_PREFIX,                 "--prefix",          SO_REQ_SEP },
746        { OPT_SEND_DEBUG_PKT,         "--send-debug-pkt",  SO_REQ_SEP },
747        { OPT_MBUF_FACTOR,            "--mbuf-factor",     SO_REQ_SEP },
748        { OPT_NO_WATCHDOG,            "--no-watchdog",     SO_NONE    },
749        { OPT_ALLOW_COREDUMP,         "--allow-coredump",  SO_NONE    },
750        { OPT_CHECKSUM_OFFLOAD,       "--checksum-offload", SO_NONE   },
751        { OPT_CLOSE,                  "--close-at-end",    SO_NONE    },
752        { OPT_ARP_REF_PER,            "--arp-refresh-period", SO_REQ_SEP },
753        { OPT_NO_OFED_CHECK,          "--no-ofed-check",   SO_NONE    },
754        SO_END_OF_OPTIONS
755    };
756
757static int usage(){
758
759    printf(" Usage: t-rex-64 [mode] <options>\n\n");
760    printf(" mode is one of:\n");
761    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
762    printf("   -i        : Run TRex in 'stateless' mode\n");
763    printf("\n");
764
765    printf(" Available options are:\n");
766    printf(" --allow-coredump           : Allow creation of core dump \n");
767    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
768    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
769    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
770    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
771    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
772    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
773    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
774    printf("                               This it temporary option. Will be removed in the future \n");
775    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
776    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
777    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
778    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
779    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
780    printf(" --ipv6                     : Work in ipv6 mode \n");
781    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
782    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
783    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
784    printf("      0 (default)    send SCTP packets  \n");
785    printf("      1              Send ICMP request packets  \n");
786    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
787    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
788    printf("    Rate of zero means no latency check \n");
789    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
790    printf(" --learn-mode [1-3]         : Used for working in NAT environments. Dynamically learn the NAT translation done by the DUT \n");
791    printf("      1    In case of TCP flow, use TCP ACK in first SYN to pass NAT translation information. Initial SYN packet must be first packet in the TCP flow \n");
792    printf("           In case of UDP stream, NAT translation information will pass in IP ID field of first packet in flow. This means that this field is changed by TRex\n");
793    printf("      2    Add special IP option to pass NAT translation information to first packet of each flow. Will not work on certain firewalls if they drop packets with IP options \n");
794    printf("      3    Like 1, but without support for sequence number randomization in server->clien direction. Performance (flow/second) better than 1 \n");
795    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
796    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
797    printf(" --lm                       : Hex mask of cores that should send traffic \n");
798    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
799    printf(" --lo                       : Only run latency test \n");
800    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
801    printf(" --mbuf-factor              : Factor for packet memory \n");
802    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
803    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
804    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
805    printf(" --no-ofed-check            : Disable the check of OFED version \n");
806    printf(" --no-watchdog              : Disable watchdog \n");
807    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
808    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
809    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
810    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
811    printf(" -pubd                      : Disable monitors publishers \n");
812    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
813    printf(" -s                         : Single core. Run only one data path core. For debug \n");
814    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
815    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
816    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
817    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
818    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
819    printf(" --vm-sim                   : Simulate vm with driver of one input queue and one output queue \n");
820    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
821    printf("\n");
822    printf(" Examples: ");
823    printf(" basic trex run for 20 sec and multiplier of 10 \n");
824    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
825    printf("\n\n");
826    printf(" Copyright (c) 2015-2016 Cisco Systems, Inc.    \n");
827    printf("                                                                  \n");
828    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
829    printf(" you may not use this file except in compliance with the License. \n");
830    printf(" You may obtain a copy of the License at                          \n");
831    printf("                                                                  \n");
832    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
833    printf("                                                                  \n");
834    printf(" Unless required by applicable law or agreed to in writing, software \n");
835    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
836    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
837    printf(" See the License for the specific language governing permissions and      \n");
838    printf(" limitations under the License.                                           \n");
839    printf(" \n");
840    printf(" Open Source Components / Libraries \n");
841    printf(" DPDK       (BSD)       \n");
842    printf(" YAML-CPP   (BSD)       \n");
843    printf(" JSONCPP    (MIT)       \n");
844    printf(" \n");
845    printf(" Open Source Binaries \n");
846    printf(" ZMQ        (LGPL v3plus) \n");
847    printf(" \n");
848    printf(" Version : %s   \n",VERSION_BUILD_NUM);
849    printf(" DPDK version : %s   \n",rte_version());
850    printf(" User    : %s   \n",VERSION_USER);
851    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
852    printf(" Uuid    : %s    \n",VERSION_UIID);
853    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
854    return (0);
855}
856
857
858int gtest_main(int argc, char **argv) ;
859
/* Report a command-line parsing error to the user and abort the process. */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n";
    std::cout << "*** " << msg << "\n\n";
    exit(-1);
}
864
/* Parse the TRex command line into 'po', then validate the combination of
   options and apply derived settings.
   argc/argv    - the (possibly already copied) command line
   po           - option structure that is filled in; also read for validation
   first_time   - true only on the initial pass: the platform configuration
                  file is loaded and --dump-interfaces arguments collected
                  only then
   Returns 0 on success, -1 when usage was printed (help / unknown option);
   hard errors go through parse_err()/exit() and do not return. */
static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
    CSimpleOpt args(argc, argv, parser_options);

    bool latency_was_set=false;
    (void)latency_was_set;
    char ** rgpszArg = NULL;
    bool opt_vlan_was_set = false;

    int a=0;            // verbosity level captured by OPT_NODE_DUMP, applied after the loop
    int node_dump=0;    // set when -v/node-dump was requested

    po->preview.setFileWrite(true);
    po->preview.setRealTime(true);
    uint32_t tmp_data;  // scratch for sscanf'ed numeric option values

    po->m_run_mode = CParserOption::RUN_MODE_INVALID;

    /* first pass: consume every option and record it in 'po' */
    while ( args.Next() ){
        if (args.LastError() == SO_SUCCESS) {
            switch (args.OptionId()) {

            case OPT_UT :
                parse_err("Supported only in simulation");
                break;

            case OPT_HELP:
                usage();
                return -1;

            case OPT_MODE_BATCH:
                // -f <file>: stateful mode; exactly one run mode may be chosen
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode");
                }
                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
                po->cfg_file = args.OptionArg();
                break;

            case OPT_MODE_INTERACTIVE:
                // -i: stateless (interactive) mode
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode");
                }
                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
                break;

            case OPT_NO_KEYBOARD_INPUT  :
                po->preview.set_no_keyboard(true);
                break;

            case OPT_CLIENT_CFG_FILE :
                po->client_cfg_file = args.OptionArg();
                break;

            case OPT_PLAT_CFG_FILE :
                po->platform_cfg_file = args.OptionArg();
                break;

            case OPT_SINGLE_CORE :
                po->preview.setSingleCore(true);
                break;

            case OPT_IPV6:
                po->preview.set_ipv6_mode_enable(true);
                break;


            case OPT_LEARN :
                // deprecated alias; equivalent to --learn-mode 2 (IP option)
                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
                break;

            case OPT_LEARN_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
                    exit(-1);
                }
                po->m_learn_mode = (uint8_t)tmp_data;
                break;

            case OPT_LEARN_VERIFY :
                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
                if (po->m_learn_mode == 0) {
                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
                }
                po->preview.set_learn_and_verify_mode_enable(true);
                break;

            case OPT_L_PKT_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
                    exit(-1);
                }
                po->m_l_pkt_mode=(uint8_t)tmp_data;
                break;

            case OPT_NO_FLOW_CONTROL:
                po->preview.set_disable_flow_control_setting(true);
                break;
            case OPT_VLAN:
                // only acted upon later, and only in stateless mode
                opt_vlan_was_set = true;
                break;
            case OPT_LIMT_NUM_OF_PORTS :
                po->m_expected_portd =atoi(args.OptionArg());
                break;
            case  OPT_CORES  :
                po->preview.setCores(atoi(args.OptionArg()));
                break;
            case OPT_FLIP_CLIENT_SERVER :
                po->preview.setClientServerFlip(true);
                break;
            case OPT_NO_CLEAN_FLOW_CLOSE :
                po->preview.setNoCleanFlowClose(true);
                break;
            case OPT_FLOW_FLIP_CLIENT_SERVER :
                po->preview.setClientServerFlowFlip(true);
                break;
            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
                po->preview.setClientServerFlowFlipAddr(true);
                break;
            case OPT_NODE_DUMP:
                a=atoi(args.OptionArg());
                node_dump=1;
                po->preview.setFileWrite(false);
                break;
            case OPT_DUMP_INTERFACES:
                // collect interface names only on the first parsing pass
                if (first_time) {
                    rgpszArg = args.MultiArg(1);
                    while (rgpszArg != NULL) {
                        po->dump_interfaces.push_back(rgpszArg[0]);
                        rgpszArg = args.MultiArg(1);
                    }
                }
                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
                }
                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
                break;
            case OPT_MBUF_FACTOR:
                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
                break;
            case OPT_RATE_MULT :
                sscanf(args.OptionArg(),"%f", &po->m_factor);
                break;
            case OPT_DURATION :
                sscanf(args.OptionArg(),"%f", &po->m_duration);
                break;
            case OPT_PUB_DISABLE:
                po->preview.set_zmq_publish_enable(false);
                break;
            case OPT_PLATFORM_FACTOR:
                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
                break;
            case OPT_LATENCY :
                latency_was_set=true;
                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
                break;
            case OPT_LATENCY_MASK :
                // hex mask of cores allowed to send traffic
                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
                break;
            case OPT_ONLY_LATENCY :
                po->preview.setOnlyLatency(true);
                break;
            case OPT_NO_WATCHDOG :
                po->preview.setWDDisable(true);
                break;
            case OPT_ALLOW_COREDUMP :
                po->preview.setCoreDumpEnable(true);
                break;
            case  OPT_LATENCY_PREVIEW :
                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
                break;
            case  OPT_WAIT_BEFORE_TRAFFIC :
                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
                break;
            case OPT_PCAP:
                po->preview.set_pcap_mode_enable(true);
                break;
            case OPT_RX_CHECK :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_rx_check_sample=(uint16_t)tmp_data;
                po->preview.set_rx_check_enable(true);
                break;
            case OPT_RX_CHECK_HOPS :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_rx_check_hops = (uint16_t)tmp_data;
                break;
            case OPT_IO_MODE :
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_io_mode=(uint16_t)tmp_data;
                break;

            case OPT_VIRT_ONE_TX_RX_QUEUE:
                po->preview.set_vm_one_queue_enable(true);
                break;

            case OPT_PREFIX:
                po->prefix = args.OptionArg();
                break;

            case OPT_SEND_DEBUG_PKT:
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_debug_pkt_proto = (uint8_t)tmp_data;
                break;

            case OPT_CHECKSUM_OFFLOAD:
                po->preview.setChecksumOffloadEnable(true);
                break;

            case OPT_CLOSE:
                po->preview.setCloseEnable(true);
                break;
            case  OPT_ARP_REF_PER:
                sscanf(args.OptionArg(),"%d", &tmp_data);
                po->m_arp_ref_per=(uint16_t)tmp_data;
                break;
            case OPT_NO_OFED_CHECK:
                // handled earlier in the startup flow; nothing to record here
                break;

            default:
                printf("Error: option %s is not handled.\n\n", args.OptionText());
                usage();
                return -1;
                break;
            } // End of switch
        }// End of IF
        else {
            if (args.LastError() == SO_OPT_INVALID) {
                printf("Error: option %s is not recognized.\n\n", args.OptionText());
            } else if (args.LastError() == SO_ARG_MISSING) {
                printf("Error: option %s is expected to have argument.\n\n", args.OptionText());
            }
            usage();
            return -1;
        }
    } // End of while

    /* second phase: cross-option validation and derived settings */

    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
    }

    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
                  "If you think it is important, please open a defect or write to TRex mailing list\n");
    }

    /* any feature that needs the dedicated RX core enables it here */
    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
        || (CGlobalInfo::m_options.m_arp_ref_per != 0) || get_vm_one_queue_enable()) {
        po->set_rx_enabled();
    }

    if ( node_dump ){
        po->preview.setVMode(a);
    }

    /* if we have a platform factor we need to divide by it so we can still work with a normalized yaml profile */
    po->m_factor = po->m_factor/po->m_platform_factor;

    /* half the cores are reserved per port pair, one is the master core */
    uint32_t cores=po->preview.getCores();
    if ( cores > ((BP_MAX_CORES)/2-1) ) {
        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
        return -1;
    }


    if ( first_time ){
        /* only first time read the configuration file */
        if ( po->platform_cfg_file.length() >0  ) {
            if ( node_dump ){
                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
            }
            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
            if ( node_dump ){
                global_platform_cfg_info.Dump(stdout);
            }
        }else{
            /* no --cfg given: fall back to the system-wide default path */
            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
                if ( node_dump ){
                    printf("Using configuration file /etc/trex_cfg.yaml \n");
                }
                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
                if ( node_dump ){
                    global_platform_cfg_info.Dump(stdout);
                }
            }
        }
    }

    /* several stateful-only options are rejected in stateless mode */
    if ( get_is_stateless() ) {
        if ( opt_vlan_was_set ) {
            po->preview.set_vlan_mode_enable(true);
        }
        if (CGlobalInfo::m_options.client_cfg_file != "") {
            parse_err("Client config file is not supported with interactive (stateless) mode ");
        }
        if ( po->m_duration ) {
            parse_err("Duration is not supported with interactive (stateless) mode ");
        }

        if ( po->preview.get_is_rx_check_enable() ) {
            parse_err("Rx check is not supported with interactive (stateless) mode ");
        }

        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
            parse_err("Latency check is not supported with interactive (stateless) mode ");
        }

        if ( po->preview.getSingleCore() ){
            parse_err("Single core is not supported with interactive (stateless) mode ");
        }

    }
    else {
        if ( !po->m_duration ) {
            po->m_duration = 3600.0;
        }
    }
    return 0;
}
1182
1183static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1184    // copy, as arg parser sometimes changes the argv
1185    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1186    for(int i=0; i<argc; i++) {
1187        argv_copy[i] = strdup(argv[i]);
1188    }
1189    int ret = parse_options(argc, argv_copy, po, first_time);
1190
1191    // free
1192    for(int i=0; i<argc; i++) {
1193        free(argv_copy[i]);
1194    }
1195    free(argv_copy);
1196    return ret;
1197}
1198
1199int main_test(int argc , char * argv[]);
1200
1201
/* Default descriptor-ring threshold registers, consumed by port_cfg_t below. */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

/* 1G NIC variants of the TX thresholds */
#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1217
1218
/* Aggregates the DPDK per-port configuration structures (device config,
   RX queue, RX "drop" queue and TX queue) and fills them with TRex
   defaults. The driver-specific object can further adjust them through
   update_var() / update_global_config_fdir(). */
struct port_cfg_t {
public:
    port_cfg_t(){
        memset(&m_port_conf,0,sizeof(m_port_conf));
        memset(&m_rx_conf,0,sizeof(m_rx_conf));
        memset(&m_tx_conf,0,sizeof(m_tx_conf));
        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));

        /* normal RX queue: ring thresholds from the defaults above */
        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
        m_rx_conf.rx_free_thresh =32;

        /* alternate RX queue config with rx_drop_en set, so hardware drops
           packets when no RX descriptors are available */
        m_rx_drop_conf.rx_thresh.pthresh = 0;
        m_rx_drop_conf.rx_thresh.hthresh = 0;
        m_rx_drop_conf.rx_thresh.wthresh = 0;
        m_rx_drop_conf.rx_free_thresh =32;
        m_rx_drop_conf.rx_drop_en=1;

        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;

        /* accept jumbo frames up to 9K (+22 bytes of header slack) */
        m_port_conf.rxmode.jumbo_frame=1;
        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
        m_port_conf.rxmode.hw_strip_crc=1;
    }



    /* let the driver-specific object tweak the port configuration */
    inline void update_var(void){
        get_ex_drv()->update_configuration(this);
    }

    inline void update_global_config_fdir(void){
        get_ex_drv()->update_global_config_fdir(this);
    }

    /* enable FDIR */
    inline void update_global_config_fdir_10g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT_MAC_VLAN;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
        /* Offset of flexbytes field in RX packets (in 16-bit word units). */
        /* Note: divide by 2 to convert byte offset to word offset */
        /* 14 = L2 header size; the extra +4/+6/+8 selects a different flex
           word for stateless / ipv6 / ipv4 modes */
        if (get_is_stateless()) {
            m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        } else {
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
            } else {
                m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
            }

            /* Increment offset 4 bytes for the case where we add VLAN */
            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
            }
        }
        m_port_conf.fdir_conf.drop_queue=1;
    }

    inline void update_global_config_fdir_40g(void){
        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
    }

    struct rte_eth_conf     m_port_conf;     /* device-level configuration */
    struct rte_eth_rxconf   m_rx_conf;       /* normal RX queue */
    struct rte_eth_rxconf   m_rx_drop_conf;  /* RX queue with hw drop enabled */
    struct rte_eth_txconf   m_tx_conf;       /* TX queue */
};
1296
1297
1298/* this object is per core / per port / per queue
1299   each core will have 2 ports to send to
1300
1301
1302   port0                                port1
1303
1304   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1305
1306*/
1307
1308
/* Pairs a NIC register offset with its printable name, for the extended
   statistics dump below. */
typedef struct cnt_name_ {
    uint32_t offset;
    char * name;   /* points at a string literal (see MY_REG) */
}cnt_name_t ;

/* Build a cnt_name_t entry from a register macro: {value, stringized name} */
#define MY_REG(a) {a,(char *)#a}
1315
// Reset all software counters kept for this interface, including the
// per-flow RX statistics arrays and the saved previous-snapshot struct.
void CPhyEthIFStats::Clear() {
    ipackets = 0;
    ibytes = 0;
    f_ipackets = 0;
    f_ibytes = 0;
    opackets = 0;
    obytes = 0;
    ierrors = 0;
    oerrors = 0;
    imcasts = 0;
    rx_nombuf = 0;
    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
}
1331
1332// dump all counters (even ones that equal 0)
1333void CPhyEthIFStats::DumpAll(FILE *fd) {
1334#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1335#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1336    DP_A4(opackets);
1337    DP_A4(obytes);
1338    DP_A4(ipackets);
1339    DP_A4(ibytes);
1340    DP_A(ierrors);
1341    DP_A(oerrors);
1342}
1343
// dump all non zero counters
// Uses the DP_A macro defined in DumpAll() above.
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1357
// Dump the "ignored" traffic counters (field names suggest packets TRex
// exchanges on its own behalf, e.g. ARP — see m_tx_arp/m_rx_arp).
// Uses the DP_A4 macro defined in DumpAll() above.
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1366
1367// Clear the RX queue of an interface, dropping all packets
1368void CPhyEthIF::flush_rx_queue(void){
1369
1370    rte_mbuf_t * rx_pkts[32];
1371    int j=0;
1372    uint16_t cnt=0;
1373
1374    while (true) {
1375        j++;
1376        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1377        if ( cnt ) {
1378            int i;
1379            for (i=0; i<(int)cnt;i++) {
1380                rte_mbuf_t * m=rx_pkts[i];
1381                /*printf("rx--\n");
1382                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1383                rte_pktmbuf_free(m);
1384            }
1385        }
1386        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1387            break;
1388        }
1389    }
1390    if (cnt>0) {
1391        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1392    }
1393}
1394
1395
1396void CPhyEthIF::dump_stats_extended(FILE *fd){
1397
1398    cnt_name_t reg[]={
1399        MY_REG(IXGBE_GPTC), /* total packet */
1400        MY_REG(IXGBE_GOTCL), /* total bytes */
1401        MY_REG(IXGBE_GOTCH),
1402
1403        MY_REG(IXGBE_GPRC),
1404        MY_REG(IXGBE_GORCL),
1405        MY_REG(IXGBE_GORCH),
1406
1407
1408
1409        MY_REG(IXGBE_RXNFGPC),
1410        MY_REG(IXGBE_RXNFGBCL),
1411        MY_REG(IXGBE_RXNFGBCH),
1412        MY_REG(IXGBE_RXDGPC  ),
1413        MY_REG(IXGBE_RXDGBCL ),
1414        MY_REG(IXGBE_RXDGBCH  ),
1415        MY_REG(IXGBE_RXDDGPC ),
1416        MY_REG(IXGBE_RXDDGBCL ),
1417        MY_REG(IXGBE_RXDDGBCH  ),
1418        MY_REG(IXGBE_RXLPBKGPC ),
1419        MY_REG(IXGBE_RXLPBKGBCL),
1420        MY_REG(IXGBE_RXLPBKGBCH ),
1421        MY_REG(IXGBE_RXDLPBKGPC ),
1422        MY_REG(IXGBE_RXDLPBKGBCL),
1423        MY_REG(IXGBE_RXDLPBKGBCH ),
1424        MY_REG(IXGBE_TXDGPC      ),
1425        MY_REG(IXGBE_TXDGBCL     ),
1426        MY_REG(IXGBE_TXDGBCH     ),
1427        MY_REG(IXGBE_FDIRUSTAT ),
1428        MY_REG(IXGBE_FDIRFSTAT ),
1429        MY_REG(IXGBE_FDIRMATCH ),
1430        MY_REG(IXGBE_FDIRMISS )
1431
1432    };
1433    fprintf (fd," extended counters \n");
1434    int i;
1435    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1436        cnt_name_t *lp=&reg[i];
1437        uint32_t c=pci_reg_read(lp->offset);
1438        // xl710 bug. Counter values are -559038737 when they should be 0
1439        if (c && c != -559038737 ) {
1440            fprintf (fd," %s  : %d \n",lp->name,c);
1441        }
1442    }
1443}
1444
// Forward the RX-statistics capability query to the driver-specific object.
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1448
1449
1450
1451void CPhyEthIF::configure(uint16_t nb_rx_queue,
1452                          uint16_t nb_tx_queue,
1453                          const struct rte_eth_conf *eth_conf){
1454    int ret;
1455    ret = rte_eth_dev_configure(m_port_id,
1456                                nb_rx_queue,
1457                                nb_tx_queue,
1458                                eth_conf);
1459
1460    if (ret < 0)
1461        rte_exit(EXIT_FAILURE, "Cannot configure device: "
1462                 "err=%d, port=%u\n",
1463                 ret, m_port_id);
1464
1465    /* get device info */
1466    rte_eth_dev_info_get(m_port_id, &m_dev_info);
1467
1468    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
1469        /* check if the device supports TCP and UDP checksum offloading */
1470        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
1471            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
1472                     "port=%u\n",
1473                     m_port_id);
1474        }
1475        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
1476            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
1477                     "port=%u\n",
1478                     m_port_id);
1479        }
1480    }
1481}
1482
1483
1484/*
1485
  rx-queue 0 - default - all traffic not going to queue 1
  will be dropped, as this queue is disabled
1488
1489
1490  rx-queue 1 - Latency measurement packets will go here
1491
1492  pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
1493
1494*/
1495
1496void CPhyEthIF::configure_rx_duplicate_rules(){
1497
1498    if ( get_is_rx_filter_enable() ){
1499
1500        if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
1501            printf(" ERROR this feature is not supported with current hardware \n");
1502            exit(1);
1503        }
1504        get_ex_drv()->configure_rx_filter_rules(this);
1505    }
1506}
1507
1508
1509void CPhyEthIF::stop_rx_drop_queue() {
1510    // In debug mode, we want to see all packets. Don't want to disable any queue.
1511    if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1512        return;
1513    }
1514    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1515        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1516            printf(" ERROR latency feature is not supported with current hardware  \n");
1517            exit(1);
1518        }
1519    }
1520    get_ex_drv()->stop_queue(this, MAIN_DPDK_DATA_Q);
1521}
1522
1523
1524void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1525                               uint16_t nb_rx_desc,
1526                               unsigned int socket_id,
1527                               const struct rte_eth_rxconf *rx_conf,
1528                               struct rte_mempool *mb_pool){
1529
1530    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1531                                     nb_rx_desc,
1532                                     socket_id,
1533                                     rx_conf,
1534                                     mb_pool);
1535    if (ret < 0)
1536        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1537                 "err=%d, port=%u\n",
1538                 ret, m_port_id);
1539}
1540
1541
1542
1543void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1544                               uint16_t nb_tx_desc,
1545                               unsigned int socket_id,
1546                               const struct rte_eth_txconf *tx_conf){
1547
1548    int ret = rte_eth_tx_queue_setup( m_port_id,
1549                                      tx_queue_id,
1550                                      nb_tx_desc,
1551                                      socket_id,
1552                                      tx_conf);
1553    if (ret < 0)
1554        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1555                 "err=%d, port=%u queue=%u\n",
1556                 ret, m_port_id, tx_queue_id);
1557
1558}
1559
1560void CPhyEthIF::stop(){
1561    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1562        rte_eth_dev_stop(m_port_id);
1563        rte_eth_dev_close(m_port_id);
1564    }
1565}
1566
1567void CPhyEthIF::start(){
1568
1569    get_ex_drv()->clear_extended_stats(this);
1570
1571    int ret;
1572
1573    m_bw_tx.reset();
1574    m_bw_rx.reset();
1575
1576    m_stats.Clear();
1577    int i;
1578    for (i=0;i<10; i++ ) {
1579        ret = rte_eth_dev_start(m_port_id);
1580        if (ret==0) {
1581            return;
1582        }
1583        delay(1000);
1584    }
1585    if (ret < 0)
1586        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1587                 "err=%d, port=%u\n",
1588                 ret, m_port_id);
1589
1590}
1591
1592// Disabling flow control on interface
1593void CPhyEthIF::disable_flow_control(){
1594    int ret;
1595    // see trex-64 issue with loopback on the same NIC
1596    struct rte_eth_fc_conf fc_conf;
1597    memset(&fc_conf,0,sizeof(fc_conf));
1598    fc_conf.mode=RTE_FC_NONE;
1599    fc_conf.autoneg=1;
1600    fc_conf.pause_time=100;
1601    int i;
1602    for (i=0; i<5; i++) {
1603        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1604        if (ret==0) {
1605            break;
1606        }
1607        delay(1000);
1608    }
1609    if (ret < 0)
1610        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1611                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1612                 ret, m_port_id);
1613}
1614
1615/*
1616Get user frienly devices description from saved env. var
1617Changes certain attributes based on description
1618*/
1619void DpdkTRexPortAttr::update_description(){
1620    struct rte_pci_addr pci_addr;
1621    char pci[16];
1622    char * envvar;
1623    std::string pci_envvar_name;
1624    pci_addr = rte_eth_devices[m_port_id].pci_dev->addr;
1625    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
1626    intf_info_st.pci_addr = pci;
1627    pci_envvar_name = "pci" + intf_info_st.pci_addr;
1628    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
1629    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
1630    envvar = std::getenv(pci_envvar_name.c_str());
1631    if (envvar) {
1632        intf_info_st.description = envvar;
1633    } else {
1634        intf_info_st.description = "Unknown";
1635    }
1636    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
1637        flag_is_link_change_supported = false;
1638    }
1639    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
1640        flag_is_fc_change_supported = false;
1641        flag_is_led_change_supported = false;
1642    }
1643    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
1644        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
1645    }
1646}
1647
1648int DpdkTRexPortAttr::set_led(bool on){
1649    if (on) {
1650        return rte_eth_led_on(m_port_id);
1651    }else{
1652        return rte_eth_led_off(m_port_id);
1653    }
1654}
1655
1656int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1657    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1658    if (ret) {
1659        mode = -1;
1660        return ret;
1661    }
1662    mode = (int) fc_conf_tmp.mode;
1663    return 0;
1664}
1665
1666int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1667    if (!flag_is_fc_change_supported) {
1668        return -ENOTSUP;
1669    }
1670    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1671    if (ret) {
1672        return ret;
1673    }
1674    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1675    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1676}
1677
// Clear the NIC's extended (xstats) counters for this port.
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1681
// Fetch the extended-statistics values into xstats_values, indexed by the
// stat's DPDK id. Returns 0 on success, negative DPDK error otherwise.
int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
    // First call with NULL only queries the number of available xstats.
    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
    if (size < 0) {
        return size;
    }
    xstats_values_tmp.resize(size);
    xstats_values.resize(size);
    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
    if (size < 0) {
        return size;
    }
    for (int i=0; i<size; i++) {
        // NOTE(review): assumes every reported .id is < size (true for DPDK's
        // contiguous xstats ids); an out-of-range id would index past the vector.
        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
    }
    return 0;
}
1698
1699int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1700    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1701    if (size < 0) {
1702        return size;
1703    }
1704    xstats_names_tmp.resize(size);
1705    xstats_names.resize(size);
1706    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1707    if (size < 0) {
1708        return size;
1709    }
1710    for (int i=0; i<size; i++) {
1711        xstats_names[i] = xstats_names_tmp[i].name;
1712    }
1713    return 0;
1714}
1715
1716void DpdkTRexPortAttr::dump_link(FILE *fd){
1717    fprintf(fd,"port : %d \n",(int)m_port_id);
1718    fprintf(fd,"------------\n");
1719
1720    fprintf(fd,"link         : ");
1721    if (m_link.link_status) {
1722        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1723                (unsigned) m_link.link_speed,
1724                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1725                ("full-duplex") : ("half-duplex\n"));
1726    } else {
1727        fprintf(fd," Link Down\n");
1728    }
1729    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1730}
1731
// Refresh the cached DPDK device info (used e.g. for speed_capa below).
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1735
1736void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1737    uint32_t speed_capa = dev_info.speed_capa;
1738    if (speed_capa & ETH_LINK_SPEED_1G)
1739        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1740    if (speed_capa & ETH_LINK_SPEED_10G)
1741        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1742    if (speed_capa & ETH_LINK_SPEED_40G)
1743        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1744    if (speed_capa & ETH_LINK_SPEED_100G)
1745        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1746}
1747
// Refresh m_link with a blocking link query (may wait for autonegotiation).
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1751
// Non-blocking link refresh. Returns true if any link attribute
// (speed/duplex/autoneg/status) changed since the last poll.
bool DpdkTRexPortAttr::update_link_status_nowait(){
    rte_eth_link new_link;
    bool changed = false;
    rte_eth_link_get_nowait(m_port_id, &new_link);

    if (new_link.link_speed != m_link.link_speed ||
                new_link.link_duplex != m_link.link_duplex ||
                    new_link.link_autoneg != m_link.link_autoneg ||
                        new_link.link_status != m_link.link_status) {
        changed = true;

        /* in case of link status change - notify the dest object */
        if (new_link.link_status != m_link.link_status) {
            // NOTE(review): on_link_down() is invoked on ANY status flip,
            // including down->up — confirm that is the intended semantics.
            on_link_down();
        }
    }

    m_link = new_link;
    return changed;
}
1772
1773int DpdkTRexPortAttr::add_mac(char * mac){
1774    struct ether_addr mac_addr;
1775    for (int i=0; i<6;i++) {
1776        mac_addr.addr_bytes[i] =mac[i];
1777    }
1778    return rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
1779}
1780
1781int DpdkTRexPortAttr::set_promiscuous(bool enable){
1782    if (enable) {
1783        rte_eth_promiscuous_enable(m_port_id);
1784    }else{
1785        rte_eth_promiscuous_disable(m_port_id);
1786    }
1787    return 0;
1788}
1789
1790int DpdkTRexPortAttr::set_link_up(bool up){
1791    if (up) {
1792        return rte_eth_dev_set_link_up(m_port_id);
1793    }else{
1794        return rte_eth_dev_set_link_down(m_port_id);
1795    }
1796}
1797
1798bool DpdkTRexPortAttr::get_promiscuous(){
1799    int ret=rte_eth_promiscuous_get(m_port_id);
1800    if (ret<0) {
1801        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1802                 "err=%d, port=%u\n",
1803                 ret, m_port_id);
1804
1805    }
1806    return ( ret?true:false);
1807}
1808
1809
// Copy the port's burned-in (HW) MAC address into mac_addr.
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1813
// Delegate flow-director statistics dumping to the driver-specific handler.
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1817
// Dump all non-zero ixgbe hardware counters to fd, one line per counter.
// DP_A1 prints a scalar counter; DP_A2 prints each non-zero element of a
// counter array of size m. Zero counters are skipped to keep output short.
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1904
// Snapshot the counters accumulated during pre-test (e.g. ARP resolution)
// into m_ignore_stats so they can be subtracted from the real test results,
// then zero the live counters.
void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
    // reading m_stats, so drivers saving prev in m_stats will be updated.
    // Actually, we want m_stats to be cleared
    get_ex_drv()->get_extended_stats(this, &m_stats);

    m_ignore_stats.ipackets = m_stats.ipackets;
    m_ignore_stats.ibytes = m_stats.ibytes;
    m_ignore_stats.opackets = m_stats.opackets;
    m_ignore_stats.obytes = m_stats.obytes;
    m_stats.ipackets = 0;
    m_stats.opackets = 0;
    m_stats.ibytes = 0;
    m_stats.obytes = 0;

    // ARP counters come from the pre-test run, not from the NIC counters.
    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;

    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
        m_ignore_stats.dump(stdout);
    }
}
1927
1928void CPhyEthIF::dump_stats(FILE *fd){
1929
1930    update_counters();
1931
1932    fprintf(fd,"port : %d \n",(int)m_port_id);
1933    fprintf(fd,"------------\n");
1934    m_stats.DumpAll(fd);
1935    //m_stats.Dump(fd);
1936    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
1937    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
1938}
1939
// Reset both the HW counters and our software mirror of them.
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
1944
1945class CCorePerPort  {
1946public:
1947    CCorePerPort (){
1948        m_tx_queue_id=0;
1949        m_len=0;
1950        int i;
1951        for (i=0; i<MAX_PKT_BURST; i++) {
1952            m_table[i]=0;
1953        }
1954        m_port=0;
1955    }
1956    uint8_t                 m_tx_queue_id;
1957    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
1958    uint16_t                m_len;
1959    rte_mbuf_t *            m_table[MAX_PKT_BURST];
1960    CPhyEthIF  *            m_port;
1961};
1962
1963
1964#define MAX_MBUF_CACHE 100
1965
1966
1967/* per core/gbe queue port for trasmitt */
1968class CCoreEthIF : public CVirtualIF {
1969public:
1970    enum {
1971     INVALID_Q_ID = 255
1972    };
1973
1974public:
1975
1976    CCoreEthIF(){
1977        m_mbuf_cache=0;
1978    }
1979
1980    bool Create(uint8_t             core_id,
1981                uint8_t            tx_client_queue_id,
1982                CPhyEthIF  *        tx_client_port,
1983                uint8_t            tx_server_queue_id,
1984                CPhyEthIF  *        tx_server_port,
1985                uint8_t             tx_q_id_lat);
1986    void Delete();
1987
1988    virtual int open_file(std::string file_name){
1989        return (0);
1990    }
1991
1992    virtual int close_file(void){
1993        return (flush_tx_queue());
1994    }
1995    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
1996                                                       , CCorePerPort *  lp_port
1997                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
1998    virtual int send_node(CGenNode * node);
1999    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
2000    virtual int flush_tx_queue(void);
2001    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
2002
2003    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
2004
2005    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
2006
2007    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
2008
2009    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
2010    void GetCoreCounters(CVirtualIFPerSideStats *stats);
2011    void DumpCoreStats(FILE *fd);
2012    void DumpIfStats(FILE *fd);
2013    static void DumpIfCfgHeader(FILE *fd);
2014    void DumpIfCfg(FILE *fd);
2015
2016    socket_id_t get_socket_id(){
2017        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
2018    }
2019
2020    const CCorePerPort * get_ports() {
2021        return m_ports;
2022    }
2023
2024protected:
2025
2026    int send_burst(CCorePerPort * lp_port,
2027                   uint16_t len,
2028                   CVirtualIFPerSideStats  * lp_stats);
2029    int send_pkt(CCorePerPort * lp_port,
2030                 rte_mbuf_t *m,
2031                 CVirtualIFPerSideStats  * lp_stats);
2032    int send_pkt_lat(CCorePerPort * lp_port,
2033                 rte_mbuf_t *m,
2034                 CVirtualIFPerSideStats  * lp_stats);
2035
2036    void add_vlan(rte_mbuf_t *m, uint16_t vlan_id);
2037
2038protected:
2039    uint8_t      m_core_id;
2040    uint16_t     m_mbuf_cache;
2041    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
2042    CNodeRing *  m_ring_to_rx;
2043
2044} __rte_cache_aligned; ;
2045
/* Stateless-mode TX interface: overrides node sending to add per-flow
   statistics handling and PCAP-replay (slow path) on top of CCoreEthIF. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
protected:
    // Slow-path dispatch; currently handles PCAP packet nodes only.
    int handle_slow_path_node(CGenNode *node);
    int send_pcap_node(CGenNodePCAP *pcap_node);
};
2055
// Bind this DP core to its two TX ports/queues (client and server side)
// and to the messaging ring used to forward packets to the RX core.
bool CCoreEthIF::Create(uint8_t             core_id,
                        uint8_t             tx_client_queue_id,
                        CPhyEthIF  *        tx_client_port,
                        uint8_t             tx_server_queue_id,
                        CPhyEthIF  *        tx_server_port,
                        uint8_t tx_q_id_lat ) {
    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
    m_ports[SERVER_SIDE].m_port        = tx_server_port;
    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
    m_core_id = core_id;

    // NOTE(review): ring index is core_id-1 — DP core ids appear to be
    // 1-based while the ring array is 0-based; confirm against CMsgIns.
    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
    assert( m_ring_to_rx);
    return (true);
}
2075
2076int CCoreEthIF::flush_tx_queue(void){
2077    /* flush both sides */
2078    pkt_dir_t dir;
2079    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2080        CCorePerPort * lp_port = &m_ports[dir];
2081        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2082        if ( likely(lp_port->m_len > 0) ) {
2083            send_burst(lp_port, lp_port->m_len, lp_stats);
2084            lp_port->m_len = 0;
2085        }
2086    }
2087
2088    return 0;
2089}
2090
2091void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2092    stats->Clear();
2093    pkt_dir_t   dir ;
2094    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2095        stats->Add(&m_stats[dir]);
2096    }
2097}
2098
2099void CCoreEthIF::DumpCoreStats(FILE *fd){
2100    fprintf (fd,"------------------------ \n");
2101    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2102    fprintf (fd,"------------------------ \n");
2103
2104    CVirtualIFPerSideStats stats;
2105    GetCoreCounters(&stats);
2106    stats.Dump(stdout);
2107}
2108
// Print the column header matching DumpIfCfg()'s output.
void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
    fprintf (fd," ------------------------------------------\n");
}
2113
// Print one row of core/port/queue assignments (see DumpIfCfgHeader()).
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2123
2124
2125void CCoreEthIF::DumpIfStats(FILE *fd){
2126
2127    fprintf (fd,"------------------------ \n");
2128    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2129    fprintf (fd,"------------------------ \n");
2130
2131    const char * t[]={"client","server"};
2132    pkt_dir_t   dir ;
2133    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2134        CCorePerPort * lp=&m_ports[dir];
2135        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2136        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2137        fprintf (fd," ---------------------------- \n");
2138        lpstats->Dump(fd);
2139    }
2140}
2141
2142#define DELAY_IF_NEEDED
2143
// Transmit `len` staged mbufs from lp_port's table. With DELAY_IF_NEEDED
// (the default) we spin/retry until the NIC accepts everything, counting
// queue-full events; otherwise excess packets are dropped and freed.
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    // TX ring full: back off 1us and retry the remainder until all are sent.
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2172
2173
2174int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2175                         rte_mbuf_t      *m,
2176                         CVirtualIFPerSideStats  * lp_stats
2177                         ){
2178
2179    uint16_t len = lp_port->m_len;
2180    lp_port->m_table[len]=m;
2181    len++;
2182    /* enough pkts to be sent */
2183    if (unlikely(len == MAX_PKT_BURST)) {
2184        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2185        len = 0;
2186    }
2187    lp_port->m_len = len;
2188
2189    return (0);
2190}
2191
// Send a single latency packet immediately on the dedicated latency queue
// (bypassing the burst table so its timestamp stays accurate).
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    // Queue full: spin with 1us backoff until the packet is accepted.
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    // Non-blocking variant: drop and free the packet if the queue is full.
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2216
2217void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2218                              rte_mbuf_t      *m){
2219    CCorePerPort *  lp_port=&m_ports[dir];
2220    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2221    send_pkt(lp_port,m,lp_stats);
2222    /* flush */
2223    send_burst(lp_port,lp_port->m_len,lp_stats);
2224    lp_port->m_len = 0;
2225}
2226
// Send a packet belonging to a flow-stat stream, updating per-flow TX
// counters. Payload-rule streams (hw_id >= MAX_FLOW_STATS) additionally get
// a latency header (seq/hw_id/flow_seq/magic/timestamp) and go out on the
// dedicated latency queue; IP-id-rule streams go out on the normal queue.
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // NOTE(review): fsp_head is assumed to be set non-NULL by
        // alloc_flow_stat_mbuf(); it is dereferenced unchecked below.
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        // Fault injection for error-counter testing: skip / rewind seq numbers.
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // Timestamp as late as possible for accurate latency measurement.
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2280
2281int CCoreEthIFStateless::send_node(CGenNode * no) {
2282    /* if a node is marked as slow path - single IF to redirect it to slow path */
2283    if (no->get_is_slow_path()) {
2284        return handle_slow_path_node(no);
2285    }
2286
2287    CGenNodeStateless * node_sl=(CGenNodeStateless *) no;
2288
2289    /* check that we have mbuf  */
2290    rte_mbuf_t *    m;
2291
2292    pkt_dir_t dir=(pkt_dir_t)node_sl->get_mbuf_cache_dir();
2293    CCorePerPort *  lp_port=&m_ports[dir];
2294    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2295    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2296        m=node_sl->cache_mbuf_array_get_cur();
2297        rte_pktmbuf_refcnt_update(m,1);
2298    }else{
2299        m=node_sl->get_cache_mbuf();
2300
2301        if (m) {
2302            /* cache case */
2303            rte_pktmbuf_refcnt_update(m,1);
2304        }else{
2305            m=node_sl->alloc_node_with_vm();
2306            assert(m);
2307        }
2308    }
2309
2310    if (unlikely(node_sl->is_stat_needed())) {
2311        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2312            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2313            // assert here just to make sure.
2314            assert(1);
2315        }
2316        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2317    } else {
2318        send_pkt(lp_port,m,lp_stats);
2319    }
2320
2321    return (0);
2322};
2323
2324int CCoreEthIFStateless::send_pcap_node(CGenNodePCAP *pcap_node) {
2325    rte_mbuf_t *m = pcap_node->get_pkt();
2326    if (!m) {
2327        return (-1);
2328    }
2329
2330    pkt_dir_t dir = (pkt_dir_t)pcap_node->get_mbuf_dir();
2331    CCorePerPort *lp_port=&m_ports[dir];
2332    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2333
2334    send_pkt(lp_port, m, lp_stats);
2335
2336    return (0);
2337}
2338
2339/**
2340 * slow path code goes here
2341 *
2342 */
2343int CCoreEthIFStateless::handle_slow_path_node(CGenNode * no) {
2344
2345    if (no->m_type == CGenNode::PCAP_PKT) {
2346        return send_pcap_node((CGenNodePCAP *)no);
2347    }
2348
2349    return (-1);
2350}
2351
2352void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2353
2354    assert(cfg);
2355
2356    /* take the right direction config */
2357    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2358
2359    /* dst mac */
2360    if (cfg_dir.has_dst_mac_addr()) {
2361        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2362    }
2363
2364    /* src mac */
2365    if (cfg_dir.has_src_mac_addr()) {
2366        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2367    }
2368
2369    /* VLAN */
2370    if (cfg_dir.has_vlan()) {
2371        add_vlan(m, cfg_dir.get_vlan());
2372    }
2373}
2374
2375
// Request HW VLAN tag insertion for this mbuf.
// NOTE(review): '=' overwrites any offload flags already set on the mbuf;
// if callers ever set other ol_flags first, this should likely be '|='.
void CCoreEthIF::add_vlan(rte_mbuf_t *m, uint16_t vlan_id) {
    m->ol_flags = PKT_TX_VLAN_PKT;
    m->l2_len   = 14; // ethernet header length; the tag is inserted after it
    m->vlan_tci = vlan_id;
}
2381
2382/**
2383 * slow path features goes here (avoid multiple IFs)
2384 *
2385 */
2386void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2387
2388
2389    /* MAC ovverride */
2390    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2391        /* client side */
2392        if ( node->is_initiator_pkt() ) {
2393            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2394        }
2395    }
2396
2397    /* flag is faster than checking the node pointer (another cacheline) */
2398    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2399        apply_client_cfg(node->m_client_cfg, m, dir, p);
2400    }
2401
2402}
2403
2404int CCoreEthIF::send_node(CGenNode * node) {
2405
2406#ifdef OPT_REPEAT_MBUF
2407
2408    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2409        pkt_dir_t       dir;
2410        rte_mbuf_t *    m=node->get_cache_mbuf();
2411        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2412        CCorePerPort *  lp_port=&m_ports[dir];
2413        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2414        rte_pktmbuf_refcnt_update(m,1);
2415        send_pkt(lp_port,m,lp_stats);
2416        return (0);
2417    }
2418#endif
2419
2420    CFlowPktInfo *  lp=node->m_pkt_info;
2421    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2422
2423    pkt_dir_t       dir;
2424    bool            single_port;
2425
2426    dir         = node->cur_interface_dir();
2427    single_port = node->get_is_all_flow_from_same_dir() ;
2428
2429
2430    if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2431        /* which vlan to choose 0 or 1*/
2432        uint8_t vlan_port = (node->m_src_ip &1);
2433        uint16_t vlan_id  = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2434
2435        if (likely( vlan_id >0 ) ) {
2436            dir = dir ^ vlan_port;
2437        }else{
2438            /* both from the same dir but with VLAN0 */
2439            vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2440            dir = dir ^ 0;
2441        }
2442
2443        add_vlan(m, vlan_id);
2444    }
2445
2446    CCorePerPort *lp_port = &m_ports[dir];
2447    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2448
2449    if (unlikely(m==0)) {
2450        lp_stats->m_tx_alloc_error++;
2451        return(0);
2452    }
2453
2454    /* update mac addr dest/src 12 bytes */
2455    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2456    uint8_t p_id = lp_port->m_port->get_port_id();
2457
2458    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2459
2460     /* when slowpath features are on */
2461    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2462        handle_slowpath_features(node, m, p, dir);
2463    }
2464
2465
2466    if ( unlikely( node->is_rx_check_enabled() ) ) {
2467        lp_stats->m_tx_rx_check_pkt++;
2468        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2469        lp_stats->m_template.inc_template( node->get_template_id( ));
2470    }else{
2471
2472#ifdef OPT_REPEAT_MBUF
2473        // cache only if it is not sample as this is more complex mbuf struct
2474        if ( unlikely( node->can_cache_mbuf() ) ) {
2475            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2476                m_mbuf_cache++;
2477                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2478                    /* limit the number of object to cache */
2479                    node->set_mbuf_cache_dir( dir);
2480                    node->set_cache_mbuf(m);
2481                    rte_pktmbuf_refcnt_update(m,1);
2482                }
2483            }
2484        }
2485#endif
2486
2487    }
2488
2489    /*printf("send packet -- \n");
2490      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2491
2492    /* send the packet */
2493    send_pkt(lp_port,m,lp_stats);
2494    return (0);
2495}
2496
2497
2498int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2499    assert(p);
2500    assert(dir<2);
2501
2502    CCorePerPort *  lp_port=&m_ports[dir];
2503    uint8_t p_id=lp_port->m_port->get_port_id();
2504    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2505    return (0);
2506}
2507
2508pkt_dir_t
2509CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2510
2511    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2512        if (m_ports[dir].m_port->get_port_id() == port_id) {
2513            return dir;
2514        }
2515    }
2516
2517    return (CS_INVALID);
2518}
2519
2520class CLatencyHWPort : public CPortLatencyHWBase {
2521public:
2522    void Create(CPhyEthIF  * p,
2523                uint8_t tx_queue,
2524                uint8_t rx_queue){
2525        m_port=p;
2526        m_tx_queue_id=tx_queue;
2527        m_rx_queue_id=rx_queue;
2528    }
2529
2530    virtual int tx(rte_mbuf_t *m) {
2531        rte_mbuf_t *tx_pkts[2];
2532
2533        tx_pkts[0] = m;
2534        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2535            /* vlan mode is the default */
2536            /* set the vlan */
2537            m->ol_flags = PKT_TX_VLAN_PKT;
2538            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
2539            m->l2_len   =14;
2540        }
2541        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
2542        if ( res == 0 ) {
2543            rte_pktmbuf_free(m);
2544            //printf(" queue is full for latency packet !!\n");
2545            return (-1);
2546
2547        }
2548#if 0
2549        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
2550        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
2551        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
2552        utl_DumpBuffer(stdout,p1,pkt_size1,0);
2553#endif
2554
2555        return (0);
2556    }
2557
2558
2559    /* nothing special with HW implementation */
2560    virtual int tx_latency(rte_mbuf_t *m) {
2561        return tx(m);
2562    }
2563
2564    virtual rte_mbuf_t * rx(){
2565        rte_mbuf_t * rx_pkts[1];
2566        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
2567        if (cnt) {
2568            return (rx_pkts[0]);
2569        }else{
2570            return (0);
2571        }
2572    }
2573
2574
2575    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
2576                              uint16_t nb_pkts){
2577        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
2578        return (cnt);
2579    }
2580
2581
2582private:
2583    CPhyEthIF  * m_port;
2584    uint8_t      m_tx_queue_id ;
2585    uint8_t      m_rx_queue_id;
2586};
2587
2588
2589class CLatencyVmPort : public CPortLatencyHWBase {
2590public:
2591    void Create(uint8_t port_index,
2592                CNodeRing *ring,
2593                CLatencyManager *mgr,
2594                CPhyEthIF  *p) {
2595
2596        m_dir        = (port_index % 2);
2597        m_ring_to_dp = ring;
2598        m_mgr        = mgr;
2599        m_port       = p;
2600    }
2601
2602
2603    virtual int tx(rte_mbuf_t *m) {
2604        return tx_common(m, false);
2605    }
2606
2607    virtual int tx_latency(rte_mbuf_t *m) {
2608        return tx_common(m, true);
2609    }
2610
2611    virtual rte_mbuf_t * rx() {
2612        rte_mbuf_t * rx_pkts[1];
2613        uint16_t cnt = m_port->rx_burst(0, rx_pkts, 1);
2614        if (cnt) {
2615            return (rx_pkts[0]);
2616        } else {
2617            return (0);
2618        }
2619    }
2620
2621    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts) {
2622        uint16_t cnt = m_port->rx_burst(0, rx_pkts, nb_pkts);
2623        return (cnt);
2624    }
2625
2626private:
2627      virtual int tx_common(rte_mbuf_t *m, bool fix_timestamp) {
2628
2629        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2630            /* vlan mode is the default */
2631            /* set the vlan */
2632            m->ol_flags = PKT_TX_VLAN_PKT;
2633            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
2634            m->l2_len   =14;
2635        }
2636
2637        /* allocate node */
2638        CGenNodeLatencyPktInfo *node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
2639        if (!node) {
2640            return (-1);
2641        }
2642
2643        node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
2644        node->m_dir      = m_dir;
2645        node->m_pkt      = m;
2646
2647        if (fix_timestamp) {
2648            node->m_latency_offset = m_mgr->get_latency_header_offset();
2649            node->m_update_ts = 1;
2650        } else {
2651            node->m_update_ts = 0;
2652        }
2653
2654        if ( m_ring_to_dp->Enqueue((CGenNode*)node) != 0 ){
2655            return (-1);
2656        }
2657
2658        return (0);
2659    }
2660
2661    CPhyEthIF  * m_port;
2662    uint8_t                          m_dir;
2663    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
2664    CLatencyManager *                m_mgr;
2665};
2666
2667
2668
/* Counters and derived rates for a single physical port, filled by the stats
 * collection path and consumed by the console dump and JSON publisher. */
class CPerPortStats {
public:
    uint64_t opackets;   // TX packets
    uint64_t obytes;     // TX bytes
    uint64_t ipackets;   // RX packets
    uint64_t ibytes;     // RX bytes
    uint64_t ierrors;    // RX errors
    uint64_t oerrors;    // TX errors
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];       // per flow-stat-id TX counters
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];  // previous snapshot (presumably for delta/rate computation - not shown here)

    // derived rates (units implied by names: bps/pps)
    float     m_total_tx_bps;
    float     m_total_tx_pps;

    float     m_total_rx_bps;
    float     m_total_rx_pps;

    float     m_cpu_util;              // CPU utilization attributed to this port
    bool      m_link_up = true;        // current link state
    bool      m_link_was_down = false; // latched: link dropped at some point
};
2690
/* Aggregated global counters across all ports/cores, plus per-port slots.
 * Dumped to the console (Dump/DumpAllPorts) and serialized for the async
 * publisher (dump_json). */
class CGlobalStats {
public:
    enum DumpFormat {
        dmpSTANDARD,   // one section per port
        dmpTABLE       // one column per port
    };

    // totals across all ports
    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    // error/drop accounting
    uint64_t  m_total_alloc_error;
    uint64_t  m_total_queue_full;
    uint64_t  m_total_queue_drop;

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    // NAT learn-mode counters
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;

    float     m_socket_util;

    // derived/expected rates (bps/pps/cps as named)
    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;
    float m_cpu_util_raw;
    float m_rx_cpu_util;
    float m_bw_per_core;
    uint8_t m_threads;

    uint32_t      m_num_of_ports;            // number of valid entries in m_port
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    void dump_json(std::string & json, bool baseline);
private:
    // helpers that render a single "name":value JSON fragment
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2754
2755std::string CGlobalStats::get_field(const char *name, float &f){
2756    char buff[200];
2757    if(f <= -10.0 or f >= 10.0)
2758        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2759    else
2760        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2761    return (std::string(buff));
2762}
2763
2764std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2765    char buff[200];
2766    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2767    return (std::string(buff));
2768}
2769
2770std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2771    char buff[200];
2772    if(f <= -10.0 or f >= 10.0)
2773        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2774    else
2775        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2776    return (std::string(buff));
2777}
2778
2779std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2780    char buff[200];
2781    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2782    return (std::string(buff));
2783}
2784
2785
/* Serialize the global counters into a flat JSON record named "trex-global"
 * for the async publisher. When 'baseline' is set, the record is tagged so
 * subscribers can treat it as a reference snapshot. The object is built by
 * manual string concatenation; each helper emits a trailing comma, which is
 * absorbed by the final dummy "unknown" field. */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    /* high-resolution timestamp plus its frequency so consumers can convert */
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

/* stringize the member name so the JSON key matches the field identifier */
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    /* NAT learn-mode counters */
    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    /* per-port fields are suffixed with "-<port>" by GET_FIELD_PORT */
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    /* dummy terminator field - absorbs the trailing comma of the last entry */
    json+="\"unknown\":0}}"  ;
}
2859
/* Print the aggregated (all-ports) statistics summary to 'fd'. When NAT
 * learn mode is active, NAT-specific counters are appended inline to the
 * corresponding rate lines. */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    /* each rate line below either ends with a NAT counter (learn mode) or a bare newline */
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    /* error counters are printed only when non-zero to keep the dump short */
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
2944
2945
2946void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
2947    int i;
2948    int port_to_show=m_num_of_ports;
2949    if (port_to_show>4) {
2950        port_to_show=4;
2951        fprintf (fd," per port - limited to 4   \n");
2952    }
2953
2954
2955    if ( mode== dmpSTANDARD ){
2956        fprintf (fd," --------------- \n");
2957        for (i=0; i<(int)port_to_show; i++) {
2958            CPerPortStats * lp=&m_port[i];
2959            fprintf(fd,"port : %d ",(int)i);
2960            if ( ! lp->m_link_up ) {
2961                fprintf(fd," (link DOWN)");
2962            }
2963            fprintf(fd,"\n------------\n");
2964#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2965#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2966            GS_DP_A4(opackets);
2967            GS_DP_A4(obytes);
2968            GS_DP_A4(ipackets);
2969            GS_DP_A4(ibytes);
2970            GS_DP_A(ierrors);
2971            GS_DP_A(oerrors);
2972            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2973        }
2974    }else{
2975        fprintf(fd," %10s ","ports");
2976        for (i=0; i<(int)port_to_show; i++) {
2977            CPerPortStats * lp=&m_port[i];
2978            if ( lp->m_link_up ) {
2979                fprintf(fd,"| %15d ",i);
2980            } else {
2981                std::string port_with_state = "(link DOWN) " + std::to_string(i);
2982                fprintf(fd,"| %15s ",port_with_state.c_str());
2983            }
2984        }
2985        fprintf(fd,"\n");
2986        fprintf(fd," -----------------------------------------------------------------------------------------\n");
2987        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
2988        };
2989        for (i=0; i<7; i++) {
2990            fprintf(fd," %10s ",names[i].c_str());
2991            int j=0;
2992            for (j=0; j<port_to_show;j++) {
2993                CPerPortStats * lp=&m_port[j];
2994                uint64_t cnt;
2995                switch (i) {
2996                case 0:
2997                    cnt=lp->opackets;
2998                    fprintf(fd,"| %15lu ",cnt);
2999
3000                    break;
3001                case 1:
3002                    cnt=lp->obytes;
3003                    fprintf(fd,"| %15lu ",cnt);
3004
3005                    break;
3006                case 2:
3007                    cnt=lp->ipackets;
3008                    fprintf(fd,"| %15lu ",cnt);
3009
3010                    break;
3011                case 3:
3012                    cnt=lp->ibytes;
3013                    fprintf(fd,"| %15lu ",cnt);
3014
3015                    break;
3016                case 4:
3017                    cnt=lp->ierrors;
3018                    fprintf(fd,"| %15lu ",cnt);
3019
3020                    break;
3021                case 5:
3022                    cnt=lp->oerrors;
3023                    fprintf(fd,"| %15lu ",cnt);
3024
3025                    break;
3026                case 6:
3027                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
3028                    break;
3029                default:
3030                    cnt=0xffffff;
3031                }
3032            } /* ports */
3033            fprintf(fd, "\n");
3034        }/* fields*/
3035    }
3036
3037
3038}
3039
/* Central singleton-style object of the TRex process: owns the physical
 * ports, per-core virtual interfaces, the latency/RX cores, the stats
 * aggregation and the async (ZMQ) publisher. Declaration only - method
 * bodies are defined later in this file. */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;


    /* defaults only; real sizing happens in Create()/the *_prob_init() calls */
    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    /* setup/teardown and probing of ports, cores and queues */
    bool Create();
    void Delete();
    int  ixgbe_prob_init();
    int  cores_prob_init();
    int  queues_prob_init();
    int  ixgbe_start();
    int  ixgbe_rx_queue_flush();
    void ixgbe_configure_mg();
    void rx_sl_configure();
    bool is_all_links_are_up(bool dump=false);
    void pre_test();

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        /* first cause wins; later requests are ignored */
        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    /* the RX thread (when enabled) always runs on the last core */
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    /* async publishing to the ZMQ channel */
    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    /* stats collection and console/JSON dumps */
    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

    bool lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port; // Number of TX queues per port
    uint32_t    m_cores_to_dual_ports; /* number of TX cores allocated for each port pair */
    uint16_t    m_rx_core_tx_q_id; /* TX q used by rx core */
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock;   // guards control-plane access

    TrexMonitor         m_monitor;

    shutdown_rc_e       m_mark_for_shutdown;  // set by mark_for_shutdown(), polled by the control plane

public:
    TrexStateless       *m_trex_stateless;

};
3223
3224// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
3225void CGlobalTRex::pre_test() {
3226    CPretest pretest(m_max_ports);
3227    bool resolve_needed = false;
3228    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
3229    bool need_grat_arp[TREX_MAX_PORTS];
3230
3231    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
3232        std::vector<ClientCfgCompactEntry *> conf;
3233        m_fl.get_client_cfg_ip_list(conf);
3234
3235        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
3236        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
3237        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3238            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
3239        }
3240
3241        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
3242            uint8_t port = (*it)->get_port();
3243            uint16_t vlan = (*it)->get_vlan();
3244            uint32_t count = (*it)->get_count();
3245            uint32_t dst_ip = (*it)->get_dst_ip();
3246            uint32_t src_ip = (*it)->get_src_ip();
3247
3248            for (int i = 0; i < count; i++) {
3249                //??? handle ipv6;
3250                if ((*it)->is_ipv4()) {
3251                    pretest.add_next_hop(port, dst_ip + i, vlan);
3252                }
3253            }
3254            if (!src_ip) {
3255                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
3256                if (!src_ip) {
3257                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
3258                            , port, ip_to_str(dst_ip).c_str(), vlan);
3259                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
3260                    exit(1);
3261                }
3262            }
3263            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
3264            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
3265            m_mg.add_grat_arp_src(ipv4);
3266
3267            delete *it;
3268        }
3269        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3270            fprintf(stdout, "*******Pretest for client cfg********\n");
3271            pretest.dump(stdout);
3272            }
3273    } else {
3274        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3275            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
3276                resolve_needed = true;
3277            } else {
3278                resolve_needed = false;
3279            }
3280
3281            need_grat_arp[port_id] = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip() != 0;
3282
3283            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
3284                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
3285                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
3286
3287            if (resolve_needed) {
3288                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
3289                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
3290            }
3291        }
3292    }
3293
3294    for (int port_id = 0; port_id < m_max_ports; port_id++) {
3295        CPhyEthIF *pif = &m_ports[port_id];
3296        // Configure port to send all packets to software
3297        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
3298    }
3299
3300
3301    pretest.send_grat_arp_all();
3302    bool ret;
3303    int count = 0;
3304    bool resolve_failed = false;
3305    do {
3306        ret = pretest.resolve_all();
3307        count++;
3308    } while ((ret != true) && (count < 10));
3309    if (ret != true) {
3310        resolve_failed = true;
3311    }
3312
3313    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3314        fprintf(stdout, "*******Pretest after resolving ********\n");
3315        pretest.dump(stdout);
3316    }
3317
3318    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
3319        CManyIPInfo pretest_result;
3320        pretest.get_results(pretest_result);
3321        if (resolve_failed) {
3322            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
3323            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
3324                   ip = pretest_result.get_next()) {
3325                if (ip->resolve_needed()) {
3326                    ip->dump(stderr, "  ");
3327                }
3328            }
3329            exit(1);
3330        }
3331        m_fl.set_client_config_resolved_macs(pretest_result);
3332        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
3333            m_fl.dump_client_config(stdout);
3334        }
3335
3336        bool port_found[TREX_MAX_PORTS];
3337        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3338            port_found[port_id] = false;
3339        }
3340        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
3341        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
3342        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
3343            // Use first MAC/vlan we see on each port
3344            uint8_t port_id = ip->get_port();
3345            uint16_t vlan = ip->get_vlan();
3346            if ( ! port_found[port_id]) {
3347                port_found[port_id] = true;
3348                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
3349                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
3350            }
3351        }
3352    } else {
3353        uint8_t mac[ETHER_ADDR_LEN];
3354        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3355            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
3356                // we don't have dest MAC. Get it from what we resolved.
3357                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
3358                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();
3359
3360                if (!pretest.get_mac(port_id, ip, vlan, mac)) {
3361                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
3362                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);
3363
3364                    if (get_is_stateless()) {
3365                        continue;
3366                    } else {
3367                        exit(1);
3368                    }
3369                }
3370
3371
3372
3373                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
3374                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
3375                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
3376                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
3377                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
3378                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
3379                                      , port_id);
3380                    m_mg.add_grat_arp_src(ipv4);
3381                }
3382            }
3383
3384            // update statistics baseline, so we can ignore what happened in pre test phase
3385            CPhyEthIF *pif = &m_ports[port_id];
3386            CPreTestStats pre_stats = pretest.get_stats(port_id);
3387            pif->set_ignore_stats_base(pre_stats);
3388
3389            // Configure port back to normal mode. Only relevant packets handled by software.
3390            CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, false);
3391
3392           }
3393        }
3394
3395    /* for stateless only - set port mode */
3396    if (get_is_stateless()) {
3397        for (int port_id = 0; port_id < m_max_ports; port_id++) {
3398            uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip();
3399            uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
3400            const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;
3401
3402            /* L3 mode */
3403            if (src_ipv4 && dg) {
3404                if (memcmp(dst_mac, empty_mac, 6) == 0) {
3405                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg);
3406                } else {
3407                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg, dst_mac);
3408                }
3409
3410            /* L2 mode */
3411            } else if (CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.is_set) {
3412                m_trex_stateless->get_port_by_id(port_id)->set_l2_mode(dst_mac);
3413            }
3414        }
3415    }
3416
3417
3418}
3419
3420/**
3421 * check for a single core
3422 *
3423 * @author imarom (19-Nov-15)
3424 *
3425 * @param thread_id
3426 */
3427void
3428CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3429
3430    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3431
3432    /* fast path check */
3433    if ( likely ( ring->isEmpty() ) ) {
3434        return;
3435    }
3436
3437    while ( true ) {
3438        CGenNode * node = NULL;
3439        if (ring->Dequeue(node) != 0) {
3440            break;
3441        }
3442        assert(node);
3443
3444        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3445        msg->handle();
3446        delete msg;
3447    }
3448
3449}
3450
3451/**
3452 * check for messages that arrived from DP to CP
3453 *
3454 */
3455void
3456CGlobalTRex::check_for_dp_messages() {
3457
3458    /* for all the cores - check for a new message */
3459    for (int i = 0; i < get_cores_tx(); i++) {
3460        check_for_dp_message_from_core(i);
3461    }
3462}
3463
3464bool CGlobalTRex::is_all_links_are_up(bool dump){
3465    bool all_link_are=true;
3466    int i;
3467    for (i=0; i<m_max_ports; i++) {
3468        CPhyEthIF * _if=&m_ports[i];
3469        _if->get_port_attr()->update_link_status();
3470        if ( dump ){
3471            _if->dump_stats(stdout);
3472        }
3473        if ( _if->get_port_attr()->is_link_up() == false){
3474            all_link_are=false;
3475            break;
3476        }
3477    }
3478    return (all_link_are);
3479}
3480
3481void CGlobalTRex::try_stop_all_cores(){
3482
3483    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3484    send_message_all_dp(dp_msg);
3485    delete dp_msg;
3486
3487    if (get_is_stateless()) {
3488        TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3489        send_message_to_rx(rx_msg);
3490    }
3491
3492    // no need to delete rx_msg. Deleted by receiver
3493    bool all_core_finished = false;
3494    int i;
3495    for (i=0; i<20; i++) {
3496        if ( is_all_cores_finished() ){
3497            all_core_finished =true;
3498            break;
3499        }
3500        delay(100);
3501    }
3502    if ( all_core_finished ){
3503        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3504        printf(" All cores stopped !! \n");
3505    }else{
3506        printf(" ERROR one of the DP core is stucked !\n");
3507    }
3508}
3509
3510
3511int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3512
3513    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3514    int i;
3515
3516    for (i=0; i<max_threads; i++) {
3517        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3518        ring->Enqueue((CGenNode*)msg->clone());
3519    }
3520    return (0);
3521}
3522
3523int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3524    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3525    ring->Enqueue((CGenNode *) msg);
3526
3527    return (0);
3528}
3529
3530
3531int  CGlobalTRex::ixgbe_rx_queue_flush(){
3532    int i;
3533    for (i=0; i<m_max_ports; i++) {
3534        CPhyEthIF * _if=&m_ports[i];
3535        _if->flush_rx_queue();
3536    }
3537    return (0);
3538}
3539
3540
// init stateful rx core
// Builds a CLatencyManagerCfg and creates m_mg (the latency manager used by
// the stateful RX core). Picks the latency packet rate, then wires one vport
// per physical port - indirect ring-based vports in VM (single-queue) mode,
// direct vports otherwise.
void CGlobalTRex::ixgbe_configure_mg(void) {
    int i;
    CLatencyManagerCfg mg_cfg;
    mg_cfg.m_max_ports = m_max_ports;

    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;

    if ( latency_rate ) {
        // explicit latency packet rate requested on the command line
        mg_cfg.m_cps = (double)latency_rate ;
    } else {
        // If RX core needed, we need something to make the scheduler running.
        // If nothing configured, send 1 CPS latency measurement packets.
        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
            mg_cfg.m_cps = 1.0;
        } else {
            // periodic gratuitous ARP already keeps the scheduler busy
            mg_cfg.m_cps = 0;
        }
    }

    if ( get_vm_one_queue_enable() ) {
        /* vm mode, indirect queues  */
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if = &m_ports[i];
            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();

            // two ports share one DP thread (dual-port pairing), hence i>>1
            uint8_t thread_id = (i>>1);

            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg, _if);

            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
        }

    }else{
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if=&m_ports[i];
            // NOTE(review): stats dump during configuration looks like leftover
            // debug output - confirm it is intentional
            _if->dump_stats(stdout);
            // direct vport transmitting latency packets on the RX core's TX queue
            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);

            mg_cfg.m_ports[i] =&m_latency_vports[i];
        }
    }


    m_mg.Create(&mg_cfg);
    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
}
3589
3590// init m_rx_sl object for stateless rx core
3591void CGlobalTRex::rx_sl_configure(void) {
3592    CRxSlCfg rx_sl_cfg;
3593    int i;
3594
3595    rx_sl_cfg.m_max_ports = m_max_ports;
3596    rx_sl_cfg.m_num_crc_fix_bytes = get_ex_drv()->get_num_crc_fix_bytes();
3597
3598    if ( get_vm_one_queue_enable() ) {
3599        /* vm mode, indirect queues  */
3600        for (i=0; i < m_max_ports; i++) {
3601            CPhyEthIF * _if = &m_ports[i];
3602            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3603            uint8_t thread_id = (i >> 1);
3604            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3605            m_latency_vm_vports[i].Create(i, r, &m_mg, _if);
3606            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3607        }
3608    } else {
3609        for (i = 0; i < m_max_ports; i++) {
3610            CPhyEthIF * _if = &m_ports[i];
3611            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3612            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3613        }
3614    }
3615
3616    m_rx_sl.create(rx_sl_cfg);
3617}
3618
// Bring up all DPDK ports: create/configure each port's RX/TX queues (VM
// single-queue vs. bare-metal layout, with an optional RSS drop-queue
// workaround), start the ports, verify links, and wire each TX core's
// virtual interface (m_cores_vif) to its dual-port pair. Returns 0.
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {
        // mbufs must come from the NUMA socket the port is attached to
        socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
        assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        // non-zero => driver needs N RSS queues as a drop workaround (e.g. Mellanox)
        uint16_t rx_rss = get_ex_drv()->enable_rss_drop_workaround();

        if ( get_vm_one_queue_enable() ) {
            /* VMXNET3 does claim to support 16K but somehow does not work */
            /* reduce to 2000 */
            m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000;
            /* In VM case, there is one tx q and one rx q */
            _if->configure(1, 1, &m_port_cfg.m_port_conf);
            // Only 1 rx queue, so use it for everything
            m_rx_core_tx_q_id = 0;
            _if->set_rx_queue(0);
            _if->rx_queue_setup(0, RTE_TEST_RX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
            // 1 TX queue in VM case
            _if->tx_queue_setup(0, RTE_TEST_TX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_tx_conf);
        } else {
            // 2 rx queues.
            // TX queues: 1 for each core handling the port pair + 1 for latency pkts + 1 for use by RX core

            uint16_t rx_queues;

            if (rx_rss==0) {
                rx_queues=2;
            }else{
                rx_queues=rx_rss;
            }

            _if->configure(rx_queues, m_cores_to_dual_ports + 2, &m_port_cfg.m_port_conf);
            // RX core's TX queue sits right after the per-core data queues
            m_rx_core_tx_q_id = m_cores_to_dual_ports;

            if ( rx_rss ) {
                int j=0;
                // set up every RSS queue except the filter queue as a drop queue
                for (j=0;j<rx_rss; j++) {
                        if (j==MAIN_DPDK_RX_Q){
                            continue;
                        }
                        /* drop queue */
                        _if->rx_queue_setup(j,
                                        RTE_TEST_RX_DESC_DEFAULT_MLX,
                                        socket_id,
                                        &m_port_cfg.m_rx_conf,
                                        CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);


                }
            }else{
                 // setup RX drop queue
                _if->rx_queue_setup(MAIN_DPDK_DATA_Q,
                                    RTE_TEST_RX_DESC_DEFAULT,
                                    socket_id,
                                    &m_port_cfg.m_rx_conf,
                                    CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
                // setup RX filter queue
                _if->set_rx_queue(MAIN_DPDK_RX_Q);
            }

            // filter queue uses 9k mbufs so large latency packets fit
            _if->rx_queue_setup(MAIN_DPDK_RX_Q,
                                RTE_TEST_RX_LATENCY_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);

            // one TX queue per sending core + latency + RX core (m_max_queues_per_port)
            for (int qid = 0; qid < m_max_queues_per_port; qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);
            }
        }

        if ( rx_rss ){
            // steer all RSS buckets to the single filter queue
            _if->configure_rss_redirect_table(rx_rss,MAIN_DPDK_RX_Q);
        }

        _if->stats_clear();
        _if->start();
        _if->configure_rx_duplicate_rules();

        // flow control is disabled unless the user opted out or the NIC can't change it
        if ( ! get_vm_one_queue_enable()  && ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " One of the links is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    // latency manager (m_mg) is only needed in stateful mode
    if (! get_is_stateless()) {
        ixgbe_configure_mg();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if ( get_vm_one_queue_enable() ) {
        // single queue in VM mode: latency shares queue 0
        lat_q_id = 0;
    } else {
        lat_q_id = get_cores_tx() / get_base_num_cores() + 1;
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        // pick the stateless or stateful per-core interface implementation
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    fprintf(stdout, "RX core uses TX queue number %d on all ports\n", m_rx_core_tx_q_id);
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3788
3789static void trex_termination_handler(int signum);
3790
3791void CGlobalTRex::register_signals() {
3792    struct sigaction action;
3793
3794    /* handler */
3795    action.sa_handler = trex_termination_handler;
3796
3797    /* blocked signals during handling */
3798    sigemptyset(&action.sa_mask);
3799    sigaddset(&action.sa_mask, SIGINT);
3800    sigaddset(&action.sa_mask, SIGTERM);
3801
3802    /* no flags */
3803    action.sa_flags = 0;
3804
3805    /* register */
3806    sigaction(SIGINT,  &action, NULL);
3807    sigaction(SIGTERM, &action, NULL);
3808}
3809
// Top-level initialization: signals, YAML pre-load (stateful), ZMQ publisher,
// port/core/queue probing, CP<->DP rings, mbuf pools, port bring-up, and
// (in stateless mode) the TrexStateless object + RX core config.
// Returns false only if the ZMQ publisher fails to start.
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    // stateful mode reads the traffic YAML up-front to pick up global flags
    if (!get_is_stateless()) {
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    // propagate VLAN setting from the YAML into the global preview flags
    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
    }
    /* End update pre flags */

    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    // NAT-info and latency nodes are overlaid on CGenNode in the rings,
    // so their sizes must match exactly
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */

    // size the RX mbuf pool to cover all RX descriptors on all ports
    uint32_t rx_mbuf = 0 ;

    if ( get_vm_one_queue_enable() ) {
        rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
    }else{
        rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
    }

    CGlobalInfo::init_pools(rx_mbuf);
    // NOTE(review): ixgbe_start() return value is ignored - it currently
    // always returns 0 (fatal errors exit internally via rte_exit)
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);

        rx_sl_configure();
    }

    return (true);

}
3884void CGlobalTRex::Delete(){
3885
3886    m_zmq_publisher.Delete();
3887    m_fl.Delete();
3888
3889    if (m_trex_stateless) {
3890        delete m_trex_stateless;
3891        m_trex_stateless = NULL;
3892    }
3893}
3894
3895
3896
3897int  CGlobalTRex::ixgbe_prob_init(void){
3898
3899    m_max_ports  = rte_eth_dev_count();
3900    if (m_max_ports == 0)
3901        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
3902
3903    printf(" Number of ports found: %d \n",m_max_ports);
3904
3905    if ( m_max_ports %2 !=0 ) {
3906        rte_exit(EXIT_FAILURE, " Number of ports %d should be even, mask the one port in the configuration file  \n, ",
3907                 m_max_ports);
3908    }
3909
3910    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
3911        rte_exit(EXIT_FAILURE, " Maximum ports supported are %d, use the configuration file to set the expected number of ports   \n",TREX_MAX_PORTS);
3912    }
3913
3914    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
3915        rte_exit(EXIT_FAILURE, " There are %d ports you expected more %d,use the configuration file to set the expected number of ports   \n",
3916                 m_max_ports,
3917                 CGlobalInfo::m_options.get_expected_ports());
3918    }
3919    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
3920        /* limit the number of ports */
3921        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
3922    }
3923    assert(m_max_ports <= TREX_MAX_PORTS);
3924
3925    struct rte_eth_dev_info dev_info;
3926    rte_eth_dev_info_get((uint8_t) 0,&dev_info);
3927
3928    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
3929        printf("\n\n");
3930        printf("if_index : %d \n",dev_info.if_index);
3931        printf("driver name : %s \n",dev_info.driver_name);
3932        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
3933        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
3934        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
3935        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
3936        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);
3937
3938        printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
3939        printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
3940    }
3941
3942
3943
3944    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
3945        printf(" Error: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
3946               ,dev_info.driver_name);
3947        exit(1);
3948    }
3949
3950    int i;
3951    struct rte_eth_dev_info dev_info1;
3952
3953    for (i=1; i<m_max_ports; i++) {
3954        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
3955        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
3956            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
3957            exit(1);
3958        }
3959    }
3960
3961    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
3962    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();
3963
3964    // check if firmware version is new enough
3965    for (i = 0; i < m_max_ports; i++) {
3966        if (m_drv->verify_fw_ver(i) < 0) {
3967            // error message printed by verify_fw_ver
3968            exit(1);
3969        }
3970    }
3971
3972    m_port_cfg.update_var();
3973
3974    if ( get_is_rx_filter_enable() ){
3975        m_port_cfg.update_global_config_fdir();
3976    }
3977
3978    if ( get_vm_one_queue_enable() ) {
3979        /* verify that we have only one thread/core per dual- interface */
3980        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
3981            printf(" ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue \n");
3982            exit(1);
3983        }
3984    }
3985    return (0);
3986}
3987
3988int  CGlobalTRex::cores_prob_init(){
3989    m_max_cores = rte_lcore_count();
3990    assert(m_max_cores>0);
3991    return (0);
3992}
3993
3994int  CGlobalTRex::queues_prob_init(){
3995
3996    if (m_max_cores < 2) {
3997        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
3998    }
3999
4000    assert((m_max_ports>>1) <= get_cores_tx() );
4001
4002    m_cores_mul = CGlobalInfo::m_options.preview.getCores();
4003
4004    m_cores_to_dual_ports  = m_cores_mul;
4005
4006    /* core 0 - control
4007       -core 1 - port 0/1
4008       -core 2 - port 2/3
4009       -core 3 - port 0/1
4010       -core 4 - port 2/3
4011
4012       m_cores_to_dual_ports = 2;
4013    */
4014
4015    // One q for each core allowed to send on this port + 1 for latency q (Used in stateless) + 1 for RX core.
4016    m_max_queues_per_port  = m_cores_to_dual_ports + 2;
4017
4018    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
4019        rte_exit(EXIT_FAILURE,
4020                 "Error: Number of TX queues exceeds %d. Try running with lower -c <val> \n",BP_MAX_TX_QUEUE);
4021    }
4022
4023    assert(m_max_queues_per_port>0);
4024    return (0);
4025}
4026
4027
4028void CGlobalTRex::dump_config(FILE *fd){
4029    fprintf(fd," number of ports         : %u \n",m_max_ports);
4030    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
4031    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
4032}
4033
4034
4035void CGlobalTRex::dump_links_status(FILE *fd){
4036    for (int i=0; i<m_max_ports; i++) {
4037        m_ports[i].get_port_attr()->update_link_status_nowait();
4038        m_ports[i].get_port_attr()->dump_link(fd);
4039    }
4040}
4041
4042bool CGlobalTRex::lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id) {
4043    for (int i = 0; i < m_max_ports; i++) {
4044        if (memcmp(m_ports[i].get_port_attr()->get_layer_cfg().get_ether().get_src(), mac, 6) == 0) {
4045            port_id = i;
4046            return true;
4047        }
4048    }
4049
4050    return false;
4051}
4052
// Print the end-of-run summary to 'fd': aggregated HW TX/RX counters per
// port, software TX counters per core, drop estimate, ignored ARP traffic,
// and (when enabled) latency results.
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;           // HW packets transmitted (all ports)
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;            // HW packets received (all ports)
    uint64_t sw_pkt_out=0;        // packets handed to HW by software
    uint64_t sw_pkt_out_err=0;    // drops + queue-full + alloc errors
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;          // ARP traffic excluded from the baseline
    uint64_t rx_arp = 0;

    // aggregate software-side counters from every TX core
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    // aggregate hardware counters and ignored-ARP stats from every port
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    // latency packets are sent by software too; include them in SW totals
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    // rx > tx can happen (e.g. external traffic); report 0 drop but warn if
    // it exceeds tx by more than 1%
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    for (i=0; i<m_max_ports; i++) {
        if ( m_stats.m_port[i].m_link_was_down ) {
            fprintf (fd, " WARNING: Link was down at port %d during test (at least for some time)!\n", i);
        }
    }
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
4128
4129
4130void CGlobalTRex::update_stats(){
4131
4132    int i;
4133    for (i=0; i<m_max_ports; i++) {
4134        CPhyEthIF * _if=&m_ports[i];
4135        _if->update_counters();
4136    }
4137    uint64_t total_open_flows=0;
4138
4139
4140    CFlowGenListPerThread   * lpt;
4141    for (i=0; i<get_cores_tx(); i++) {
4142        lpt = m_fl.m_threads_info[i];
4143        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4144    }
4145    m_last_total_cps = m_cps.add(total_open_flows);
4146
4147}
4148
4149tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
4150    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4151}
4152
// read stats. Return read value, and clear.
// Re-aggregates the per-core TX counters for (port, index) into the global
// stat, returns the delta since the previous read, and advances the baseline
// so subsequent reads start fresh. When is_lat is set, also resets the
// per-core latency record (index is offset by MAX_FLOW_STATS for payload
// rules, so the subtraction maps back into m_lat_data).
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    // restart aggregation from zero before summing per-core counters
    m_stats.m_port[port].m_tx_per_flow[index].clear();

    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        // each thread owns a dual-port pair (port0, port0+1)
        port0 = lpt->getDualPortId() * 2;
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
4179
4180void CGlobalTRex::get_stats(CGlobalStats & stats){
4181
4182    int i;
4183    float total_tx=0.0;
4184    float total_rx=0.0;
4185    float total_tx_pps=0.0;
4186    float total_rx_pps=0.0;
4187
4188    stats.m_total_tx_pkts  = 0;
4189    stats.m_total_rx_pkts  = 0;
4190    stats.m_total_tx_bytes = 0;
4191    stats.m_total_rx_bytes = 0;
4192    stats.m_total_alloc_error=0;
4193    stats.m_total_queue_full=0;
4194    stats.m_total_queue_drop=0;
4195
4196
4197    stats.m_num_of_ports = m_max_ports;
4198    stats.m_cpu_util = m_fl.GetCpuUtil();
4199    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
4200    if (get_is_stateless()) {
4201        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
4202    }
4203    stats.m_threads      = m_fl.m_threads_info.size();
4204
4205    for (i=0; i<m_max_ports; i++) {
4206        CPhyEthIF * _if=&m_ports[i];
4207        CPerPortStats * stp=&stats.m_port[i];
4208
4209        CPhyEthIFStats & st =_if->get_stats();
4210
4211        stp->opackets = st.opackets;
4212        stp->obytes   = st.obytes;
4213        stp->ipackets = st.ipackets;
4214        stp->ibytes   = st.ibytes;
4215        stp->ierrors  = st.ierrors;
4216        stp->oerrors  = st.oerrors;
4217        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
4218        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
4219        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
4220        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();
4221        stp->m_link_up        = _if->get_port_attr()->is_link_up();
4222        stp->m_link_was_down |= ! _if->get_port_attr()->is_link_up();
4223
4224        stats.m_total_tx_pkts  += st.opackets;
4225        stats.m_total_rx_pkts  += st.ipackets;
4226        stats.m_total_tx_bytes += st.obytes;
4227        stats.m_total_rx_bytes += st.ibytes;
4228
4229        total_tx +=_if->get_last_tx_rate();
4230        total_rx +=_if->get_last_rx_rate();
4231        total_tx_pps +=_if->get_last_tx_pps_rate();
4232        total_rx_pps +=_if->get_last_rx_pps_rate();
4233        // IP ID rules
4234        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
4235            stats.m_port[i].m_tx_per_flow[flow].clear();
4236        }
4237        // payload rules
4238        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
4239            stats.m_port[i].m_tx_per_flow[flow].clear();
4240        }
4241
4242        stp->m_cpu_util = get_cpu_util_per_interface(i);
4243
4244    }
4245
4246    uint64_t total_open_flows=0;
4247    uint64_t total_active_flows=0;
4248
4249    uint64_t total_clients=0;
4250    uint64_t total_servers=0;
4251    uint64_t active_sockets=0;
4252    uint64_t total_sockets=0;
4253
4254
4255    uint64_t total_nat_time_out =0;
4256    uint64_t total_nat_time_out_wait_ack =0;
4257    uint64_t total_nat_no_fid   =0;
4258    uint64_t total_nat_active   =0;
4259    uint64_t total_nat_syn_wait = 0;
4260    uint64_t total_nat_open     =0;
4261    uint64_t total_nat_learn_error=0;
4262
4263    CFlowGenListPerThread   * lpt;
4264    stats.m_template.Clear();
4265    for (i=0; i<get_cores_tx(); i++) {
4266        lpt = m_fl.m_threads_info[i];
4267        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4268        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;
4269
4270        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
4271            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
4272        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
4273            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;
4274
4275        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
4276            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;
4277
4278        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
4279        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);
4280
4281
4282        total_clients   += lpt->m_smart_gen.getTotalClients();
4283        total_servers   += lpt->m_smart_gen.getTotalServers();
4284        active_sockets  += lpt->m_smart_gen.ActiveSockets();
4285        total_sockets   += lpt->m_smart_gen.MaxSockets();
4286
4287        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
4288        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
4289        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
4290        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
4291        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
4292        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
4293        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
4294        uint8_t port0 = lpt->getDualPortId() *2;
4295        // IP ID rules
4296        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
4297            stats.m_port[port0].m_tx_per_flow[flow] +=
4298                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
4299            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
4300                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
4301        }
4302        // payload rules
4303        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
4304            stats.m_port[port0].m_tx_per_flow[flow] +=
4305                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
4306            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
4307                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
4308        }
4309
4310    }
4311
4312    stats.m_total_nat_time_out = total_nat_time_out;
4313    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
4314    stats.m_total_nat_no_fid   = total_nat_no_fid;
4315    stats.m_total_nat_active   = total_nat_active;
4316    stats.m_total_nat_syn_wait = total_nat_syn_wait;
4317    stats.m_total_nat_open     = total_nat_open;
4318    stats.m_total_nat_learn_error     = total_nat_learn_error;
4319
4320    stats.m_total_clients = total_clients;
4321    stats.m_total_servers = total_servers;
4322    stats.m_active_sockets = active_sockets;
4323
4324    if (total_sockets != 0) {
4325        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
4326    } else {
4327        stats.m_socket_util = 0;
4328    }
4329
4330
4331
4332    float drop_rate=total_tx-total_rx;
4333    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
4334        drop_rate=0.0;
4335    }
4336    float pf =CGlobalInfo::m_options.m_platform_factor;
4337    stats.m_platform_factor = pf;
4338
4339    stats.m_active_flows = total_active_flows*pf;
4340    stats.m_open_flows   = total_open_flows*pf;
4341    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;
4342
4343    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
4344    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
4345    stats.m_tx_pps        = total_tx_pps*pf;
4346    stats.m_rx_pps        = total_rx_pps*pf;
4347    stats.m_tx_cps        = m_last_total_cps*pf;
4348    if(stats.m_cpu_util < 0.0001)
4349        stats.m_bw_per_core = 0;
4350    else
4351        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);
4352
4353    stats.m_tx_expected_cps        = m_expected_cps*pf;
4354    stats.m_tx_expected_pps        = m_expected_pps*pf;
4355    stats.m_tx_expected_bps        = m_expected_bps*pf;
4356}
4357
4358float
4359CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4360    CPhyEthIF * _if = &m_ports[port_id];
4361
4362    float    tmp = 0;
4363    uint8_t  cnt = 0;
4364    for (const auto &p : _if->get_core_list()) {
4365        uint8_t core_id = p.first;
4366        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4367        if (lp->is_port_active(port_id)) {
4368            tmp += lp->m_cpu_cp_u.GetVal();
4369            cnt++;
4370        }
4371    }
4372
4373    return ( (cnt > 0) ? (tmp / cnt) : 0);
4374
4375}
4376
4377bool CGlobalTRex::sanity_check(){
4378
4379    CFlowGenListPerThread   * lpt;
4380    uint32_t errors=0;
4381    int i;
4382    for (i=0; i<get_cores_tx(); i++) {
4383        lpt = m_fl.m_threads_info[i];
4384        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4385    }
4386
4387    if ( errors ) {
4388        printf(" ERRORs sockets allocation errors! \n");
4389        printf(" you should allocate more clients in the pool \n");
4390        return(true);
4391    }
4392    return ( false);
4393}
4394
4395
4396/* dump the template info */
4397void CGlobalTRex::dump_template_info(std::string & json){
4398    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4399    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4400
4401    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4402    int i;
4403    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4404        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4405        json+="\""+ r->m_name+"\"";
4406        json+=",";
4407    }
4408    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4409    json+="]}" ;
4410}
4411
4412void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){
4413
4414    update_stats();
4415    get_stats(m_stats);
4416
4417    if (format==CGlobalStats::dmpTABLE) {
4418        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
4419            switch (m_io_modes.m_pp_mode ){
4420            case CTrexGlobalIoMode::ppDISABLE:
4421                fprintf(fd,"\n+Per port stats disabled \n");
4422                break;
4423            case CTrexGlobalIoMode::ppTABLE:
4424                fprintf(fd,"\n-Per port stats table \n");
4425                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
4426                break;
4427            case CTrexGlobalIoMode::ppSTANDARD:
4428                fprintf(fd,"\n-Per port stats - standard\n");
4429                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
4430                break;
4431            };
4432
4433            switch (m_io_modes.m_ap_mode ){
4434            case   CTrexGlobalIoMode::apDISABLE:
4435                fprintf(fd,"\n+Global stats disabled \n");
4436                break;
4437            case   CTrexGlobalIoMode::apENABLE:
4438                fprintf(fd,"\n-Global stats enabled \n");
4439                m_stats.DumpAllPorts(fd);
4440                break;
4441            };
4442        }
4443    }else{
4444        /* at exit , always need to dump it in standartd mode for scripts*/
4445        m_stats.Dump(fd,format);
4446        m_stats.DumpAllPorts(fd);
4447    }
4448
4449}
4450
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    /* Publish the full set of async JSON messages over the ZMQ publisher.
       sync_now - refresh the counters (update_stats/get_stats) first
       baseline - forwarded to the stats dumpers (marks a baseline sample) */
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    /* global stats */
    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);


    /* template info exists only in stateful mode */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    /* stateless flow-stat and latency snapshots; published only when
       dump_json() reports there is something to publish */
    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4496
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    /* publish a barrier event so async subscribers can sync on 'key' */
    m_zmq_publisher.publish_barrier(key);
}
4501
4502void
4503CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4504    Json::Value data;
4505    data["port_id"] = port_id;
4506    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4507
4508    _attr->to_json(data["attr"]);
4509
4510    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4511}
4512
void
CGlobalTRex::handle_slow_path() {
    /* Periodic slow-path maintenance, called from the master loop:
       link-status polling, keyboard handling, sanity checks, console
       stats rendering and async publishing. */
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard input may switch display modes or request shutdown */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    /* abort the run on socket allocation errors */
    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* clear the terminal (ANSI escape sequences) before redrawing */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        /* display just disabled: clear the screen once */
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        /* remaining test duration, clamped at zero */
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    /* memory-pool view refreshes only every 4th slow-path tick */
    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* latency / rx-check views exist only in stateful mode with RX enabled */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            if (CGlobalInfo::m_options.m_latency_rate != 0) {
                switch (m_io_modes.m_l_mode) {
                case CTrexGlobalIoMode::lDISABLE:
                    fprintf(stdout, "\n+Latency stats disabled \n");
                    break;
                case CTrexGlobalIoMode::lENABLE:
                    fprintf(stdout, "\n-Latency stats enabled \n");
                    m_mg.DumpShort(stdout);
                    break;
                case CTrexGlobalIoMode::lENABLE_Extended:
                    fprintf(stdout, "\n-Latency stats extended \n");
                    m_mg.Dump(stdout);
                    break;
                }
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    /* NAT flow-table view; only meaningful in TCP-ACK learn mode */
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4628
4629
4630void
4631CGlobalTRex::handle_fast_path() {
4632    /* check from messages from DP */
4633    check_for_dp_messages();
4634
4635    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4636    for (int i = 0; i < 1000; i++) {
4637        m_fl.UpdateFast();
4638
4639        if (get_is_stateless()) {
4640            m_rx_sl.update_cpu_util();
4641        }else{
4642            m_mg.update_fast();
4643        }
4644
4645        rte_pause();
4646    }
4647
4648
4649    if ( is_all_cores_finished() ) {
4650        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4651    }
4652}
4653
4654
4655/**
4656 * shutdown sequence
4657 *
4658 */
4659void CGlobalTRex::shutdown() {
4660    std::stringstream ss;
4661    ss << " *** TRex is shutting down - cause: '";
4662
4663    switch (m_mark_for_shutdown) {
4664
4665    case SHUTDOWN_TEST_ENDED:
4666        ss << "test has ended'";
4667        break;
4668
4669    case SHUTDOWN_CTRL_C:
4670        ss << "CTRL + C detected'";
4671        break;
4672
4673    case SHUTDOWN_SIGINT:
4674        ss << "received signal SIGINT'";
4675        break;
4676
4677    case SHUTDOWN_SIGTERM:
4678        ss << "received signal SIGTERM'";
4679        break;
4680
4681    case SHUTDOWN_RPC_REQ:
4682        ss << "server received RPC 'shutdown' request'";
4683        break;
4684
4685    default:
4686        assert(0);
4687    }
4688
4689    /* report */
4690    std::cout << ss.str() << "\n";
4691
4692    /* first stop the WD */
4693    TrexWatchDog::getInstance().stop();
4694
4695    /* stateless shutdown */
4696    if (get_is_stateless()) {
4697        m_trex_stateless->shutdown();
4698    }
4699
4700    if (!is_all_cores_finished()) {
4701        try_stop_all_cores();
4702    }
4703
4704    m_mg.stop();
4705
4706    delay(1000);
4707
4708    /* shutdown drivers */
4709    for (int i = 0; i < m_max_ports; i++) {
4710        m_ports[i].stop();
4711    }
4712
4713    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4714        /* we should stop latency and exit to stop agents */
4715        Delete();
4716        utl_termio_reset();
4717        exit(-1);
4718    }
4719}
4720
4721
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    /* Master (control-plane) loop: runs the fast path every
       FASTPATH_DELAY_MS and the slow path every SLOWPATH_DELAY_MS,
       holding m_cp_lock while handlers run. Returns 0 after shutdown. */

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    /* register this thread with the watchdog */
    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }

        /* sleep with the lock released so others can acquire m_cp_lock */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.tickle();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4771
4772
4773
4774int CGlobalTRex::run_in_rx_core(void){
4775
4776    rte_thread_setname(pthread_self(), "TRex RX");
4777
4778    if (get_is_stateless()) {
4779        m_sl_rx_running = true;
4780        m_rx_sl.start();
4781        m_sl_rx_running = false;
4782    } else {
4783        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4784            m_sl_rx_running = false;
4785            m_mg.start(0, true);
4786        }
4787    }
4788
4789    return (0);
4790}
4791
int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
    /* DP core entry point: runs the per-thread flow generator
       (stateless daemon or stateful generation) until it finishes,
       then raises m_signal[virt_core_id]. Always returns 0. */
    std::stringstream ss;

    ss << "Trex DP core " << int(virt_core_id);
    rte_thread_setname(pthread_self(), ss.str().c_str());

    CPreviewMode *lp=&CGlobalInfo::m_options.preview;
    /* single-core mode: core 2 does not generate traffic */
    if ( lp->getSingleCore() &&
         (virt_core_id==2 ) &&
         (lp-> getCores() ==1) ){
        printf(" bypass this core \n");
        m_signal[virt_core_id]=1;
        return (0);
    }


    assert(m_fl_was_init);
    CFlowGenListPerThread   * lpt;

    /* virt core ids are 1-based; thread-info indices are 0-based */
    lpt = m_fl.m_threads_info[virt_core_id-1];

    /* register a watchdog handle on current core */
    lpt->m_monitor.create(ss.str(), 1);
    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);

    if (get_is_stateless()) {
        lpt->start_stateless_daemon(*lp);
    }else{
        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
    }

    /* done - remove this from the watchdog (we might wait on join for a long time) */
    lpt->m_monitor.disable();

    m_signal[virt_core_id]=1;
    return (0);
}
4829
4830
4831int CGlobalTRex::stop_master(){
4832
4833    delay(1000);
4834    fprintf(stdout," ==================\n");
4835    fprintf(stdout," interface sum \n");
4836    fprintf(stdout," ==================\n");
4837    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4838    fprintf(stdout," ==================\n");
4839    fprintf(stdout," \n\n");
4840
4841    fprintf(stdout," ==================\n");
4842    fprintf(stdout," interface sum \n");
4843    fprintf(stdout," ==================\n");
4844
4845    CFlowGenListPerThread   * lpt;
4846    uint64_t total_tx_rx_check=0;
4847
4848    int i;
4849    for (i=0; i<get_cores_tx(); i++) {
4850        lpt = m_fl.m_threads_info[i];
4851        CCoreEthIF * erf_vif = m_cores_vif[i+1];
4852
4853        erf_vif->DumpCoreStats(stdout);
4854        erf_vif->DumpIfStats(stdout);
4855        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
4856            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
4857    }
4858
4859    fprintf(stdout," ==================\n");
4860    fprintf(stdout," generators \n");
4861    fprintf(stdout," ==================\n");
4862    for (i=0; i<get_cores_tx(); i++) {
4863        lpt = m_fl.m_threads_info[i];
4864        lpt->m_node_gen.DumpHist(stdout);
4865        lpt->DumpStats(stdout);
4866    }
4867    if ( CGlobalInfo::m_options.is_latency_enabled() ){
4868        fprintf(stdout," ==================\n");
4869        fprintf(stdout," latency \n");
4870        fprintf(stdout," ==================\n");
4871        m_mg.DumpShort(stdout);
4872        m_mg.Dump(stdout);
4873        m_mg.DumpShortRxCheck(stdout);
4874        m_mg.DumpRxCheck(stdout);
4875        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
4876    }
4877
4878    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4879    dump_post_test_stats(stdout);
4880
4881    return (0);
4882}
4883
4884bool CGlobalTRex::is_all_cores_finished() {
4885    int i;
4886    for (i=0; i<get_cores_tx(); i++) {
4887        if ( m_signal[i+1]==0){
4888            return false;
4889        }
4890    }
4891    if (m_sl_rx_running)
4892        return false;
4893
4894    return true;
4895}
4896
4897
4898int CGlobalTRex::start_master_stateless(){
4899    int i;
4900    for (i=0; i<BP_MAX_CORES; i++) {
4901        m_signal[i]=0;
4902    }
4903    m_fl.Create();
4904    m_expected_pps = 0;
4905    m_expected_cps = 0;
4906    m_expected_bps = 0;
4907
4908    m_fl.generate_p_thread_info(get_cores_tx());
4909    CFlowGenListPerThread   * lpt;
4910
4911    for (i=0; i<get_cores_tx(); i++) {
4912        lpt = m_fl.m_threads_info[i];
4913        CVirtualIF * erf_vif = m_cores_vif[i+1];
4914        lpt->set_vif(erf_vif);
4915        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
4916    }
4917    m_fl_was_init=true;
4918
4919    return (0);
4920}
4921
int CGlobalTRex::start_master_statefull() {
    /* Stateful startup: load the YAML traffic profile and optional
       client config, verify options, derive expected rates and prepare
       the per-core thread info. Exits the process on config errors.
       Always returns 0. */
    int i;
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected rates derived from the loaded profile */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* configure the manager with the first client/server pool ranges */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode: dump the parsed profile */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    /* bind each DP thread to its virtual interface and NUMA socket */
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
4991
4992
4993////////////////////////////////////////////
4994static CGlobalTRex g_trex;
4995
4996
4997void CPhyEthIF::configure_rss_redirect_table(uint16_t numer_of_queues,
4998                                             uint16_t skip_queue){
4999
5000
5001     struct rte_eth_dev_info dev_info;
5002
5003     rte_eth_dev_info_get(m_port_id,&dev_info);
5004     assert(dev_info.reta_size>0);
5005
5006     int reta_conf_size =
5007          std::max(1, dev_info.reta_size / RTE_RETA_GROUP_SIZE);
5008
5009     struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
5010
5011     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
5012
5013     int i,j;
5014
5015     for (j=0; j<reta_conf_size; j++) {
5016         uint16_t skip=0;
5017         reta_conf[j].mask = ~0ULL;
5018         for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
5019             uint16_t q;
5020             while (true) {
5021                 q=(i+skip)%numer_of_queues;
5022                 if (q!=skip_queue) {
5023                     break;
5024                 }
5025                 skip+=1;
5026             }
5027             reta_conf[j].reta[i]=q;
5028           //  printf(" %d %d %d \n",j,i,q);
5029         }
5030     }
5031     rte_eth_dev_rss_reta_update(m_port_id,&reta_conf[0],dev_info.reta_size);
5032
5033     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
5034
5035     #if 0
5036     /* verification */
5037     for (j=0; j<reta_conf_size; j++) {
5038         for (i=0; i<RTE_RETA_GROUP_SIZE; i++) {
5039             printf(" R  %d %d %d \n",j,i,reta_conf[j].reta[i]);
5040         }
5041     }
5042     #endif
5043
5044}
5045
5046
5047void CPhyEthIF::update_counters() {
5048    get_ex_drv()->get_extended_stats(this, &m_stats);
5049    CRXCoreIgnoreStat ign_stats;
5050
5051    if (get_is_stateless()) {
5052        g_trex.m_rx_sl.get_ignore_stats(m_port_id, ign_stats, true);
5053    } else {
5054        g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
5055    }
5056
5057    m_stats.obytes -= ign_stats.get_tx_bytes();
5058    m_stats.opackets -= ign_stats.get_tx_pkts();
5059    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
5060    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
5061    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();
5062
5063    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
5064    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
5065    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
5066    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
5067}
5068
5069bool CPhyEthIF::Create(uint8_t portid) {
5070    m_port_id      = portid;
5071    m_last_rx_rate = 0.0;
5072    m_last_tx_rate = 0.0;
5073    m_last_tx_pps  = 0.0;
5074    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
5075
5076    /* set src MAC addr */
5077    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
5078    if (! memcmp( CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
5079        rte_eth_macaddr_get(m_port_id,
5080                            (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src);
5081    }
5082
5083    return true;
5084}
5085
5086const std::vector<std::pair<uint8_t, uint8_t>> &
5087CPhyEthIF::get_core_list() {
5088
5089    /* lazy find */
5090    if (m_core_id_list.size() == 0) {
5091
5092        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
5093
5094            /* iterate over all the directions*/
5095            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
5096                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
5097                    m_core_id_list.push_back(std::make_pair(core_id, dir));
5098                }
5099            }
5100        }
5101    }
5102
5103    return m_core_id_list;
5104
5105}
5106
5107int CPhyEthIF::reset_hw_flow_stats() {
5108    if (get_ex_drv()->hw_rx_stat_supported()) {
5109        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
5110    } else {
5111        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
5112    }
5113    return 0;
5114}
5115
// get/reset flow director counters
// return 0 if OK. -1 if operation not supported.
// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
// min, max - minimum, maximum counters range to get
// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    /* HW path: read per-flow deltas from the NIC counters.
       SW path: the RX core answers directly (including reset). */
    if (hw_rx_stat_supported) {
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* re-baseline the HW counter for this single flow */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* accumulate the deltas into the absolute per-flow counters */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
5168
5169int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
5170    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
5171    for (int i = min; i <= max; i++) {
5172        if ( reset ) {
5173            if (tx_stats != NULL) {
5174                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
5175            }
5176        } else {
5177            if (tx_stats != NULL) {
5178                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
5179            }
5180        }
5181    }
5182
5183    return 0;
5184}
5185
5186// If needed, send packets to rx core for processing.
5187// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
5188bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir, rte_mbuf_t * m) {
5189    CFlowStatParser parser;
5190    uint32_t ip_id;
5191
5192    if (parser.parse(rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m)) != 0) {
5193        return false;
5194    }
5195    bool send=false;
5196
5197    // e1000 on ESXI hands us the packet with the ethernet FCS
5198    if (parser.get_pkt_size() < rte_pktmbuf_pkt_len(m)) {
5199        rte_pktmbuf_trim(m, rte_pktmbuf_pkt_len(m) - parser.get_pkt_size());
5200    }
5201
5202    if ( get_is_stateless() ) {
5203        // In stateless RX, we only care about flow stat packets
5204        if ((parser.get_ip_id(ip_id) == 0) && ((ip_id & 0xff00) == IP_ID_RESERVE_BASE)) {
5205            send = true;
5206        }
5207    } else {
5208        CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
5209        bool is_lateancy_pkt =  c_l_pkt_mode->IsLatencyPkt((IPHeader *)parser.get_l4()) &
5210            CCPortLatency::IsLatencyPkt(parser.get_l4() + c_l_pkt_mode->l4_header_len());
5211
5212        if (is_lateancy_pkt) {
5213            send = true;
5214        } else {
5215            if ( get_is_rx_filter_enable() ) {
5216                uint8_t max_ttl = 0xff - get_rx_check_hops();
5217                uint8_t pkt_ttl = parser.get_ttl();
5218                if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
5219                    send=true;
5220                }
5221            }
5222        }
5223    }
5224
5225
5226    if (send) {
5227        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
5228        if ( node ) {
5229            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
5230            node->m_dir      = dir;
5231            node->m_latency_offset = 0xdead;
5232            node->m_pkt      = m;
5233            if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
5234            }else{
5235                CGlobalInfo::free_node((CGenNode *)node);
5236                send=false;
5237            }
5238
5239#ifdef LATENCY_QUEUE_TRACE_
5240            printf("rx to cp --\n");
5241            rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
5242#endif
5243        }else{
5244            send=false;
5245        }
5246    }
5247    return (send);
5248}
5249
5250TrexStateless * get_stateless_obj() {
5251    return g_trex.m_trex_stateless;
5252}
5253
5254CRxCoreStateless * get_rx_sl_core_obj() {
5255    return &g_trex.m_rx_sl;
5256}
5257
5258static int latency_one_lcore(__attribute__((unused)) void *dummy)
5259{
5260    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5261    physical_thread_id_t  phy_id =rte_lcore_id();
5262
5263    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5264        g_trex.run_in_rx_core();
5265    }else{
5266
5267        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5268            g_trex.run_in_master();
5269            delay(1);
5270        }else{
5271            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5272            /* this core has stopped */
5273            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5274        }
5275    }
5276    return 0;
5277}
5278
5279
5280
5281static int slave_one_lcore(__attribute__((unused)) void *dummy)
5282{
5283    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5284    physical_thread_id_t  phy_id =rte_lcore_id();
5285
5286    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5287        g_trex.run_in_rx_core();
5288    }else{
5289        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5290            g_trex.run_in_master();
5291            delay(1);
5292        }else{
5293            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5294        }
5295    }
5296    return 0;
5297}
5298
5299
5300
5301uint32_t get_cores_mask(uint32_t cores,int offset){
5302    int i;
5303
5304    uint32_t res=1;
5305
5306    uint32_t mask=(1<<(offset+1));
5307    for (i=0; i<(cores-1); i++) {
5308        res |= mask ;
5309        mask = mask <<1;
5310    }
5311    return (res);
5312}
5313
5314
// Saved copy of argv[0] (set once in main) so other modules can query the
// executable's invocation name.
static char *g_exe_name;
// Returns the executable name captured at startup (NULL before main runs).
const char *get_exe_name() {
    return g_exe_name;
}
5319
5320
5321int main(int argc , char * argv[]){
5322    g_exe_name = argv[0];
5323
5324    return ( main_test(argc , argv));
5325}
5326
5327
// Copy settings parsed from the platform YAML (global_platform_cfg_info)
// into the global option/memory singletons. Always returns 0.
// Note: CLI options are re-parsed after this call (see main_test), so the
// command line deliberately overrides anything set here.
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    // socket/core topology must be created even without a platform section
    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy per-port MAC / IP configuration from the platform file */

        int port_size=cg->m_mac_info.size();

        // clamp to the compile-time port limit
        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.is_set = 1;

            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
        }
    }

    /* mul by interface type */
    // Memory pools are scaled by port speed (relative to 10Gb), number of
    // dual-port pairs, and the user mbuf factor.
    float mul=1.0;
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;   // 10Gb is the scaling baseline
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5396
5397extern "C" int eal_cpu_detected(unsigned lcore_id);
5398// return mask representing available cores
5399int core_mask_calc() {
5400    uint32_t mask = 0;
5401    int lcore_id;
5402
5403    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5404        if (eal_cpu_detected(lcore_id)) {
5405            mask |= (1 << lcore_id);
5406        }
5407    }
5408
5409    return mask;
5410}
5411
5412// Return number of set bits in i
5413uint32_t num_set_bits(uint32_t i)
5414{
5415    i = i - ((i >> 1) & 0x55555555);
5416    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
5417    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
5418}
5419
5420// sanity check if the cores we want to use really exist
5421int core_mask_sanity(uint32_t wanted_core_mask) {
5422    uint32_t calc_core_mask = core_mask_calc();
5423    uint32_t wanted_core_num, calc_core_num;
5424
5425    wanted_core_num = num_set_bits(wanted_core_mask);
5426    calc_core_num = num_set_bits(calc_core_mask);
5427
5428    if (calc_core_num == 1) {
5429        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
5430        printf("        If you are running on VM, consider adding more cores if possible\n");
5431        return -1;
5432    }
5433    if (wanted_core_num > calc_core_num) {
5434        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
5435        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
5436               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
5437               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
5438        if (CGlobalInfo::m_options.preview.getCores() > 1)
5439            printf("       Maybe try smaller -c <num>.\n");
5440        printf("       If you are running on VM, consider adding more cores if possible\n");
5441        return -1;
5442    }
5443
5444    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
5445        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
5446        return -1;
5447    }
5448
5449    return 0;
5450}
5451
// Assemble the argv-style argument vector (global_dpdk_args) passed later to
// rte_eal_init, from the parsed TRex options and the platform config.
// Returns 0 on success, -1 on configuration/sanity failure.
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    // propagate thread/port topology into the socket model, then validate it
    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    // core mask as hex string for the EAL "-c" option
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    // argv[0] placeholder, core mask, and 4 memory channels
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    // EAL log level: quiet (4) by default, more verbose with -v
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    // In dump-info mode the user may list interfaces on the command line;
    // otherwise the PCI whitelist comes from the platform config file.
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    // multi-instance support: hugepage file prefix + memory limit
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5536
5537
5538int sim_load_list_of_cap_files(CParserOption * op){
5539
5540    CFlowGenList fl;
5541    fl.Create();
5542    fl.load_from_yaml(op->cfg_file,1);
5543    if ( op->preview.getVMode() >0 ) {
5544        fl.DumpCsv(stdout);
5545    }
5546    uint32_t start=    os_get_time_msec();
5547
5548    CErfIF erf_vif;
5549
5550    fl.generate_p_thread_info(1);
5551    CFlowGenListPerThread   * lpt;
5552    lpt=fl.m_threads_info[0];
5553    lpt->set_vif(&erf_vif);
5554
5555    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5556        lpt->start_generate_stateful(op->out_file,op->preview);
5557    }
5558
5559    lpt->m_node_gen.DumpHist(stdout);
5560
5561    uint32_t stop=    os_get_time_msec();
5562    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5563    fl.Delete();
5564    return (0);
5565}
5566
// Print PCI address, MAC and driver name for every DPDK-probed port.
// Used by the --dump-info run mode (see main_test), which exits right after.
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        pci_addr = rte_eth_devices[port_id].pci_dev->addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].pci_dev->driver->name);
    }
}
5584
// Real program entry point (called from main).
// Sequence: parse options -> load platform file -> re-parse options (CLI
// wins) -> build EAL args -> rte_eal_init -> create g_trex -> optional
// debug/warm-up phases -> launch per-lcore workers and wait for them.
// Returns 0 on clean shutdown, -1 on early failure; several paths exit().
int main_test(int argc , char * argv[]){


    utl_termio_init();

    int ret;
    unsigned lcore_id;
    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);

    CGlobalInfo::m_options.preview.clean();

    // first pass: parse CLI so the platform-file path itself can come from CLI
    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
        exit(-1);
    }

    /* enable core dump if requested */
    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
        utl_set_coredump_size(-1);
    }
    else {
        utl_set_coredump_size(0);
    }


    update_global_info_from_platform_file();

    /* It is not a mistake. Give the user higher priorty over the configuration file */
    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
        exit(-1);
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        CGlobalInfo::m_options.dump(stdout);
        CGlobalInfo::m_memory_cfg.Dump(stdout);
    }


    // build the EAL argv (core mask, whitelist, prefix, ...) and sanity-check
    if (update_dpdk_args() < 0) {
        return -1;
    }

    CParserOption * po=&CGlobalInfo::m_options;


    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        rte_set_log_level(1);

    }
    // DPDK (hugepages, PCI access) requires root
    uid_t uid;
    uid = geteuid ();
    if ( uid != 0 ) {
        printf("ERROR you must run with superuser priviliges \n");
        printf("User id   : %d \n",uid);
        printf("try 'sudo' %s \n",argv[0]);
        return (-1);
    }

    /* set affinity to the master core as default */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
    if (ret < 0){
        printf(" You might need to run ./trex-cfg  once  \n");
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    }
    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
        dump_interfaces_info();
        exit(0);
    }
    // align DPDK port numbering with the config-file interface order
    reorder_dpdk_ports();
    time_init();

    /* check if we are in simulation mode */
    if ( CGlobalInfo::m_options.out_file != "" ){
        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
    }

    if ( !g_trex.Create() ){
        exit(1);
    }

    // clamp rx-check sample rate to the supported minimum
    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
        po->m_rx_check_sample = get_min_sample_rate();
        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
    }

    /* set dump mode */
    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);

    /* disable WD if needed */
    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
    TrexWatchDog::getInstance().init(wd_enable);

    g_trex.m_sl_rx_running = false;
    if ( get_is_stateless() ) {
        g_trex.start_master_stateless();

    }else{
        g_trex.start_master_statefull();
    }

    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports);
        int ret;    // NOTE(review): shadows the outer 'ret'; harmless here since all paths below exit()

        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
            // Unit test: toggle many times between receive all and stateless/stateful modes,
            // to test resiliency of add/delete fdir filters
            printf("Starting receive all/normal mode toggle unit test\n");
            for (int i = 0; i < 100; i++) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
                if (ret != 0) {
                    printf("Iteration %d: Receive all mode failed\n", i);
                    exit(ret);
                }

                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
                }

                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
                if (ret != 0) {
                    printf("Iteration %d: Normal mode failed\n", i);
                    exit(ret);
                }

                printf("Iteration %d OK\n", i);
            }
            exit(0);
        } else {
            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
            }
            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
            exit(ret);
        }
    }

    // in case of client config, we already run pretest
    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        g_trex.pre_test();
    }

    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
    g_trex.ixgbe_rx_queue_flush();
    for (int i = 0; i < g_trex.m_max_ports; i++) {
        CPhyEthIF *_if = &g_trex.m_ports[i];
        _if->stop_rx_drop_queue();
    }

    // optional latency warm-up: blast pkts for m_latency_prev seconds, then reset stats
    if ( CGlobalInfo::m_options.is_latency_enabled()
         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
            CGlobalInfo::m_options.m_latency_rate;
        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
        g_trex.m_mg.start(pkts, NULL);
        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
        printf("Finished \n");
        g_trex.m_mg.reset();
    }

    // latency-only mode: DP cores idle (see latency_one_lcore)
    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
        g_trex.stop_master();

        return (0);
    }

    // single-core debug mode: run DP thread 1 inline on this core
    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
        g_trex.run_in_core(1);
        g_trex.stop_master();
        return (0);
    }

    // normal mode: launch workers on all lcores and wait for completion
    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    g_trex.stop_master();
    g_trex.Delete();
    utl_termio_reset();

    return (0);
}
5789
5790void wait_x_sec(int sec) {
5791    int i;
5792    printf(" wait %d sec ", sec);
5793    fflush(stdout);
5794    for (i=0; i<sec; i++) {
5795        delay(1000);
5796        printf(".");
5797        fflush(stdout);
5798    }
5799    printf("\n");
5800    fflush(stdout);
5801}
5802
5803/*
5804Changes the order of rte_eth_devices array elements
5805to be consistent with our /etc/trex_cfg.yaml
5806*/
// Reorder the global rte_eth_devices[] array in place so that DPDK port i
// corresponds to interface i of the config file's interface list.
// NOTE(review): assumes every config interface resolves to a distinct DPDK
// port and that the list covers all probed ports - unmapped m_port_map
// entries would be read uninitialized in the second loop; confirm callers
// guarantee this.
void reorder_dpdk_ports() {
    rte_eth_dev rte_eth_devices_temp[RTE_MAX_ETHPORTS];
    uint8_t m_port_map[RTE_MAX_ETHPORTS];   // DPDK port id -> TRex config index
    struct rte_pci_addr addr;
    uint8_t port_id;

    // gather port relation information and save current array to temp
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices_temp[i], &rte_eth_devices[i], sizeof rte_eth_devices[i]);
        // accept both BDF ("xx:yy.z") and domain-BDF ("dddd:xx:yy.z") forms
        if (eal_parse_pci_BDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0 && eal_parse_pci_DomBDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0) {
            printf("Failed mapping TRex port id to DPDK id: %d\n", i);
            exit(1);
        }
        rte_eth_dev_get_port_by_addr(&addr, &port_id);
        m_port_map[port_id] = i;
        // print the relation in verbose mode
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            printf("TRex cfg port id: %d <-> DPDK port id: %d\n", i, port_id);
        }
    }

    // actual reorder: move the device probed as DPDK port i into the slot
    // of its TRex config index
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices[m_port_map[i]], &rte_eth_devices_temp[i], sizeof rte_eth_devices_temp[i]);
    }
}
5833
5834//////////////////////////////////////////////////////////////////////////////////////////////
5835//////////////////////////////////////////////////////////////////////////////////////////////
5836// driver section
5837//////////////////////////////////////////////////////////////////////////////////////////////
5838int CTRexExtendedDriverBase::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5839    uint8_t port_id=_if->get_rte_port_id();
5840    return (rte_eth_dev_rx_queue_stop(port_id, q_num));
5841}
5842
5843int CTRexExtendedDriverBase::wait_for_stable_link() {
5844    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
5845    return 0;
5846}
5847
5848void CTRexExtendedDriverBase::wait_after_link_up() {
5849    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
5850}
5851
5852CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
5853    CFlowStatParser *parser = new CFlowStatParser();
5854    assert (parser);
5855    return parser;
5856}
5857
5858// in 1G we need to wait if links became ready to soon
5859void CTRexExtendedDriverBase1G::wait_after_link_up(){
5860    wait_x_sec(6 + CGlobalInfo::m_options.m_wait_before_traffic);
5861}
5862
5863int CTRexExtendedDriverBase1G::wait_for_stable_link(){
5864    wait_x_sec(9 + CGlobalInfo::m_options.m_wait_before_traffic);
5865    return(0);
5866}
5867
// Apply 1G-specific TX descriptor threshold tuning to the port config.
void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){

    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    // NOTE(review): wthresh 0 presumably disables the write-back threshold
    // on e1000 - confirm against the datasheet before changing.
    cfg->m_tx_conf.tx_thresh.wthresh = 0;
}
5874
void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
    // Intentionally empty: on 1G the filter configuration is done in
    // configure_rx_filter_rules* by writing NIC registers directly.
}
5878
5879#define E1000_RXDCTL_QUEUE_ENABLE	0x02000000
5880// e1000 driver does not support the generic stop/start queue API, so we need to implement ourselves
5881int CTRexExtendedDriverBase1G::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5882    uint32_t reg_val = _if->pci_reg_read( E1000_RXDCTL(q_num));
5883    reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
5884    _if->pci_reg_write( E1000_RXDCTL(q_num), reg_val);
5885    return 0;
5886}
5887
5888int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
5889    if ( get_is_stateless() ) {
5890        return configure_rx_filter_rules_stateless(_if);
5891    } else {
5892        return configure_rx_filter_rules_statefull(_if);
5893    }
5894
5895    return 0;
5896}
5897
// Program e1000 flexible host filters (FHFT) so that latency / rx-check
// packets (matched by TTL+protocol for IPv4, HopLimit+NextHdr for IPv6) are
// steered to RX queue 1. Registers are written directly since DPDK exposes
// no API for these filters. Always returns 0.
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
    uint16_t hops = get_rx_check_hops();
    // hop count shifted into the TTL byte position of the compared word
    uint16_t v4_hops = (hops << 8)&0xff00;
    uint8_t protocol;

    // latency packet protocol depends on the configured l_pkt_mode
    if (CGlobalInfo::m_options.m_l_pkt_mode == 0) {
        protocol = IPPROTO_SCTP;
    } else {
        protocol = IPPROTO_ICMP;
    }
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
    _if->pci_reg_write( E1000_TTQF(0),   protocol
                        | 0x00008100 /* enable */
                        | 0xE0010000 /* RX queue is 1 */
                        );


    /* 16  :   12 MAC , (2)0x0800,2      | DW0 , DW1
       6 bytes , TTL , PROTO     | DW2=0 , DW3=0x0000FF06
    */
    int i;
    // IPv4: bytes being compared are {TTL, Protocol}
    uint16_t ff_rules_v4[6]={
        (uint16_t)(0xFF06 - v4_hops),
        (uint16_t)(0xFE11 - v4_hops),
        (uint16_t)(0xFF11 - v4_hops),
        (uint16_t)(0xFE06 - v4_hops),
        (uint16_t)(0xFF01 - v4_hops),
        (uint16_t)(0xFE01 - v4_hops),
    }  ;
    // IPv6: bytes being compared are {NextHdr, HopLimit}
    uint16_t ff_rules_v6[2]={
        (uint16_t)(0x3CFF - hops),
        (uint16_t)(0x3CFE - hops),
    }  ;
    uint16_t *ff_rules;
    uint16_t num_rules;
    uint32_t mask=0;        // accumulates one enable bit per programmed rule
    int  rule_id;

    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
        ff_rules = &ff_rules_v6[0];
        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
    }else{
        ff_rules = &ff_rules_v4[0];
        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
    }

    clear_rx_filter_rules(_if);

    uint8_t len = 24;
    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
        /* clear rule all */
        for (i=0; i<0xff; i+=4) {
            _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
        }

        if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
            // NOTE(review): len grows by 8 on every VLAN-mode iteration, so
            // later rules get progressively larger lengths - confirm this is
            // intentional against the FHFT length-field semantics.
            len += 8;
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
            }else{
                // IPv4 VLAN: TTL/Protocol offset = 0x1A
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
            }
        }else{
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6: NextHdr/HopLimit offset = 0x14
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
            }else{
                // IPv4: TTL/Protocol offset = 0x16
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
            }
        }

        // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
        _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8)  | len);

        mask |=(1<<rule_id);
    }

    /* enable all rules */
    _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );

    return (0);
}
5991
5992// Sadly, DPDK has no support for i350 filters, so we need to implement by writing to registers.
5993int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
5994    /* enable filter to pass packet to rx queue 1 */
5995    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
5996    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
5997
5998    uint8_t len = 24;
5999    uint32_t mask = 0;
6000    int rule_id;
6001
6002    clear_rx_filter_rules(_if);
6003
6004    rule_id = 0;
6005    mask |= 0x1 << rule_id;
6006    // filter for byte 18 of packet (msb of IP ID) should equal ff
6007    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x00ff0000);
6008    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
6009    // + bytes 12 + 13 (ether type) should indicate IP.
6010    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000008);
6011    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
6012    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
6013    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);
6014
6015    // same as 0, but with vlan. type should be vlan. Inside vlan, should be IP with lsb of IP ID equals 0xff
6016    rule_id = 1;
6017    mask |= 0x1 << rule_id;
6018    // filter for byte 22 of packet (msb of IP ID) should equal ff
6019    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x00ff0000);
6020    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x40 | 0x03); /* MASK */
6021    // + bytes 12 + 13 (ether type) should indicate VLAN.
6022    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
6023    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
6024    // + bytes 16 + 17 (vlan type) should indicate IP.
6025    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x00000008);
6026    // Was written together with IP ID filter
6027    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
6028    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
6029    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);
6030
6031    rule_id = 2;
6032    mask |= 0x1 << rule_id;
6033    // ipv6 flow stat
6034    // filter for byte 16 of packet (part of flow label) should equal 0xff
6035    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x000000ff);
6036    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x01); /* MASK */
6037    // + bytes 12 + 13 (ether type) should indicate IPv6.
6038    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x0000dd86);
6039    _if->pci_reg_write( (E1000_FHFT(