main_dpdk.cpp revision 0074ceee
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2016 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
/* rx-check flow sampling rates: sample 1 of every N flows (per NIC speed class) */
#define RX_CHECK_MIX_SAMPLE_RATE 8
#define RX_CHECK_MIX_SAMPLE_RATE_1G 2


#define SOCKET0         0

/* max packets pulled from a rx queue in one poll */
#define MAX_PKT_BURST   32

#define BP_MAX_CORES 32
#define BP_MAX_TX_QUEUE 16
/* master core + latency (rx) core */
#define BP_MASTER_AND_LATENCY 2

/* rx/tx descriptor ring sizes for bare-metal NICs */
#define RTE_TEST_RX_DESC_DEFAULT 64
#define RTE_TEST_RX_LATENCY_DESC_DEFAULT (1*1024)

/* descriptor ring sizes when running inside a VM (single queue mode) */
#define RTE_TEST_RX_DESC_VM_DEFAULT 512
#define RTE_TEST_TX_DESC_VM_DEFAULT 512

typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
void reorder_dpdk_ports();

#define RTE_TEST_TX_DESC_DEFAULT 512
#define RTE_TEST_RX_DESC_DROP    0

/* highest hardware flow-stat counter ids observed so far (ip_id / payload rules) */
static int max_stat_hw_id_seen = 0;
static int max_stat_hw_id_seen_payload = 0;
113
114static inline int get_vm_one_queue_enable(){
115    return (CGlobalInfo::m_options.preview.get_vm_one_queue_enable() ?1:0);
116}
117
118static inline int get_is_rx_thread_enabled() {
119    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
120}
121
struct port_cfg_t;

/* storage for the argv-style argument list handed to rte_eal_init() */
#define MAX_DPDK_ARGS 40
static CPlatformYamlInfo global_platform_cfg_info;
static int global_dpdk_args_num ;
static char * global_dpdk_args[MAX_DPDK_ARGS];
/* buffers backing individual EAL arguments (must outlive rte_eal_init) */
static char global_cores_str[100];
static char global_prefix_str[100];
static char global_loglevel_str[20];
static char global_master_id_str[10];
132
// Abstract base class for per-NIC-type driver extensions. Each supported DPDK
// poll-mode driver gets a subclass that customizes port configuration,
// hardware rx filter rules and statistics collection for that NIC family.
// Concrete instances are created through the CTRexExtendedDriverDb registry.
class CTRexExtendedDriverBase {
public:

    /* by default NIC driver adds CRC */
    virtual bool has_crc_added() {
        return true;
    }

    virtual int get_min_sample_rate(void)=0;
    virtual void update_configuration(port_cfg_t * cfg)=0;
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;

    // true when the NIC can classify/filter packets in hardware
    virtual bool is_hardware_filter_is_supported(){
        return(false);
    }
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    // default no-op: drivers without per-flow hw counters report success
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return 0;}
    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    // true when rx flow stats are counted by the NIC itself (see the 40G driver)
    virtual bool hw_rx_stat_supported(){return false;}
    // default -1: no hardware rx stats available
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual int get_stat_counters_num() {return 0;}
    virtual int get_rx_stat_capabilities() {return 0;}
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;
    // number of trailing CRC bytes the driver leaves on received packets
    virtual uint8_t get_num_crc_fix_bytes() {return 0;}

    /* Does this NIC type support automatic packet dropping in case of a link down?
       in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
       this interface is used as a workaround to let TRex work without link in stateless mode, driver that
       does not support that will be failed at init time because it will cause watchdog due to watchdog hang */
    virtual bool drop_packets_incase_of_linkdown() {
        return (false);
    }
};
181
182
// Driver extension for Intel 1G NICs (registered for "rte_igb_pmd").
// Supports hardware filtering, a hardware drop queue, and software flow stats.
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    // rx filter rules differ between stateful and stateless modes
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
229
// Driver extension for paravirtualized NICs inside VMs (registered for
// "rte_vmxnet3_pmd" and "rte_virtio_pmd"). Forces single rx/tx queue mode
// and reports no hardware drop-queue support.
class CTRexExtendedDriverBase1GVm : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1GVm(){
        /* we are working in mode that we have 1 queue for rx and one queue for tx*/
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }

    // virtual NICs hand us packets without the ethernet CRC
    virtual bool has_crc_added() {
        return false;
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1GVm() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){

    }

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);

    virtual bool is_hardware_support_drop_queue(){
        return(false);
    }

    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
};
280
// Driver extension for the e1000 emulated NIC (registered for "rte_em_pmd").
// Behaves like the generic VM driver, except received packets still carry the
// ethernet CRC, which must be chopped off.
class CTRexExtendedDriverBaseE1000 : public CTRexExtendedDriverBase1GVm {
    CTRexExtendedDriverBaseE1000() {
        // E1000 driver is only relevant in VM in our case
        CGlobalInfo::m_options.preview.set_vm_one_queue_enable(true);
    }
public:
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverBaseE1000() );
    }
    // e1000 driver handing us packets with ethernet CRC, so we need to chop them
    virtual uint8_t get_num_crc_fix_bytes() {return 4;}
};
293
// Driver extension for Intel 10G NICs (registered for "rte_ixgbe_pmd").
// Supports hardware filter rules, a hardware drop queue, and ethertype filters.
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    // rx filter rules differ between stateful and stateless modes
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    int add_del_eth_filter(CPhyEthIF * _if, bool is_add, uint16_t ethertype);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
335
336class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase10G {
337public:
338    CTRexExtendedDriverBase40G(){
339        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
340        // If we want to support more counters in case of card having less interfaces, we
341        // Will have to identify the number of interfaces dynamically.
342        m_if_per_card = 4;
343    }
344
345    TRexPortAttr * create_port_attr(uint8_t port_id) {
346        // disabling flow control on 40G using DPDK API causes the interface to malfunction
347        return new DpdkTRexPortAttr(port_id, false, false);
348    }
349
350    static CTRexExtendedDriverBase * create(){
351        return ( new CTRexExtendedDriverBase40G() );
352    }
353
354    virtual void update_global_config_fdir(port_cfg_t * cfg){
355    }
356    virtual void update_configuration(port_cfg_t * cfg);
357    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
358    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
359                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
360    virtual bool is_hardware_filter_is_supported(){
361        return (true);
362    }
363
364    virtual bool is_hardware_support_drop_queue(){
365        return(true);
366    }
367    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
368    virtual void clear_extended_stats(CPhyEthIF * _if);
369    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
370    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
371    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
372    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
373    virtual int get_rx_stat_capabilities() {
374        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
375    }
376    virtual int wait_for_stable_link();
377    virtual bool hw_rx_stat_supported(){return true;}
378    virtual int verify_fw_ver(int i);
379    virtual CFlowStatParser *get_flow_stat_parser();
380    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
381
382private:
383    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
384                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
385    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
386    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
387
388    virtual bool drop_packets_incase_of_linkdown() {
389        return (true);
390    }
391
392private:
393    uint8_t m_if_per_card;
394};
395
// Driver extension for Cisco VIC NICs (registered for "rte_enic_pmd").
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }

    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }


    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }

    void clear_extended_stats(CPhyEthIF * _if);

    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);


    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

};
452
453
// Driver extension for Mellanox NICs (registered for "librte_pmd_mlx5").
// Inherits the 10G driver's behavior and overrides filter/stat handling.
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase10G {
public:
    CTRexExtendedDriverBaseMlnx5G(){
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control via the DPDK API causes the interface to
        // malfunction (same workaround as on the 40G driver)
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual bool is_hardware_filter_is_supported(){
        return (true);
    }

    virtual bool is_hardware_support_drop_queue(){
        return(true);
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    // flow control cannot be safely disabled on this NIC (see create_port_attr)
    virtual bool flow_control_disable_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
                               , int queue);
    virtual int add_del_rx_filter_rules(CPhyEthIF * _if, bool set_on);
};
501
// Factory function type used to instantiate a concrete driver extension.
typedef CTRexExtendedDriverBase * (*create_object_t) (void);


// Registry record: maps a DPDK driver name to its factory function.
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;
    create_object_t     m_constructor;
};
510
// Singleton registry of driver extensions. Driver names (as reported by DPDK)
// are registered in the constructor; set_driver_name() must be called once,
// after probing, before get_drv() may be used.
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    bool is_driver_exists(std::string name);



    // Select the active driver by name and instantiate it. Asserts if the
    // name was never registered (create_driver returns NULL).
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    // Returns the active driver. Asserts if set_driver_name() was not called yet.
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    // Singleton accessor (lazy construction).
    static CTRexExtendedDriverDb * Ins();

private:
    CTRexExtendedDriverBase * create_driver(std::string name);

    // Registers every supported PMD name with its factory.
    CTRexExtendedDriverDb(){
        register_driver(std::string("rte_ixgbe_pmd"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("librte_pmd_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);


        /* virtual devices */
        register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBaseE1000::create);
        register_driver(std::string("rte_vmxnet3_pmd"),CTRexExtendedDriverBase1GVm::create);
        register_driver(std::string("rte_virtio_pmd"),CTRexExtendedDriverBase1GVm::create);




        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;
    bool        m_driver_was_set;
    std::string m_driver_name;
    CTRexExtendedDriverBase * m_drv;
    std::vector <CTRexExtendedDriverRec*>     m_list;

};
575
// Singleton instance storage, lazily created by CTRexExtendedDriverDb::Ins()
CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
577
578
579void CTRexExtendedDriverDb::register_driver(std::string name,
580                                            create_object_t func){
581    CTRexExtendedDriverRec * rec;
582    rec = new CTRexExtendedDriverRec();
583    rec->m_driver_name=name;
584    rec->m_constructor=func;
585    m_list.push_back(rec);
586}
587
588
589bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
590    int i;
591    for (i=0; i<(int)m_list.size(); i++) {
592        if (m_list[i]->m_driver_name == name) {
593            return (true);
594        }
595    }
596    return (false);
597}
598
599
600CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
601    int i;
602    for (i=0; i<(int)m_list.size(); i++) {
603        if (m_list[i]->m_driver_name == name) {
604            return ( m_list[i]->m_constructor() );
605        }
606    }
607    return( (CTRexExtendedDriverBase *)0);
608}
609
610
611
612CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
613    if (!m_ins) {
614        m_ins = new CTRexExtendedDriverDb();
615    }
616    return (m_ins);
617}
618
619static CTRexExtendedDriverBase *  get_ex_drv(){
620
621    return ( CTRexExtendedDriverDb::Ins()->get_drv());
622}
623
624static inline int get_min_sample_rate(void){
625    return ( get_ex_drv()->get_min_sample_rate());
626}
627
// cores =0==1,1*2,2,3,4,5,6
// An enum for all the option types. Each value here must have a matching
// entry in parser_options[] below, mapping it to its command-line switch.
enum { OPT_HELP,
       OPT_MODE_BATCH,
       OPT_MODE_INTERACTIVE,
       OPT_NODE_DUMP,
       OPT_DUMP_INTERFACES,
       OPT_UT,
       OPT_CORES,
       OPT_SINGLE_CORE,
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       OPT_RATE_MULT,
       OPT_DURATION,
       OPT_PLATFORM_FACTOR,
       OPT_PUB_DISABLE,
       OPT_LIMT_NUM_OF_PORTS,
       OPT_PLAT_CFG_FILE,
       OPT_MBUF_FACTOR,
       OPT_LATENCY,
       OPT_NO_CLEAN_FLOW_CLOSE,
       OPT_LATENCY_MASK,
       OPT_ONLY_LATENCY,
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC,
       OPT_PCAP,
       OPT_RX_CHECK,
       OPT_IO_MODE,
       OPT_IPV6,
       OPT_LEARN,
       OPT_LEARN_MODE,
       OPT_LEARN_VERIFY,
       OPT_L_PKT_MODE,
       OPT_NO_FLOW_CONTROL,
       OPT_VLAN,
       OPT_RX_CHECK_HOPS,
       OPT_CLIENT_CFG_FILE,
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
};
676
/* these are the argument types:
   SO_NONE --    no argument needed
   SO_REQ_SEP -- single required argument
   SO_MULTI --   multiple arguments needed
*/
// Command-line option table consumed by CSimpleOpt. One option id may map to
// several switches (aliases), e.g. OPT_HELP and OPT_CLIENT_CFG_FILE below.
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE   },
        { OPT_HELP,                   "-h",                SO_NONE   },
        { OPT_HELP,                   "--help",            SO_NONE   },
        { OPT_UT,                     "--ut",              SO_NONE   },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP},
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE   },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP},
        { OPT_SINGLE_CORE,            "-s",                SO_NONE  },
        { OPT_FLIP_CLIENT_SERVER,"--flip",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",SO_NONE  },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,"-e",SO_NONE  },
        { OPT_NO_CLEAN_FLOW_CLOSE,"--nc",SO_NONE  },
        { OPT_LIMT_NUM_OF_PORTS,"--limit-ports", SO_REQ_SEP },
        { OPT_CORES     , "-c",         SO_REQ_SEP },
        { OPT_NODE_DUMP , "-v",         SO_REQ_SEP },
        { OPT_DUMP_INTERFACES , "--dump-interfaces",         SO_MULTI },
        { OPT_LATENCY , "-l",         SO_REQ_SEP },
        { OPT_DURATION     , "-d",  SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR     , "-pm",  SO_REQ_SEP },
        { OPT_PUB_DISABLE     , "-pubd",  SO_NONE },
        { OPT_RATE_MULT     , "-m",  SO_REQ_SEP },
        { OPT_LATENCY_MASK     , "--lm",  SO_REQ_SEP },
        { OPT_ONLY_LATENCY, "--lo",  SO_NONE  },
        { OPT_LATENCY_PREVIEW ,       "-k",   SO_REQ_SEP   },
        { OPT_WAIT_BEFORE_TRAFFIC ,   "-w",   SO_REQ_SEP   },
        { OPT_PCAP,       "--pcap",       SO_NONE   },
        { OPT_RX_CHECK,   "--rx-check",  SO_REQ_SEP },
        { OPT_IO_MODE,   "--iom",  SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS, "--hops", SO_REQ_SEP },
        { OPT_IPV6,       "--ipv6",       SO_NONE   },
        { OPT_LEARN, "--learn",       SO_NONE   },
        { OPT_LEARN_MODE, "--learn-mode",       SO_REQ_SEP   },
        { OPT_LEARN_VERIFY, "--learn-verify",       SO_NONE   },
        { OPT_L_PKT_MODE, "--l-pkt-mode",       SO_REQ_SEP   },
        { OPT_NO_FLOW_CONTROL, "--no-flow-control-change",       SO_NONE   },
        { OPT_VLAN,       "--vlan",       SO_NONE   },
        // both underscore and dash spellings are accepted for client config
        { OPT_CLIENT_CFG_FILE, "--client_cfg", SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE, "--client-cfg", SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT ,"--no-key", SO_NONE   },
        { OPT_VIRT_ONE_TX_RX_QUEUE, "--vm-sim", SO_NONE },
        { OPT_PREFIX, "--prefix", SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT, "--send-debug-pkt", SO_REQ_SEP },
        { OPT_MBUF_FACTOR     , "--mbuf-factor",  SO_REQ_SEP },
        { OPT_NO_WATCHDOG ,     "--no-watchdog",  SO_NONE  },
        { OPT_ALLOW_COREDUMP ,  "--allow-coredump",  SO_NONE  },
        { OPT_CHECKSUM_OFFLOAD, "--checksum-offload", SO_NONE },
        { OPT_CLOSE, "--close-at-end", SO_NONE },
        { OPT_ARP_REF_PER, "--arp-refresh-period", SO_REQ_SEP },
        SO_END_OF_OPTIONS
    };
734
735static int usage(){
736
737    printf(" Usage: t-rex-64 [mode] <options>\n\n");
738    printf(" mode is one of:\n");
739    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
740    printf("   -i        : Run TRex in 'stateless' mode\n");
741    printf("\n");
742
743    printf(" Available options are:\n");
744    printf(" --allow-coredump           : Allow creation of core dump \n");
745    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
746    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
747    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
748    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
749    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
750    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
751    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
752    printf("                               This it temporary option. Will be removed in the future \n");
753    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
754    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
755    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
756    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
757    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
758    printf(" --ipv6                     : Work in ipv6 mode \n");
759    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
760    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
761    printf("    Rate of zero means no latency check \n");
762    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
763    printf(" --learn-mode [1-3]         : Work in NAT environments, learn the dynamic NAT translation and ALG \n");
764    printf("      1    Use TCP ACK in first SYN to pass NAT translation information. Will work only for TCP streams. Initial SYN packet must be first packet in stream \n");
765    printf("      2    Add special IP option to pass NAT translation information. Will not work on certain firewalls if they drop packets with IP options \n");
766    printf("      3    Like 1, but without support for sequence number randomization in server->clien direction. Performance (flow/second) better than 1 \n");
767    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
768    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
769    printf(" --lm                       : Hex mask of cores that should send traffic \n");
770    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
771    printf(" --lo                       : Only run latency test \n");
772    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
773    printf("      0 (default)    send SCTP packets  \n");
774    printf("      1              Send ICMP request packets  \n");
775    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
776    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
777    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
778    printf(" --mbuf-factor              : Factor for packet memory \n");
779    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
780    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
781    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
782    printf(" --no-watchdog              : Disable watchdog \n");
783    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
784    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
785    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
786    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
787    printf(" -pubd                      : Disable monitors publishers \n");
788    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
789    printf(" -s                         : Single core. Run only one data path core. For debug \n");
790    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
791    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
792    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
793    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
794    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
795    printf(" --vm-sim                   : Simulate vm with driver of one input queue and one output queue \n");
796    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
797    printf("\n");
798    printf(" Examples: ");
799    printf(" basic trex run for 20 sec and multiplier of 10 \n");
800    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
801    printf("\n\n");
802    printf(" Copyright (c) 2015-2016 Cisco Systems, Inc.    \n");
803    printf("                                                                  \n");
804    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
805    printf(" you may not use this file except in compliance with the License. \n");
806    printf(" You may obtain a copy of the License at                          \n");
807    printf("                                                                  \n");
808    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
809    printf("                                                                  \n");
810    printf(" Unless required by applicable law or agreed to in writing, software \n");
811    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
812    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
813    printf(" See the License for the specific language governing permissions and      \n");
814    printf(" limitations under the License.                                           \n");
815    printf(" \n");
816    printf(" Open Source Components / Libraries \n");
817    printf(" DPDK       (BSD)       \n");
818    printf(" YAML-CPP   (BSD)       \n");
819    printf(" JSONCPP    (MIT)       \n");
820    printf(" \n");
821    printf(" Open Source Binaries \n");
822    printf(" ZMQ        (LGPL v3plus) \n");
823    printf(" \n");
824    printf(" Version : %s   \n",VERSION_BUILD_NUM);
825    printf(" DPDK version : %s   \n",rte_version());
826    printf(" User    : %s   \n",VERSION_USER);
827    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
828    printf(" Uuid    : %s    \n",VERSION_UIID);
829    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
830    return (0);
831}
832
833
834int gtest_main(int argc, char **argv) ;
835
// Print a fatal command-line parsing error to stdout and terminate the
// process with exit code -1. Never returns.
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n" << "*** "<< msg << "\n\n";
    exit(-1);
}
840
841static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
842    CSimpleOpt args(argc, argv, parser_options);
843
844    bool latency_was_set=false;
845    (void)latency_was_set;
846    char ** rgpszArg = NULL;
847    bool opt_vlan_was_set = false;
848
849    int a=0;
850    int node_dump=0;
851
852    po->preview.setFileWrite(true);
853    po->preview.setRealTime(true);
854    uint32_t tmp_data;
855
856    po->m_run_mode = CParserOption::RUN_MODE_INVALID;
857
858    while ( args.Next() ){
859        if (args.LastError() == SO_SUCCESS) {
860            switch (args.OptionId()) {
861
862            case OPT_UT :
863                parse_err("Supported only in simulation");
864                break;
865
866            case OPT_HELP:
867                usage();
868                return -1;
869
870            case OPT_MODE_BATCH:
871                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
872                    parse_err("Please specify single run mode");
873                }
874                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
875                po->cfg_file = args.OptionArg();
876                break;
877
878            case OPT_MODE_INTERACTIVE:
879                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
880                    parse_err("Please specify single run mode");
881                }
882                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
883                break;
884
885            case OPT_NO_KEYBOARD_INPUT  :
886                po->preview.set_no_keyboard(true);
887                break;
888
889            case OPT_CLIENT_CFG_FILE :
890                po->client_cfg_file = args.OptionArg();
891                break;
892
893            case OPT_PLAT_CFG_FILE :
894                po->platform_cfg_file = args.OptionArg();
895                break;
896
897            case OPT_SINGLE_CORE :
898                po->preview.setSingleCore(true);
899                break;
900
901            case OPT_IPV6:
902                po->preview.set_ipv6_mode_enable(true);
903                break;
904
905
906            case OPT_LEARN :
907                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
908                break;
909
910            case OPT_LEARN_MODE :
911                sscanf(args.OptionArg(),"%d", &tmp_data);
912                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
913                    exit(-1);
914                }
915                po->m_learn_mode = (uint8_t)tmp_data;
916                break;
917
918            case OPT_LEARN_VERIFY :
919                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
920                if (po->m_learn_mode == 0) {
921                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
922                }
923                po->preview.set_learn_and_verify_mode_enable(true);
924                break;
925
926            case OPT_L_PKT_MODE :
927                sscanf(args.OptionArg(),"%d", &tmp_data);
928                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
929                    exit(-1);
930                }
931                po->m_l_pkt_mode=(uint8_t)tmp_data;
932                break;
933
934            case OPT_NO_FLOW_CONTROL:
935                po->preview.set_disable_flow_control_setting(true);
936                break;
937            case OPT_VLAN:
938                opt_vlan_was_set = true;
939                break;
940            case OPT_LIMT_NUM_OF_PORTS :
941                po->m_expected_portd =atoi(args.OptionArg());
942                break;
943            case  OPT_CORES  :
944                po->preview.setCores(atoi(args.OptionArg()));
945                break;
946            case OPT_FLIP_CLIENT_SERVER :
947                po->preview.setClientServerFlip(true);
948                break;
949            case OPT_NO_CLEAN_FLOW_CLOSE :
950                po->preview.setNoCleanFlowClose(true);
951                break;
952            case OPT_FLOW_FLIP_CLIENT_SERVER :
953                po->preview.setClientServerFlowFlip(true);
954                break;
955            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
956                po->preview.setClientServerFlowFlipAddr(true);
957                break;
958            case OPT_NODE_DUMP:
959                a=atoi(args.OptionArg());
960                node_dump=1;
961                po->preview.setFileWrite(false);
962                break;
963            case OPT_DUMP_INTERFACES:
964                if (first_time) {
965                    rgpszArg = args.MultiArg(1);
966                    while (rgpszArg != NULL) {
967                        po->dump_interfaces.push_back(rgpszArg[0]);
968                        rgpszArg = args.MultiArg(1);
969                    }
970                }
971                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
972                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
973                }
974                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
975                break;
976            case OPT_MBUF_FACTOR:
977                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
978                break;
979            case OPT_RATE_MULT :
980                sscanf(args.OptionArg(),"%f", &po->m_factor);
981                break;
982            case OPT_DURATION :
983                sscanf(args.OptionArg(),"%f", &po->m_duration);
984                break;
985            case OPT_PUB_DISABLE:
986                po->preview.set_zmq_publish_enable(false);
987                break;
988            case OPT_PLATFORM_FACTOR:
989                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
990                break;
991            case OPT_LATENCY :
992                latency_was_set=true;
993                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
994                break;
995            case OPT_LATENCY_MASK :
996                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
997                break;
998            case OPT_ONLY_LATENCY :
999                po->preview.setOnlyLatency(true);
1000                break;
1001            case OPT_NO_WATCHDOG :
1002                po->preview.setWDDisable(true);
1003                break;
1004            case OPT_ALLOW_COREDUMP :
1005                po->preview.setCoreDumpEnable(true);
1006                break;
1007            case  OPT_LATENCY_PREVIEW :
1008                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
1009                break;
1010            case  OPT_WAIT_BEFORE_TRAFFIC :
1011                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
1012                break;
1013            case OPT_PCAP:
1014                po->preview.set_pcap_mode_enable(true);
1015                break;
1016            case OPT_RX_CHECK :
1017                sscanf(args.OptionArg(),"%d", &tmp_data);
1018                po->m_rx_check_sample=(uint16_t)tmp_data;
1019                po->preview.set_rx_check_enable(true);
1020                break;
1021            case OPT_RX_CHECK_HOPS :
1022                sscanf(args.OptionArg(),"%d", &tmp_data);
1023                po->m_rx_check_hops = (uint16_t)tmp_data;
1024                break;
1025            case OPT_IO_MODE :
1026                sscanf(args.OptionArg(),"%d", &tmp_data);
1027                po->m_io_mode=(uint16_t)tmp_data;
1028                break;
1029
1030            case OPT_VIRT_ONE_TX_RX_QUEUE:
1031                po->preview.set_vm_one_queue_enable(true);
1032                break;
1033
1034            case OPT_PREFIX:
1035                po->prefix = args.OptionArg();
1036                break;
1037
1038            case OPT_SEND_DEBUG_PKT:
1039                sscanf(args.OptionArg(),"%d", &tmp_data);
1040                po->m_debug_pkt_proto = (uint8_t)tmp_data;
1041                break;
1042
1043            case OPT_CHECKSUM_OFFLOAD:
1044                po->preview.setChecksumOffloadEnable(true);
1045                break;
1046
1047            case OPT_CLOSE:
1048                po->preview.setCloseEnable(true);
1049                break;
1050            case  OPT_ARP_REF_PER:
1051                sscanf(args.OptionArg(),"%d", &tmp_data);
1052                po->m_arp_ref_per=(uint16_t)tmp_data;
1053                break;
1054
1055            default:
1056                usage();
1057                return -1;
1058                break;
1059            } // End of switch
1060        }// End of IF
1061        else {
1062            usage();
1063            return -1;
1064        }
1065    } // End of while
1066
1067
1068    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
1069        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
1070    }
1071
1072    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
1073        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
1074                  "If you think it is important, please open a defect or write to TRex mailing list\n");
1075    }
1076
1077    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
1078        || (CGlobalInfo::m_options.m_arp_ref_per != 0) || get_vm_one_queue_enable()) {
1079        po->set_rx_enabled();
1080    }
1081
1082    if ( node_dump ){
1083        po->preview.setVMode(a);
1084    }
1085
1086    /* if we have a platform factor we need to devided by it so we can still work with normalized yaml profile  */
1087    po->m_factor = po->m_factor/po->m_platform_factor;
1088
1089    uint32_t cores=po->preview.getCores();
1090    if ( cores > ((BP_MAX_CORES)/2-1) ) {
1091        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
1092        return -1;
1093    }
1094
1095
1096    if ( first_time ){
1097        /* only first time read the configuration file */
1098        if ( po->platform_cfg_file.length() >0  ) {
1099            if ( node_dump ){
1100                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
1101            }
1102            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
1103            if ( node_dump ){
1104                global_platform_cfg_info.Dump(stdout);
1105            }
1106        }else{
1107            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
1108                if ( node_dump ){
1109                    printf("Using configuration file /etc/trex_cfg.yaml \n");
1110                }
1111                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
1112                if ( node_dump ){
1113                    global_platform_cfg_info.Dump(stdout);
1114                }
1115            }
1116        }
1117    }
1118
1119    if ( get_is_stateless() ) {
1120        if ( opt_vlan_was_set ) {
1121            po->preview.set_vlan_mode_enable(true);
1122        }
1123        if (CGlobalInfo::m_options.client_cfg_file != "") {
1124            parse_err("Client config file is not supported with interactive (stateless) mode ");
1125        }
1126        if ( po->m_duration ) {
1127            parse_err("Duration is not supported with interactive (stateless) mode ");
1128        }
1129
1130        if ( po->preview.get_is_rx_check_enable() ) {
1131            parse_err("Rx check is not supported with interactive (stateless) mode ");
1132        }
1133
1134        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
1135            parse_err("Latency check is not supported with interactive (stateless) mode ");
1136        }
1137
1138        if ( po->preview.getSingleCore() ){
1139            parse_err("Single core is not supported with interactive (stateless) mode ");
1140        }
1141
1142    }
1143    else {
1144        if ( !po->m_duration ) {
1145            po->m_duration = 3600.0;
1146        }
1147    }
1148    return 0;
1149}
1150
1151static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1152    // copy, as arg parser sometimes changes the argv
1153    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1154    for(int i=0; i<argc; i++) {
1155        argv_copy[i] = strdup(argv[i]);
1156    }
1157    int ret = parse_options(argc, argv_copy, po, first_time);
1158
1159    // free
1160    for(int i=0; i<argc; i++) {
1161        free(argv_copy[i]);
1162    }
1163    free(argv_copy);
1164    return ret;
1165}
1166
1167int main_test(int argc , char * argv[]);
1168
1169
/* Descriptor-ring threshold register defaults used by port_cfg_t below. */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

/* 1G NICs get different TX threshold defaults. */
#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1185
1186
1187struct port_cfg_t {
1188public:
1189    port_cfg_t(){
1190        memset(&m_port_conf,0,sizeof(m_port_conf));
1191        memset(&m_rx_conf,0,sizeof(m_rx_conf));
1192        memset(&m_tx_conf,0,sizeof(m_tx_conf));
1193        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));
1194
1195        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
1196        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
1197        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
1198        m_rx_conf.rx_free_thresh =32;
1199
1200        m_rx_drop_conf.rx_thresh.pthresh = 0;
1201        m_rx_drop_conf.rx_thresh.hthresh = 0;
1202        m_rx_drop_conf.rx_thresh.wthresh = 0;
1203        m_rx_drop_conf.rx_free_thresh =32;
1204        m_rx_drop_conf.rx_drop_en=1;
1205
1206        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
1207        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
1208        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
1209
1210        m_port_conf.rxmode.jumbo_frame=1;
1211        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
1212        m_port_conf.rxmode.hw_strip_crc=1;
1213    }
1214
1215
1216
1217    inline void update_var(void){
1218        get_ex_drv()->update_configuration(this);
1219    }
1220
1221    inline void update_global_config_fdir(void){
1222        get_ex_drv()->update_global_config_fdir(this);
1223    }
1224
1225    /* enable FDIR */
1226    inline void update_global_config_fdir_10g(void){
1227        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1228        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
1229        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
1230        /* Offset of flexbytes field in RX packets (in 16-bit word units). */
1231        /* Note: divide by 2 to convert byte offset to word offset */
1232        if (get_is_stateless()) {
1233            m_port_conf.fdir_conf.flexbytes_offset = (14+4)/2;
1234            /* Increment offset 4 bytes for the case where we add VLAN */
1235            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
1236                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
1237            }
1238        } else {
1239            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ) {
1240                m_port_conf.fdir_conf.flexbytes_offset = (14+6)/2;
1241            } else {
1242                m_port_conf.fdir_conf.flexbytes_offset = (14+8)/2;
1243            }
1244
1245            /* Increment offset 4 bytes for the case where we add VLAN */
1246            if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) {
1247                m_port_conf.fdir_conf.flexbytes_offset += (4/2);
1248            }
1249        }
1250        m_port_conf.fdir_conf.drop_queue=1;
1251    }
1252
1253    inline void update_global_config_fdir_40g(void){
1254        m_port_conf.fdir_conf.mode=RTE_FDIR_MODE_PERFECT;
1255        m_port_conf.fdir_conf.pballoc=RTE_FDIR_PBALLOC_64K;
1256        m_port_conf.fdir_conf.status=RTE_FDIR_NO_REPORT_STATUS;
1257    }
1258
1259    struct rte_eth_conf     m_port_conf;
1260    struct rte_eth_rxconf   m_rx_conf;
1261    struct rte_eth_rxconf   m_rx_drop_conf;
1262    struct rte_eth_txconf   m_tx_conf;
1263};
1264
1265
1266/* this object is per core / per port / per queue
1267   each core will have 2 ports to send to
1268
1269
1270   port0                                port1
1271
1272   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1273
1274*/
1275
1276
/* Pair of a NIC register offset and its printable name; used by
   dump_stats_extended() to iterate over the registers it reports. */
typedef struct cnt_name_ {
    uint32_t offset;  /* register offset handed to pci_reg_read() */
    char * name;      /* register name, for display */
}cnt_name_t ;

/* Build a cnt_name_t entry from a register macro, stringifying the
   macro name for the display field. */
#define MY_REG(a) {a,(char *)#a}
1283
1284void CPhyEthIFStats::Clear() {
1285    ipackets = 0;
1286    ibytes = 0;
1287    f_ipackets = 0;
1288    f_ibytes = 0;
1289    opackets = 0;
1290    obytes = 0;
1291    ierrors = 0;
1292    oerrors = 0;
1293    imcasts = 0;
1294    rx_nombuf = 0;
1295    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1296    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1297    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1298}
1299
// dump all counters (even ones that equal 0)
void CPhyEthIFStats::DumpAll(FILE *fd) {
// Helper macros shared by the stats-dump routines below:
// DP_A4 always prints the counter; DP_A prints it only when non-zero.
// NOTE(review): both macros use printf, so output always goes to stdout
// and the 'fd' parameter is effectively ignored — confirm whether these
// should use fprintf(fd, ...) instead (other dump routines here do).
#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
}
1311
// dump all non zero counters
// NOTE(review): relies on the DP_A macro defined inside DumpAll() above,
// which prints via printf — so 'fd' is not actually used here.
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1325
// Dump the "ignored" traffic counters (e.g. ARP handled by the RX core).
// NOTE(review): uses the DP_A4 macro defined inside CPhyEthIFStats::DumpAll
// above, which prints via printf — so 'fd' is not actually used here.
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1334
1335// Clear the RX queue of an interface, dropping all packets
1336void CPhyEthIF::flush_rx_queue(void){
1337
1338    rte_mbuf_t * rx_pkts[32];
1339    int j=0;
1340    uint16_t cnt=0;
1341
1342    while (true) {
1343        j++;
1344        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1345        if ( cnt ) {
1346            int i;
1347            for (i=0; i<(int)cnt;i++) {
1348                rte_mbuf_t * m=rx_pkts[i];
1349                /*printf("rx--\n");
1350                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1351                rte_pktmbuf_free(m);
1352            }
1353        }
1354        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1355            break;
1356        }
1357    }
1358    if (cnt>0) {
1359        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1360    }
1361}
1362
1363
1364void CPhyEthIF::dump_stats_extended(FILE *fd){
1365
1366    cnt_name_t reg[]={
1367        MY_REG(IXGBE_GPTC), /* total packet */
1368        MY_REG(IXGBE_GOTCL), /* total bytes */
1369        MY_REG(IXGBE_GOTCH),
1370
1371        MY_REG(IXGBE_GPRC),
1372        MY_REG(IXGBE_GORCL),
1373        MY_REG(IXGBE_GORCH),
1374
1375
1376
1377        MY_REG(IXGBE_RXNFGPC),
1378        MY_REG(IXGBE_RXNFGBCL),
1379        MY_REG(IXGBE_RXNFGBCH),
1380        MY_REG(IXGBE_RXDGPC  ),
1381        MY_REG(IXGBE_RXDGBCL ),
1382        MY_REG(IXGBE_RXDGBCH  ),
1383        MY_REG(IXGBE_RXDDGPC ),
1384        MY_REG(IXGBE_RXDDGBCL ),
1385        MY_REG(IXGBE_RXDDGBCH  ),
1386        MY_REG(IXGBE_RXLPBKGPC ),
1387        MY_REG(IXGBE_RXLPBKGBCL),
1388        MY_REG(IXGBE_RXLPBKGBCH ),
1389        MY_REG(IXGBE_RXDLPBKGPC ),
1390        MY_REG(IXGBE_RXDLPBKGBCL),
1391        MY_REG(IXGBE_RXDLPBKGBCH ),
1392        MY_REG(IXGBE_TXDGPC      ),
1393        MY_REG(IXGBE_TXDGBCL     ),
1394        MY_REG(IXGBE_TXDGBCH     ),
1395        MY_REG(IXGBE_FDIRUSTAT ),
1396        MY_REG(IXGBE_FDIRFSTAT ),
1397        MY_REG(IXGBE_FDIRMATCH ),
1398        MY_REG(IXGBE_FDIRMISS )
1399
1400    };
1401    fprintf (fd," extended counters \n");
1402    int i;
1403    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1404        cnt_name_t *lp=&reg[i];
1405        uint32_t c=pci_reg_read(lp->offset);
1406        // xl710 bug. Counter values are -559038737 when they should be 0
1407        if (c && c != -559038737 ) {
1408            fprintf (fd," %s  : %d \n",lp->name,c);
1409        }
1410    }
1411}
1412
// Return the RX flow-statistics capability flags reported by the
// driver-specific backend for this interface.
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1416
1417
1418
1419void CPhyEthIF::configure(uint16_t nb_rx_queue,
1420                          uint16_t nb_tx_queue,
1421                          const struct rte_eth_conf *eth_conf){
1422    int ret;
1423    ret = rte_eth_dev_configure(m_port_id,
1424                                nb_rx_queue,
1425                                nb_tx_queue,
1426                                eth_conf);
1427
1428    if (ret < 0)
1429        rte_exit(EXIT_FAILURE, "Cannot configure device: "
1430                 "err=%d, port=%u\n",
1431                 ret, m_port_id);
1432
1433    /* get device info */
1434    rte_eth_dev_info_get(m_port_id, &m_dev_info);
1435
1436    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
1437        /* check if the device supports TCP and UDP checksum offloading */
1438        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
1439            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
1440                     "port=%u\n",
1441                     m_port_id);
1442        }
1443        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
1444            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
1445                     "port=%u\n",
1446                     m_port_id);
1447        }
1448    }
1449}
1450
1451
1452/*
1453
1454  rx-queue 0 - default- all traffic not goint to queue 1
1455  will be drop as queue is disable
1456
1457
1458  rx-queue 1 - Latency measurement packets will go here
1459
1460  pci_reg_write(IXGBE_L34T_IMIR(0),(1<<21));
1461
1462*/
1463
1464void CPhyEthIF::configure_rx_duplicate_rules(){
1465
1466    if ( get_is_rx_filter_enable() ){
1467
1468        if ( get_ex_drv()->is_hardware_filter_is_supported()==false ){
1469            printf(" ERROR this feature is not supported with current hardware \n");
1470            exit(1);
1471        }
1472        get_ex_drv()->configure_rx_filter_rules(this);
1473    }
1474}
1475
1476
1477void CPhyEthIF::stop_rx_drop_queue() {
1478    // In debug mode, we want to see all packets. Don't want to disable any queue.
1479    if ( get_vm_one_queue_enable() || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1480        return;
1481    }
1482    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1483        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1484            printf(" ERROR latency feature is not supported with current hardware  \n");
1485            exit(1);
1486        }
1487    }
1488    get_ex_drv()->stop_queue(this, MAIN_DPDK_DATA_Q);
1489}
1490
1491
1492void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1493                               uint16_t nb_rx_desc,
1494                               unsigned int socket_id,
1495                               const struct rte_eth_rxconf *rx_conf,
1496                               struct rte_mempool *mb_pool){
1497
1498    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1499                                     nb_rx_desc,
1500                                     socket_id,
1501                                     rx_conf,
1502                                     mb_pool);
1503    if (ret < 0)
1504        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1505                 "err=%d, port=%u\n",
1506                 ret, m_port_id);
1507}
1508
1509
1510
1511void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1512                               uint16_t nb_tx_desc,
1513                               unsigned int socket_id,
1514                               const struct rte_eth_txconf *tx_conf){
1515
1516    int ret = rte_eth_tx_queue_setup( m_port_id,
1517                                      tx_queue_id,
1518                                      nb_tx_desc,
1519                                      socket_id,
1520                                      tx_conf);
1521    if (ret < 0)
1522        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1523                 "err=%d, port=%u queue=%u\n",
1524                 ret, m_port_id, tx_queue_id);
1525
1526}
1527
1528void CPhyEthIF::stop(){
1529    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1530        rte_eth_dev_stop(m_port_id);
1531        rte_eth_dev_close(m_port_id);
1532    }
1533}
1534
1535void CPhyEthIF::start(){
1536
1537    get_ex_drv()->clear_extended_stats(this);
1538
1539    int ret;
1540
1541    m_bw_tx.reset();
1542    m_bw_rx.reset();
1543
1544    m_stats.Clear();
1545    int i;
1546    for (i=0;i<10; i++ ) {
1547        ret = rte_eth_dev_start(m_port_id);
1548        if (ret==0) {
1549            return;
1550        }
1551        delay(1000);
1552    }
1553    if (ret < 0)
1554        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1555                 "err=%d, port=%u\n",
1556                 ret, m_port_id);
1557
1558}
1559
1560// Disabling flow control on interface
1561void CPhyEthIF::disable_flow_control(){
1562    int ret;
1563    // see trex-64 issue with loopback on the same NIC
1564    struct rte_eth_fc_conf fc_conf;
1565    memset(&fc_conf,0,sizeof(fc_conf));
1566    fc_conf.mode=RTE_FC_NONE;
1567    fc_conf.autoneg=1;
1568    fc_conf.pause_time=100;
1569    int i;
1570    for (i=0; i<5; i++) {
1571        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1572        if (ret==0) {
1573            break;
1574        }
1575        delay(1000);
1576    }
1577    if (ret < 0)
1578        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1579                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1580                 ret, m_port_id);
1581}
1582
1583/*
1584Get user frienly devices description from saved env. var
1585Changes certain attributes based on description
1586*/
1587void DpdkTRexPortAttr::update_description(){
1588    struct rte_pci_addr pci_addr;
1589    char pci[16];
1590    char * envvar;
1591    std::string pci_envvar_name;
1592    pci_addr = rte_eth_devices[m_port_id].pci_dev->addr;
1593    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
1594    intf_info_st.pci_addr = pci;
1595    pci_envvar_name = "pci" + intf_info_st.pci_addr;
1596    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
1597    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
1598    envvar = std::getenv(pci_envvar_name.c_str());
1599    if (envvar) {
1600        intf_info_st.description = envvar;
1601    } else {
1602        intf_info_st.description = "Unknown";
1603    }
1604    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
1605        flag_is_link_change_supported = false;
1606    }
1607    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
1608        flag_is_fc_change_supported = false;
1609        flag_is_led_change_supported = false;
1610    }
1611    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
1612        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
1613    }
1614}
1615
1616int DpdkTRexPortAttr::set_led(bool on){
1617    if (on) {
1618        return rte_eth_led_on(m_port_id);
1619    }else{
1620        return rte_eth_led_off(m_port_id);
1621    }
1622}
1623
1624int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1625    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1626    if (ret) {
1627        mode = -1;
1628        return ret;
1629    }
1630    mode = (int) fc_conf_tmp.mode;
1631    return 0;
1632}
1633
1634int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1635    if (!flag_is_fc_change_supported) {
1636        return -ENOTSUP;
1637    }
1638    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1639    if (ret) {
1640        return ret;
1641    }
1642    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1643    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1644}
1645
// Reset the port's DPDK extended statistics counters.
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1649
1650int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
1651    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
1652    if (size < 0) {
1653        return size;
1654    }
1655    xstats_values_tmp.resize(size);
1656    xstats_values.resize(size);
1657    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
1658    if (size < 0) {
1659        return size;
1660    }
1661    for (int i=0; i<size; i++) {
1662        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
1663    }
1664    return 0;
1665}
1666
1667int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1668    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1669    if (size < 0) {
1670        return size;
1671    }
1672    xstats_names_tmp.resize(size);
1673    xstats_names.resize(size);
1674    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1675    if (size < 0) {
1676        return size;
1677    }
1678    for (int i=0; i<size; i++) {
1679        xstats_names[i] = xstats_names_tmp[i].name;
1680    }
1681    return 0;
1682}
1683
1684void DpdkTRexPortAttr::dump_link(FILE *fd){
1685    fprintf(fd,"port : %d \n",(int)m_port_id);
1686    fprintf(fd,"------------\n");
1687
1688    fprintf(fd,"link         : ");
1689    if (m_link.link_status) {
1690        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1691                (unsigned) m_link.link_speed,
1692                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1693                ("full-duplex") : ("half-duplex\n"));
1694    } else {
1695        fprintf(fd," Link Down\n");
1696    }
1697    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1698}
1699
// Refresh the cached rte_eth_dev_info for this port.
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1703
1704void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1705    uint32_t speed_capa = dev_info.speed_capa;
1706    if (speed_capa & ETH_LINK_SPEED_1G)
1707        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1708    if (speed_capa & ETH_LINK_SPEED_10G)
1709        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1710    if (speed_capa & ETH_LINK_SPEED_40G)
1711        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1712    if (speed_capa & ETH_LINK_SPEED_100G)
1713        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1714}
1715
// Refresh the cached link state using the (possibly blocking) DPDK query.
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1719
1720bool DpdkTRexPortAttr::update_link_status_nowait(){
1721    rte_eth_link new_link;
1722    bool changed = false;
1723    rte_eth_link_get_nowait(m_port_id, &new_link);
1724
1725    if (new_link.link_speed != m_link.link_speed ||
1726                new_link.link_duplex != m_link.link_duplex ||
1727                    new_link.link_autoneg != m_link.link_autoneg ||
1728                        new_link.link_status != m_link.link_status) {
1729        changed = true;
1730
1731        /* in case of link status change - notify the dest object */
1732        if (new_link.link_status != m_link.link_status) {
1733            get_dest().on_link_down();
1734        }
1735    }
1736
1737    m_link = new_link;
1738    return changed;
1739}
1740
1741int DpdkTRexPortAttr::add_mac(char * mac){
1742    struct ether_addr mac_addr;
1743    for (int i=0; i<6;i++) {
1744        mac_addr.addr_bytes[i] =mac[i];
1745    }
1746    return rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0);
1747}
1748
1749int DpdkTRexPortAttr::set_promiscuous(bool enable){
1750    if (enable) {
1751        rte_eth_promiscuous_enable(m_port_id);
1752    }else{
1753        rte_eth_promiscuous_disable(m_port_id);
1754    }
1755    return 0;
1756}
1757
1758int DpdkTRexPortAttr::set_link_up(bool up){
1759    if (up) {
1760        return rte_eth_dev_set_link_up(m_port_id);
1761    }else{
1762        return rte_eth_dev_set_link_down(m_port_id);
1763    }
1764}
1765
1766bool DpdkTRexPortAttr::get_promiscuous(){
1767    int ret=rte_eth_promiscuous_get(m_port_id);
1768    if (ret<0) {
1769        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1770                 "err=%d, port=%u\n",
1771                 ret, m_port_id);
1772
1773    }
1774    return ( ret?true:false);
1775}
1776
1777
// Read the port's current default MAC address into mac_addr.
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1781
// Delegate the flow-director statistics dump to the driver-specific handler.
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1785
/* Dump all non-zero ixgbe HW statistic counters to fd.
   DP_A1 prints a scalar field only if it is non-zero; DP_A2 does the
   same for each element of an array field. Commented-out entries are
   counters deliberately excluded from the report. */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1872
1873void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
1874    // reading m_stats, so drivers saving prev in m_stats will be updated.
1875    // Actually, we want m_stats to be cleared
1876    get_ex_drv()->get_extended_stats(this, &m_stats);
1877
1878    m_ignore_stats.ipackets = m_stats.ipackets;
1879    m_ignore_stats.ibytes = m_stats.ibytes;
1880    m_ignore_stats.opackets = m_stats.opackets;
1881    m_ignore_stats.obytes = m_stats.obytes;
1882    m_stats.ipackets = 0;
1883    m_stats.opackets = 0;
1884    m_stats.ibytes = 0;
1885    m_stats.obytes = 0;
1886
1887    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
1888    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;
1889
1890    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
1891        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
1892        m_ignore_stats.dump(stdout);
1893    }
1894}
1895
1896void CPhyEthIF::dump_stats(FILE *fd){
1897
1898    update_counters();
1899
1900    fprintf(fd,"port : %d \n",(int)m_port_id);
1901    fprintf(fd,"------------\n");
1902    m_stats.DumpAll(fd);
1903    //m_stats.Dump(fd);
1904    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
1905    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
1906}
1907
// Reset both the HW counters and their SW mirror.
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
1912
1913class CCorePerPort  {
1914public:
1915    CCorePerPort (){
1916        m_tx_queue_id=0;
1917        m_len=0;
1918        int i;
1919        for (i=0; i<MAX_PKT_BURST; i++) {
1920            m_table[i]=0;
1921        }
1922        m_port=0;
1923    }
1924    uint8_t                 m_tx_queue_id;
1925    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
1926    uint16_t                m_len;
1927    rte_mbuf_t *            m_table[MAX_PKT_BURST];
1928    CPhyEthIF  *            m_port;
1929};
1930
1931
1932#define MAX_MBUF_CACHE 100
1933
1934
/* per core/gbe queue port for transmit */
1936class CCoreEthIF : public CVirtualIF {
1937public:
1938    enum {
1939     INVALID_Q_ID = 255
1940    };
1941
1942public:
1943
1944    CCoreEthIF(){
1945        m_mbuf_cache=0;
1946    }
1947
1948    bool Create(uint8_t             core_id,
1949                uint8_t            tx_client_queue_id,
1950                CPhyEthIF  *        tx_client_port,
1951                uint8_t            tx_server_queue_id,
1952                CPhyEthIF  *        tx_server_port,
1953                uint8_t             tx_q_id_lat);
1954    void Delete();
1955
1956    virtual int open_file(std::string file_name){
1957        return (0);
1958    }
1959
1960    virtual int close_file(void){
1961        return (flush_tx_queue());
1962    }
1963    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
1964                                                       , CCorePerPort *  lp_port
1965                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
1966    virtual int send_node(CGenNode * node);
1967    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
1968    virtual int flush_tx_queue(void);
1969    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
1970
1971    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
1972
1973    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
1974
1975    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
1976
1977    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
1978    void GetCoreCounters(CVirtualIFPerSideStats *stats);
1979    void DumpCoreStats(FILE *fd);
1980    void DumpIfStats(FILE *fd);
1981    static void DumpIfCfgHeader(FILE *fd);
1982    void DumpIfCfg(FILE *fd);
1983
1984    socket_id_t get_socket_id(){
1985        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
1986    }
1987
1988    const CCorePerPort * get_ports() {
1989        return m_ports;
1990    }
1991
1992protected:
1993
1994    int send_burst(CCorePerPort * lp_port,
1995                   uint16_t len,
1996                   CVirtualIFPerSideStats  * lp_stats);
1997    int send_pkt(CCorePerPort * lp_port,
1998                 rte_mbuf_t *m,
1999                 CVirtualIFPerSideStats  * lp_stats);
2000    int send_pkt_lat(CCorePerPort * lp_port,
2001                 rte_mbuf_t *m,
2002                 CVirtualIFPerSideStats  * lp_stats);
2003
2004    void add_vlan(rte_mbuf_t *m, uint16_t vlan_id);
2005
2006protected:
2007    uint8_t      m_core_id;
2008    uint16_t     m_mbuf_cache;
2009    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
2010    CNodeRing *  m_ring_to_rx;
2011
2012} __rte_cache_aligned; ;
2013
/* Stateless-mode DP interface: adds flow-stat accounting and PCAP/slow-path
   node handling on top of CCoreEthIF. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);
    virtual int send_node(CGenNode * node);
protected:
    // dispatch for nodes marked as slow path (currently only PCAP nodes)
    int handle_slow_path_node(CGenNode *node);
    int send_pcap_node(CGenNodePCAP *pcap_node);
};
2023
2024bool CCoreEthIF::Create(uint8_t             core_id,
2025                        uint8_t             tx_client_queue_id,
2026                        CPhyEthIF  *        tx_client_port,
2027                        uint8_t             tx_server_queue_id,
2028                        CPhyEthIF  *        tx_server_port,
2029                        uint8_t tx_q_id_lat ) {
2030    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
2031    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
2032    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2033    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
2034    m_ports[SERVER_SIDE].m_port        = tx_server_port;
2035    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2036    m_core_id = core_id;
2037
2038    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
2039    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
2040    assert( m_ring_to_rx);
2041    return (true);
2042}
2043
2044int CCoreEthIF::flush_tx_queue(void){
2045    /* flush both sides */
2046    pkt_dir_t dir;
2047    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2048        CCorePerPort * lp_port = &m_ports[dir];
2049        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2050        if ( likely(lp_port->m_len > 0) ) {
2051            send_burst(lp_port, lp_port->m_len, lp_stats);
2052            lp_port->m_len = 0;
2053        }
2054    }
2055
2056    return 0;
2057}
2058
2059void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2060    stats->Clear();
2061    pkt_dir_t   dir ;
2062    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2063        stats->Add(&m_stats[dir]);
2064    }
2065}
2066
2067void CCoreEthIF::DumpCoreStats(FILE *fd){
2068    fprintf (fd,"------------------------ \n");
2069    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2070    fprintf (fd,"------------------------ \n");
2071
2072    CVirtualIFPerSideStats stats;
2073    GetCoreCounters(&stats);
2074    stats.Dump(stdout);
2075}
2076
// Print the column header matching the rows produced by DumpIfCfg().
void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
    fprintf (fd," ------------------------------------------\n");
}
2081
// Print this core's port/queue assignment as one table row (see DumpIfCfgHeader).
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2091
2092
2093void CCoreEthIF::DumpIfStats(FILE *fd){
2094
2095    fprintf (fd,"------------------------ \n");
2096    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2097    fprintf (fd,"------------------------ \n");
2098
2099    const char * t[]={"client","server"};
2100    pkt_dir_t   dir ;
2101    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2102        CCorePerPort * lp=&m_ports[dir];
2103        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2104        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2105        fprintf (fd," ---------------------------- \n");
2106        lpstats->Dump(fd);
2107    }
2108}
2109
2110#define DELAY_IF_NEEDED
2111
/* Transmit `len` mbufs from lp_port's pending table on its regular TX queue.
   With DELAY_IF_NEEDED (the default, defined above) the call busy-waits until
   the NIC accepts every packet, counting full-queue events; otherwise the
   untransmitted tail is dropped and freed. Always returns 0. */
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    /* retry the unsent remainder until the queue drains */
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2140
2141
2142int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2143                         rte_mbuf_t      *m,
2144                         CVirtualIFPerSideStats  * lp_stats
2145                         ){
2146
2147    uint16_t len = lp_port->m_len;
2148    lp_port->m_table[len]=m;
2149    len++;
2150    /* enough pkts to be sent */
2151    if (unlikely(len == MAX_PKT_BURST)) {
2152        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2153        len = 0;
2154    }
2155    lp_port->m_len = len;
2156
2157    return (0);
2158}
2159
/* Send one latency packet on the dedicated latency TX queue.
   With DELAY_IF_NEEDED (defined above) this busy-waits until the NIC
   accepts the packet, so the return is always 1; otherwise a full queue
   drops the packet and returns 0. */
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    /* spin until the queue accepts the packet, counting full-queue events */
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    /* queue full: drop and free the packet */
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2184
2185void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2186                              rte_mbuf_t      *m){
2187    CCorePerPort *  lp_port=&m_ports[dir];
2188    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2189    send_pkt(lp_port,m,lp_stats);
2190    /* flush */
2191    send_burst(lp_port,lp_port->m_len,lp_stats);
2192    lp_port->m_len = 0;
2193}
2194
/* Send a packet belonging to a flow-stat stream and update per-flow counters.
   For payload rules (hw_id >= MAX_FLOW_STATS) a latency header is filled in
   and the packet goes out on the dedicated latency queue; for ip-id rules the
   packet is sent unmodified on the regular queue. */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // NOTE(review): fsp_head is assumed to be set non-NULL by
        // alloc_flow_stat_mbuf() - confirm it cannot leave it NULL here.
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // timestamp as late as possible, right before transmit
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2248
2249int CCoreEthIFStateless::send_node(CGenNode * no) {
2250    /* if a node is marked as slow path - single IF to redirect it to slow path */
2251    if (no->get_is_slow_path()) {
2252        return handle_slow_path_node(no);
2253    }
2254
2255    CGenNodeStateless * node_sl=(CGenNodeStateless *) no;
2256
2257    /* check that we have mbuf  */
2258    rte_mbuf_t *    m;
2259
2260    pkt_dir_t dir=(pkt_dir_t)node_sl->get_mbuf_cache_dir();
2261    CCorePerPort *  lp_port=&m_ports[dir];
2262    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2263    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2264        m=node_sl->cache_mbuf_array_get_cur();
2265        rte_pktmbuf_refcnt_update(m,1);
2266    }else{
2267        m=node_sl->get_cache_mbuf();
2268
2269        if (m) {
2270            /* cache case */
2271            rte_pktmbuf_refcnt_update(m,1);
2272        }else{
2273            m=node_sl->alloc_node_with_vm();
2274            assert(m);
2275        }
2276    }
2277
2278    if (unlikely(node_sl->is_stat_needed())) {
2279        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2280            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2281            // assert here just to make sure.
2282            assert(1);
2283        }
2284        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2285    } else {
2286        send_pkt(lp_port,m,lp_stats);
2287    }
2288
2289    return (0);
2290};
2291
2292int CCoreEthIFStateless::send_pcap_node(CGenNodePCAP *pcap_node) {
2293    rte_mbuf_t *m = pcap_node->get_pkt();
2294    if (!m) {
2295        return (-1);
2296    }
2297
2298    pkt_dir_t dir = (pkt_dir_t)pcap_node->get_mbuf_dir();
2299    CCorePerPort *lp_port=&m_ports[dir];
2300    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2301
2302    send_pkt(lp_port, m, lp_stats);
2303
2304    return (0);
2305}
2306
2307/**
2308 * slow path code goes here
2309 *
2310 */
2311int CCoreEthIFStateless::handle_slow_path_node(CGenNode * no) {
2312
2313    if (no->m_type == CGenNode::PCAP_PKT) {
2314        return send_pcap_node((CGenNodePCAP *)no);
2315    }
2316
2317    return (-1);
2318}
2319
2320void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2321
2322    assert(cfg);
2323
2324    /* take the right direction config */
2325    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2326
2327    /* dst mac */
2328    if (cfg_dir.has_dst_mac_addr()) {
2329        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2330    }
2331
2332    /* src mac */
2333    if (cfg_dir.has_src_mac_addr()) {
2334        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2335    }
2336
2337    /* VLAN */
2338    if (cfg_dir.has_vlan()) {
2339        add_vlan(m, cfg_dir.get_vlan());
2340    }
2341}
2342
2343
/* Mark the mbuf for HW VLAN tag insertion on TX.
   NOTE(review): ol_flags is assigned, not OR-ed, so any previously set
   offload flags are discarded here - confirm this is intentional. */
void CCoreEthIF::add_vlan(rte_mbuf_t *m, uint16_t vlan_id) {
    m->ol_flags = PKT_TX_VLAN_PKT;
    m->l2_len   = 14;
    m->vlan_tci = vlan_id;
}
2349
/**
 * slow-path features go here (avoid multiple IFs)
 *
 */
2354void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2355
2356
2357    /* MAC ovverride */
2358    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2359        /* client side */
2360        if ( node->is_initiator_pkt() ) {
2361            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2362        }
2363    }
2364
2365    /* flag is faster than checking the node pointer (another cacheline) */
2366    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2367        apply_client_cfg(node->m_client_cfg, m, dir, p);
2368    }
2369
2370}
2371
2372int CCoreEthIF::send_node(CGenNode * node) {
2373
2374    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2375        pkt_dir_t       dir;
2376        rte_mbuf_t *    m=node->get_cache_mbuf();
2377        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2378        CCorePerPort *  lp_port=&m_ports[dir];
2379        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2380        rte_pktmbuf_refcnt_update(m,1);
2381        send_pkt(lp_port,m,lp_stats);
2382        return (0);
2383    }
2384
2385
2386    CFlowPktInfo *  lp=node->m_pkt_info;
2387    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2388
2389    pkt_dir_t       dir;
2390    bool            single_port;
2391
2392    dir         = node->cur_interface_dir();
2393    single_port = node->get_is_all_flow_from_same_dir() ;
2394
2395
2396    if ( unlikely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
2397        /* which vlan to choose 0 or 1*/
2398        uint8_t vlan_port = (node->m_src_ip &1);
2399        uint16_t vlan_id  = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2400
2401        if (likely( vlan_id >0 ) ) {
2402            dir = dir ^ vlan_port;
2403        }else{
2404            /* both from the same dir but with VLAN0 */
2405            vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2406            dir = dir ^ 0;
2407        }
2408
2409        add_vlan(m, vlan_id);
2410    }
2411
2412    CCorePerPort *lp_port = &m_ports[dir];
2413    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2414
2415    if (unlikely(m==0)) {
2416        lp_stats->m_tx_alloc_error++;
2417        return(0);
2418    }
2419
2420    /* update mac addr dest/src 12 bytes */
2421    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2422    uint8_t p_id = lp_port->m_port->get_port_id();
2423
2424    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2425
2426     /* when slowpath features are on */
2427    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2428        handle_slowpath_features(node, m, p, dir);
2429    }
2430
2431
2432    if ( unlikely( node->is_rx_check_enabled() ) ) {
2433        lp_stats->m_tx_rx_check_pkt++;
2434        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2435        lp_stats->m_template.inc_template( node->get_template_id( ));
2436    }else{
2437        // cache only if it is not sample as this is more complex mbuf struct
2438        if ( unlikely( node->can_cache_mbuf() ) ) {
2439            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2440                m_mbuf_cache++;
2441                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2442                    /* limit the number of object to cache */
2443                    node->set_mbuf_cache_dir( dir);
2444                    node->set_cache_mbuf(m);
2445                    rte_pktmbuf_refcnt_update(m,1);
2446                }
2447            }
2448        }
2449    }
2450
2451    /*printf("send packet -- \n");
2452      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2453
2454    /* send the packet */
2455    send_pkt(lp_port,m,lp_stats);
2456    return (0);
2457}
2458
2459
2460int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2461    assert(p);
2462    assert(dir<2);
2463
2464    CCorePerPort *  lp_port=&m_ports[dir];
2465    uint8_t p_id=lp_port->m_port->get_port_id();
2466    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2467    return (0);
2468}
2469
2470pkt_dir_t
2471CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2472
2473    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2474        if (m_ports[dir].m_port->get_port_id() == port_id) {
2475            return dir;
2476        }
2477    }
2478
2479    return (CS_INVALID);
2480}
2481
/* Latency port backed directly by HW TX/RX queues of a physical port. */
class CLatencyHWPort : public CPortLatencyHWBase {
public:
    // bind to a physical port and its dedicated latency queues
    void Create(CPhyEthIF  * p,
                uint8_t tx_queue,
                uint8_t rx_queue){
        m_port=p;
        m_tx_queue_id=tx_queue;
        m_rx_queue_id=rx_queue;
    }

    /* send one latency packet; tags it with VLAN 0's id when VLAN mode is on.
       Returns 0 on success, -1 (packet freed) when the TX queue is full. */
    virtual int tx(rte_mbuf_t * m){
        rte_mbuf_t * tx_pkts[2];
        tx_pkts[0]=m;
        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
            /* vlan mode is the default */
            /* set the vlan */
            m->ol_flags = PKT_TX_VLAN_PKT;
            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
            m->l2_len   =14;
        }
        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
        if ( res == 0 ) {
            rte_pktmbuf_free(m);
            //printf(" queue is full for latency packet !!\n");
            return (-1);

        }
#if 0
        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
        utl_DumpBuffer(stdout,p1,pkt_size1,0);
#endif

        return (0);
    }
    /* receive a single packet from the latency RX queue, or NULL */
    virtual rte_mbuf_t * rx(){
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
        if (cnt) {
            return (rx_pkts[0]);
        }else{
            return (0);
        }
    }

    /* receive up to nb_pkts packets; returns the number received */
    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
                              uint16_t nb_pkts){
        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
        return (cnt);
    }


private:
    CPhyEthIF  * m_port;       // physical port
    uint8_t      m_tx_queue_id ; // latency TX queue
    uint8_t      m_rx_queue_id;  // latency RX queue
};
2540
2541
/* Latency port for virtual setups: TX goes through a message ring to a DP
   core instead of straight to the NIC; RX still reads queue 0 directly. */
class CLatencyVmPort : public CPortLatencyHWBase {
public:
    void Create(uint8_t port_index,CNodeRing * ring,
                CLatencyManager * mgr, CPhyEthIF  * p) {
        // even port index -> dir 0, odd -> dir 1
        m_dir        = (port_index%2);
        m_ring_to_dp = ring;
        m_mgr        = mgr;
        m_port = p;
    }

    /* wrap the packet in a latency node and enqueue it towards the DP core.
       Returns 0 on success, -1 when node allocation or enqueue fails. */
    virtual int tx(rte_mbuf_t * m){
        if ( likely( CGlobalInfo::m_options.preview.get_vlan_mode_enable() ) ){
            /* vlan mode is the default */
            /* set the vlan */
            m->ol_flags = PKT_TX_VLAN_PKT;
            m->vlan_tci =CGlobalInfo::m_options.m_vlan_port[0];
            m->l2_len   =14;
        }

        /* allocate node */
        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if ( node ) {
            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
            node->m_dir      = m_dir;
            node->m_pkt      = m;
            node->m_latency_offset = m_mgr->get_latency_header_offset();

            if ( m_ring_to_dp->Enqueue((CGenNode*)node) ==0 ){
                return (0);
            }
        }
        return (-1);
    }

    /* receive a single packet from RX queue 0, or NULL */
    virtual rte_mbuf_t * rx() {
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, 1);
        if (cnt) {
            return (rx_pkts[0]);
        } else {
            return (0);
        }
    }

    /* receive up to nb_pkts packets from RX queue 0 */
    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts) {
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, nb_pkts);
        return (cnt);
    }

private:
    CPhyEthIF  * m_port;                          // physical port for RX
    uint8_t                          m_dir;       // traffic direction (0/1)
    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
    CLatencyManager *                m_mgr;       // provides latency header offset
};
2597
2598
2599
/* Per-port counter snapshot plus derived rates, filled by the CP core
   and consumed by the dump/JSON publishing paths. */
class CPerPortStats {
public:
    uint64_t opackets;   // TX packet count
    uint64_t obytes;     // TX byte count
    uint64_t ipackets;   // RX packet count
    uint64_t ibytes;     // RX byte count
    uint64_t ierrors;    // RX errors reported for this port
    uint64_t oerrors;    // TX errors reported for this port
    // per-flow TX counters; indexed by HW flow-stat id (regular + payload slots)
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    // previous sample of the above, used for delta computation elsewhere
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;   // TX rate, bits/sec
    float     m_total_tx_pps;   // TX rate, packets/sec

    float     m_total_rx_bps;   // RX rate, bits/sec
    float     m_total_rx_pps;   // RX rate, packets/sec

    float     m_cpu_util;       // CPU utilization attributed to this port (%)
};
2619
/* Aggregated global statistics (all ports and threads), with helpers to
   render them as plain text or as a JSON document for the publisher. */
class CGlobalStats {
public:
    enum DumpFormat {
        dmpSTANDARD,   // one section per port
        dmpTABLE       // ports as table columns
    };

    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    uint64_t  m_total_alloc_error;   // mbuf/node allocation failures
    uint64_t  m_total_queue_full;    // TX queue-full events
    uint64_t  m_total_queue_drop;    // packets dropped due to full queues

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    // NAT learn-mode counters
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;

    float     m_socket_util;

    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;       // smoothed DP CPU utilization (%)
    float m_cpu_util_raw;   // unsmoothed DP CPU utilization (%)
    float m_rx_cpu_util;    // RX core CPU utilization (%)
    float m_bw_per_core;    // Gb/sec per core, derived metric
    uint8_t m_threads;

    uint32_t      m_num_of_ports;           // number of valid entries in m_port
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    void dump_json(std::string & json, bool baseline);
private:
    // JSON key/value formatting helpers ("name":value, with trailing comma)
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2683
2684std::string CGlobalStats::get_field(const char *name, float &f){
2685    char buff[200];
2686    if(f <= -10.0 or f >= 10.0)
2687        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2688    else
2689        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2690    return (std::string(buff));
2691}
2692
2693std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2694    char buff[200];
2695    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2696    return (std::string(buff));
2697}
2698
2699std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2700    char buff[200];
2701    if(f <= -10.0 or f >= 10.0)
2702        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2703    else
2704        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2705    return (std::string(buff));
2706}
2707
2708std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2709    char buff[200];
2710    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2711    return (std::string(buff));
2712}
2713
2714
/* Serialize all global + per-port statistics into a single JSON document
   for the ZMQ publisher. The field order is fixed by the sequence of
   appends below; consumers may rely on it, so do not reorder. */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    /* timestamp in host high-resolution ticks plus the tick frequency */
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

/* stringify the member name itself as the JSON key */
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    /* NAT learn-mode counters - note the stray spaces inside some field
       names are stripped by #f stringification of the member expression */
    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    /* per-port fields are suffixed "-<port index>" by GET_FIELD_PORT */
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    /* dummy terminal field absorbs the trailing comma left by the appends */
    json+="\"unknown\":0}}"  ;
}
2788
/* Print the human-readable global summary: CPU, rates, flows, and - when
   NAT learn mode is active - the NAT counters interleaved on the same rows. */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    /* each rate row optionally carries a NAT counter in its right column */
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    /* NOTE(review): division below is unguarded - presumably m_total_clients
       is non-zero whenever this runs; confirm for stateless mode */
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    /* error counters are printed only when non-zero to keep output compact */
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
2873
2874
2875void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
2876    int i;
2877    int port_to_show=m_num_of_ports;
2878    if (port_to_show>4) {
2879        port_to_show=4;
2880        fprintf (fd," per port - limited to 4   \n");
2881    }
2882
2883
2884    if ( mode== dmpSTANDARD ){
2885        fprintf (fd," --------------- \n");
2886        for (i=0; i<(int)port_to_show; i++) {
2887            CPerPortStats * lp=&m_port[i];
2888            fprintf(fd,"port : %d \n",(int)i);
2889            fprintf(fd,"------------\n");
2890#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2891#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
2892            GS_DP_A4(opackets);
2893            GS_DP_A4(obytes);
2894            GS_DP_A4(ipackets);
2895            GS_DP_A4(ibytes);
2896            GS_DP_A(ierrors);
2897            GS_DP_A(oerrors);
2898            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2899        }
2900    }else{
2901        fprintf(fd," %10s ","ports");
2902        for (i=0; i<(int)port_to_show; i++) {
2903            fprintf(fd,"| %15d ",i);
2904        }
2905        fprintf(fd,"\n");
2906        fprintf(fd," -----------------------------------------------------------------------------------------\n");
2907        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
2908        };
2909        for (i=0; i<7; i++) {
2910            fprintf(fd," %10s ",names[i].c_str());
2911            int j=0;
2912            for (j=0; j<port_to_show;j++) {
2913                CPerPortStats * lp=&m_port[j];
2914                uint64_t cnt;
2915                switch (i) {
2916                case 0:
2917                    cnt=lp->opackets;
2918                    fprintf(fd,"| %15lu ",cnt);
2919
2920                    break;
2921                case 1:
2922                    cnt=lp->obytes;
2923                    fprintf(fd,"| %15lu ",cnt);
2924
2925                    break;
2926                case 2:
2927                    cnt=lp->ipackets;
2928                    fprintf(fd,"| %15lu ",cnt);
2929
2930                    break;
2931                case 3:
2932                    cnt=lp->ibytes;
2933                    fprintf(fd,"| %15lu ",cnt);
2934
2935                    break;
2936                case 4:
2937                    cnt=lp->ierrors;
2938                    fprintf(fd,"| %15lu ",cnt);
2939
2940                    break;
2941                case 5:
2942                    cnt=lp->oerrors;
2943                    fprintf(fd,"| %15lu ",cnt);
2944
2945                    break;
2946                case 6:
2947                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
2948                    break;
2949                default:
2950                    cnt=0xffffff;
2951                }
2952            } /* ports */
2953            fprintf(fd, "\n");
2954        }/* fields*/
2955    }
2956
2957
2958}
2959
/* Top-level singleton-style object of the TRex process: owns the physical
   ports, the per-core interfaces, the latency/RX machinery, the publisher
   and the stats pipeline. Core layout convention (see get_cores_tx):
   core 0 = master, last core = latency/RX, the rest are DP cores. */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;


    /* default-construct with conservative values; real sizing happens in
       Create()/the *_prob_init() probes */
    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    bool Create();
    void Delete();
    int  ixgbe_prob_init();
    int  cores_prob_init();
    int  queues_prob_init();
    int  ixgbe_start();
    int  ixgbe_rx_queue_flush();
    void ixgbe_configure_mg();
    void rx_sl_configure();
    bool is_all_links_are_up(bool dump=false);
    void pre_test();

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        /* first cause wins; later requests are ignored */
        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    /* index of the core running the RX thread, or -1 when disabled */
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    /* number of cores available for TX (DP) work */
    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port; // Number of TX queues per port
    uint32_t    m_cores_to_dual_ports; /* number of TX cores allocated for each port pair */
    uint16_t    m_rx_core_tx_q_id; /* TX q used by rx core */
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];    /* points to either the sf or sl array per mode */
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock;   // guards control-plane state

    TrexMonitor         m_monitor;

    shutdown_rc_e       m_mark_for_shutdown;

public:
    TrexStateless       *m_trex_stateless;

};
3141
// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
// Flow: collect per-port IP/MAC info (from client config or trex_cfg), put all
// ports in receive-all mode, send gratuitous ARPs, retry resolution up to 10
// times, then apply the resolved MACs and restore normal port filtering.
void CGlobalTRex::pre_test() {
    CPretest pretest(m_max_ports);
    bool resolve_needed = false;
    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
    // need_grat_arp[] is only filled (and later read) on the non-client-cfg path
    bool need_grat_arp[TREX_MAX_PORTS];

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        std::vector<ClientCfgCompactEntry *> conf;
        m_fl.get_client_cfg_ip_list(conf);

        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            // NOTE(review): shadows the outer empty_mac; same value, harmless
            uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
                rte_eth_macaddr_get(port_id,
                                    (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
            }
            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
        }

        // register every client-config destination as a next hop to resolve,
        // and every source IP as a gratuitous-ARP sender
        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
            uint8_t port = (*it)->get_port();
            uint16_t vlan = (*it)->get_vlan();
            uint32_t count = (*it)->get_count();
            uint32_t dst_ip = (*it)->get_dst_ip();
            uint32_t src_ip = (*it)->get_src_ip();

            for (int i = 0; i < count; i++) {
                //??? handle ipv6;
                if ((*it)->is_ipv4()) {
                    pretest.add_next_hop(port, dst_ip + i, vlan);
                }
            }
            if (!src_ip) {
                // fall back to the per-port IP from the TRex config file
                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
                if (!src_ip) {
                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
                            , port, ip_to_str(dst_ip).c_str(), vlan);
                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
                    exit(1);
                }
            }
            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
            m_mg.add_grat_arp_src(ipv4);

            delete *it;
        }
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            fprintf(stdout, "*******Pretest for client cfg********\n");
            pretest.dump(stdout);
            }
    } else {
        // no client config: derive everything from the per-port TRex config
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                resolve_needed = true;
            } else {
                resolve_needed = false;
            }
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
                rte_eth_macaddr_get(port_id,
                                    (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);
                need_grat_arp[port_id] = true;
            } else {
                // If we got src MAC from config file, do not send gratuitous ARP for it
                // (for compatibility with old behaviour)
                need_grat_arp[port_id] = false;
            }

            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);

            if (resolve_needed) {
                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }
    }

    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        CPhyEthIF *pif = &m_ports[port_id];
        // Configure port to send all packets to software
        CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
    }


    // announce ourselves, then retry resolution up to 10 times
    pretest.send_grat_arp_all();
    bool ret;
    int count = 0;
    bool resolve_failed = false;
    do {
        ret = pretest.resolve_all();
        count++;
    } while ((ret != true) && (count < 10));
    if (ret != true) {
        resolve_failed = true;
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
        fprintf(stdout, "*******Pretest after resolving ********\n");
        pretest.dump(stdout);
    }

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        CManyIPInfo pretest_result;
        pretest.get_results(pretest_result);
        if (resolve_failed) {
            // client-cfg mode cannot proceed with unresolved IPs - report and exit
            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
                   ip = pretest_result.get_next()) {
                if (ip->resolve_needed()) {
                    ip->dump(stderr, "  ");
                }
            }
            exit(1);
        }
        m_fl.set_client_config_resolved_macs(pretest_result);
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            m_fl.dump_client_config(stdout);
        }

        bool port_found[TREX_MAX_PORTS];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            port_found[port_id] = false;
        }
        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
            // Use first MAC/vlan we see on each port
            uint8_t port_id = ip->get_port();
            uint16_t vlan = ip->get_vlan();
            if ( ! port_found[port_id]) {
                port_found[port_id] = true;
                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
            }
        }
    } else {
        uint8_t mac[ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                // we don't have dest MAC. Get it from what we resolved.
                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();

                if (!pretest.get_mac(port_id, ip, vlan, mac)) {
                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);

                    // stateless can run without a resolved gateway; stateful cannot
                    if (get_is_stateless()) {
                        continue;
                    } else {
                        exit(1);
                    }
                }



                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
                                      , port_id);
                    m_mg.add_grat_arp_src(ipv4);
                }
            }

            // update statistics baseline, so we can ignore what happened in pre test phase
            CPhyEthIF *pif = &m_ports[port_id];
            CPreTestStats pre_stats = pretest.get_stats(port_id);
            pif->set_ignore_stats_base(pre_stats);

            // Configure port back to normal mode. Only relevant packets handled by software.
            CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, false);

           /* set resolved IPv4 */
           uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
           const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;
           if (dg) {
               m_ports[port_id].get_port_attr()->get_dest().set_dest(dg, dst_mac);
           } else {
               m_ports[port_id].get_port_attr()->get_dest().set_dest(dst_mac);
           }

        }
    }
}
3334
3335/**
3336 * check for a single core
3337 *
3338 * @author imarom (19-Nov-15)
3339 *
3340 * @param thread_id
3341 */
3342void
3343CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3344
3345    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3346
3347    /* fast path check */
3348    if ( likely ( ring->isEmpty() ) ) {
3349        return;
3350    }
3351
3352    while ( true ) {
3353        CGenNode * node = NULL;
3354        if (ring->Dequeue(node) != 0) {
3355            break;
3356        }
3357        assert(node);
3358
3359        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3360        msg->handle();
3361        delete msg;
3362    }
3363
3364}
3365
3366/**
3367 * check for messages that arrived from DP to CP
3368 *
3369 */
3370void
3371CGlobalTRex::check_for_dp_messages() {
3372
3373    /* for all the cores - check for a new message */
3374    for (int i = 0; i < get_cores_tx(); i++) {
3375        check_for_dp_message_from_core(i);
3376    }
3377}
3378
3379bool CGlobalTRex::is_all_links_are_up(bool dump){
3380    bool all_link_are=true;
3381    int i;
3382    for (i=0; i<m_max_ports; i++) {
3383        CPhyEthIF * _if=&m_ports[i];
3384        _if->get_port_attr()->update_link_status();
3385        if ( dump ){
3386            _if->dump_stats(stdout);
3387        }
3388        if ( _if->get_port_attr()->is_link_up() == false){
3389            all_link_are=false;
3390            break;
3391        }
3392    }
3393    return (all_link_are);
3394}
3395
3396void CGlobalTRex::try_stop_all_cores(){
3397
3398    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3399    send_message_all_dp(dp_msg);
3400    delete dp_msg;
3401
3402    if (get_is_stateless()) {
3403        TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3404        send_message_to_rx(rx_msg);
3405    }
3406
3407    // no need to delete rx_msg. Deleted by receiver
3408    bool all_core_finished = false;
3409    int i;
3410    for (i=0; i<20; i++) {
3411        if ( is_all_cores_finished() ){
3412            all_core_finished =true;
3413            break;
3414        }
3415        delay(100);
3416    }
3417    if ( all_core_finished ){
3418        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3419        printf(" All cores stopped !! \n");
3420    }else{
3421        printf(" ERROR one of the DP core is stucked !\n");
3422    }
3423}
3424
3425
3426int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3427
3428    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3429    int i;
3430
3431    for (i=0; i<max_threads; i++) {
3432        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3433        ring->Enqueue((CGenNode*)msg->clone());
3434    }
3435    return (0);
3436}
3437
3438int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3439    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3440    ring->Enqueue((CGenNode *) msg);
3441
3442    return (0);
3443}
3444
3445
3446int  CGlobalTRex::ixgbe_rx_queue_flush(){
3447    int i;
3448    for (i=0; i<m_max_ports; i++) {
3449        CPhyEthIF * _if=&m_ports[i];
3450        _if->flush_rx_queue();
3451    }
3452    return (0);
3453}
3454
3455
3456// init stateful rx core
void CGlobalTRex::ixgbe_configure_mg(void) {
    int i;
    CLatencyManagerCfg mg_cfg;
    mg_cfg.m_max_ports = m_max_ports;

    /* user-requested latency packet rate (packets/sec), 0 if not set */
    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;

    if ( latency_rate ) {
        mg_cfg.m_cps = (double)latency_rate ;
    } else {
        // If RX core needed, we need something to make the scheduler running.
        // If nothing configured, send 1 CPS latency measurement packets.
        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
            mg_cfg.m_cps = 1.0;
        } else {
            /* periodic gratuitous ARP already drives the scheduler,
               so no latency packets are needed */
            mg_cfg.m_cps = 0;
        }
    }

    if ( get_vm_one_queue_enable() ) {
        /* vm mode, indirect queues  */
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if = &m_ports[i];
            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();

            /* two ports share one DP thread, hence one ring per port pair */
            uint8_t thread_id = (i>>1);

            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg, _if);

            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
        }

    }else{
        /* bare-metal mode: latency port objects talk to the NIC directly,
           transmitting on the TX queue reserved for the RX core */
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if=&m_ports[i];
            _if->dump_stats(stdout);
            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);

            mg_cfg.m_ports[i] =&m_latency_vports[i];
        }
    }


    /* create the latency manager and apply the port mask filter */
    m_mg.Create(&mg_cfg);
    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
}
3504
3505// init m_rx_sl object for stateless rx core
3506void CGlobalTRex::rx_sl_configure(void) {
3507    CRxSlCfg rx_sl_cfg;
3508    int i;
3509
3510    rx_sl_cfg.m_max_ports = m_max_ports;
3511    rx_sl_cfg.m_num_crc_fix_bytes = get_ex_drv()->get_num_crc_fix_bytes();
3512
3513    if ( get_vm_one_queue_enable() ) {
3514        /* vm mode, indirect queues  */
3515        for (i=0; i < m_max_ports; i++) {
3516            CPhyEthIF * _if = &m_ports[i];
3517            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3518            uint8_t thread_id = (i >> 1);
3519            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3520            m_latency_vm_vports[i].Create(i, r, &m_mg, _if);
3521            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3522        }
3523    } else {
3524        for (i = 0; i < m_max_ports; i++) {
3525            CPhyEthIF * _if = &m_ports[i];
3526            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3527            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3528        }
3529    }
3530
3531    m_rx_sl.create(rx_sl_cfg);
3532}
3533
/* bring up all ports: configure queues, start devices, wait for links,
   wire the latency infrastructure and the per-core virtual interfaces.
   returns 0; exits the process if a link stays down and the driver
   cannot tolerate it */
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {
        /* mbuf pools are per NUMA socket - pick the pool local to this port */
        socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
        assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);

        if ( get_vm_one_queue_enable() ) {
            /* VMXNET3 does claim to support 16K but somehow does not work */
            /* reduce to 2000 */
            m_port_cfg.m_port_conf.rxmode.max_rx_pkt_len = 2000;
            /* In VM case, there is one tx q and one rx q */
            _if->configure(1, 1, &m_port_cfg.m_port_conf);
            // Only 1 rx queue, so use it for everything
            m_rx_core_tx_q_id = 0;
            _if->set_rx_queue(0);
            _if->rx_queue_setup(0, RTE_TEST_RX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
            // 1 TX queue in VM case
            _if->tx_queue_setup(0, RTE_TEST_TX_DESC_VM_DEFAULT, socket_id, &m_port_cfg.m_tx_conf);
        } else {
            // 2 rx queues.
            // TX queues: 1 for each core handling the port pair + 1 for latency pkts + 1 for use by RX core
            _if->configure(2, m_cores_to_dual_ports + 2, &m_port_cfg.m_port_conf);
            /* the RX core transmits on the queue right after the DP cores' queues */
            m_rx_core_tx_q_id = m_cores_to_dual_ports;

            // setup RX drop queue
            _if->rx_queue_setup(MAIN_DPDK_DATA_Q,
                                RTE_TEST_RX_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
            // setup RX filter queue
            _if->set_rx_queue(MAIN_DPDK_RX_Q);
            _if->rx_queue_setup(MAIN_DPDK_RX_Q,
                                RTE_TEST_RX_LATENCY_DESC_DEFAULT,
                                socket_id,
                                &m_port_cfg.m_rx_conf,
                                CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
            // setup TX queues
            for (int qid = 0; qid < m_max_queues_per_port; qid++) {
                _if->tx_queue_setup((uint16_t)qid,
                                    RTE_TEST_TX_DESC_DEFAULT ,
                                    socket_id,
                                    &m_port_cfg.m_tx_conf);
            }
        }

        _if->stats_clear();
        _if->start();
        _if->configure_rx_duplicate_rules();

        /* flow control is disabled unless the user asked to keep it or the
           driver/VM mode cannot change it */
        if ( ! get_vm_one_queue_enable()  && ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " One of the links is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    /* stateful mode uses the latency manager for the RX core */
    if (! get_is_stateless()) {
        ixgbe_configure_mg();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if ( get_vm_one_queue_enable() ) {
        lat_q_id = 0;
    } else {
        lat_q_id = get_cores_tx() / get_base_num_cores() + 1;
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        /* each core drives a pair of ports (even + odd) */
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    fprintf(stdout, "RX core uses TX queue number %d on all ports\n", m_rx_core_tx_q_id);
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3671
3672static void trex_termination_handler(int signum);
3673
3674void CGlobalTRex::register_signals() {
3675    struct sigaction action;
3676
3677    /* handler */
3678    action.sa_handler = trex_termination_handler;
3679
3680    /* blocked signals during handling */
3681    sigemptyset(&action.sa_mask);
3682    sigaddset(&action.sa_mask, SIGINT);
3683    sigaddset(&action.sa_mask, SIGTERM);
3684
3685    /* no flags */
3686    action.sa_flags = 0;
3687
3688    /* register */
3689    sigaction(SIGINT,  &action, NULL);
3690    sigaction(SIGTERM, &action, NULL);
3691}
3692
/* one-time initialization of the global TRex object: signal handlers,
   ZMQ publisher, port/core/queue probing, memory pools, port bring-up
   and (in stateless mode) the RPC server objects.
   returns false only if the ZMQ publisher could not be created */
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    /* stateful mode needs the traffic YAML up-front to read pre-flags
       (e.g. VLAN) before the ports are configured */
    if (!get_is_stateless()) {
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_enable(true);
    }
    /* End update pre flags */

    /* probe HW: ports first, then cores, then derive queue counts */
    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    /* message structs are overlaid on CGenNode in the rings, so their
       sizes must match exactly */
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */

    /* size the RX mbuf pool for the descriptors actually configured per mode */
    uint32_t rx_mbuf = 0 ;

    if ( get_vm_one_queue_enable() ) {
        rx_mbuf = (m_max_ports * RTE_TEST_RX_DESC_VM_DEFAULT);
    }else{
        rx_mbuf = (m_max_ports * (RTE_TEST_RX_LATENCY_DESC_DEFAULT+RTE_TEST_RX_DESC_DEFAULT));
    }

    CGlobalInfo::init_pools(rx_mbuf);
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);

        rx_sl_configure();
    }

    return (true);

}
3767void CGlobalTRex::Delete(){
3768
3769    m_zmq_publisher.Delete();
3770    m_fl.Delete();
3771
3772    if (m_trex_stateless) {
3773        delete m_trex_stateless;
3774        m_trex_stateless = NULL;
3775    }
3776}
3777
3778
3779
/* probe the Ethernet ports: count them, validate the count against the
   configuration, verify all ports share one supported driver with an
   acceptable firmware version, and apply per-driver port configuration.
   exits the process on any validation failure; returns 0 on success */
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");

    printf(" Number of ports found: %d \n",m_max_ports);

    /* ports are always handled in pairs (client/server side) */
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports %d should be even, mask the one port in the configuration file  \n, ",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum ports supported are %d, use the configuration file to set the expected number of ports   \n",TREX_MAX_PORTS);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports you expected more %d,use the configuration file to set the expected number of ports   \n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    /* use port 0 as the representative device for driver identification */
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : %x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : %x \n",dev_info.tx_offload_capa);
    }



    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
        printf(" Error: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
               ,dev_info.driver_name);
        exit(1);
    }

    /* mixed-NIC setups are not supported: all ports must use the same driver */
    int i;
    struct rte_eth_dev_info dev_info1;

    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if ( get_vm_one_queue_enable() ) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf(" ERROR the number of cores should be 1 when the driver support only one tx queue and one rx queue \n");
            exit(1);
        }
    }
    return (0);
}
3870
3871int  CGlobalTRex::cores_prob_init(){
3872    m_max_cores = rte_lcore_count();
3873    assert(m_max_cores>0);
3874    return (0);
3875}
3876
3877int  CGlobalTRex::queues_prob_init(){
3878
3879    if (m_max_cores < 2) {
3880        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
3881    }
3882
3883    assert((m_max_ports>>1) <= get_cores_tx() );
3884
3885    m_cores_mul = CGlobalInfo::m_options.preview.getCores();
3886
3887    m_cores_to_dual_ports  = m_cores_mul;
3888
3889    /* core 0 - control
3890       -core 1 - port 0/1
3891       -core 2 - port 2/3
3892       -core 3 - port 0/1
3893       -core 4 - port 2/3
3894
3895       m_cores_to_dual_ports = 2;
3896    */
3897
3898    // One q for each core allowed to send on this port + 1 for latency q (Used in stateless) + 1 for RX core.
3899    m_max_queues_per_port  = m_cores_to_dual_ports + 2;
3900
3901    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
3902        rte_exit(EXIT_FAILURE,
3903                 "Error: Number of TX queues exceeds %d. Try running with lower -c <val> \n",BP_MAX_TX_QUEUE);
3904    }
3905
3906    assert(m_max_queues_per_port>0);
3907    return (0);
3908}
3909
3910
3911void CGlobalTRex::dump_config(FILE *fd){
3912    fprintf(fd," number of ports         : %u \n",m_max_ports);
3913    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
3914    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
3915}
3916
3917
3918void CGlobalTRex::dump_links_status(FILE *fd){
3919    for (int i=0; i<m_max_ports; i++) {
3920        m_ports[i].get_port_attr()->update_link_status_nowait();
3921        m_ports[i].get_port_attr()->dump_link(fd);
3922    }
3923}
3924
3925
/* print an end-of-run summary: aggregated HW TX/RX counters across all
   ports, software TX counters across all cores, ARP counters, and
   (when latency is enabled) latency results */
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    /* software-side counters: what the DP cores believe they transmitted */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* hardware-side counters per port, plus ARP handled outside the DP path */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency packets are sent by the RX core, not the DP cores -
       add them to the software totals */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* RX can legitimately exceed TX slightly (e.g. external traffic);
       report zero drop but warn if it exceeds TX by more than 1% */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
3996
3997
3998void CGlobalTRex::update_stats(){
3999
4000    int i;
4001    for (i=0; i<m_max_ports; i++) {
4002        CPhyEthIF * _if=&m_ports[i];
4003        _if->update_counters();
4004    }
4005    uint64_t total_open_flows=0;
4006
4007
4008    CFlowGenListPerThread   * lpt;
4009    for (i=0; i<get_cores_tx(); i++) {
4010        lpt = m_fl.m_threads_info[i];
4011        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4012    }
4013    m_last_total_cps = m_cps.add(total_open_flows);
4014
4015}
4016
4017tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
4018    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4019}
4020
4021// read stats. Return read value, and clear.
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    /* rebuild the aggregate from scratch: zero it, then sum the per-core
       counters of every thread that drives this port */
    m_stats.m_port[port].m_tx_per_flow[index].clear();

    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        port0 = lpt->getDualPortId() * 2;
        /* each thread serves the even/odd port pair (port0, port0+1) */
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            /* latency flow stats live past MAX_FLOW_STATS and are reset
               per-core as part of the clear */
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
4047
/* fill a CGlobalStats snapshot: per-port HW counters and rates, per-core
   software counters (allocation errors, queue fulls/drops, templates,
   NAT, sockets) and derived global rates scaled by the platform factor */
void CGlobalTRex::get_stats(CGlobalStats & stats){

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port hardware counters and last-interval rates */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* zero the per-flow counters before re-aggregating them from the
           per-core stats in the loop below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* aggregate the software-side counters across all DP threads */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        /* m_stats[0]/m_stats[1] are the two sides (ports) this thread drives */
        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        uint8_t port0 = lpt->getDualPortId() *2;
        /* fold this thread's per-flow TX counters into the two ports it drives */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* small TX/RX deltas are treated as measurement noise, not drop.
       NOTE(review): the second condition compares against 10% of total_tx,
       which looks generous - confirm the intended noise threshold */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    /* scale all derived rates by the user-supplied platform factor */
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* guard against divide-by-(near-)zero CPU utilization */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4223
4224float
4225CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4226    CPhyEthIF * _if = &m_ports[port_id];
4227
4228    float    tmp = 0;
4229    uint8_t  cnt = 0;
4230    for (const auto &p : _if->get_core_list()) {
4231        uint8_t core_id = p.first;
4232        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4233        if (lp->is_port_active(port_id)) {
4234            tmp += lp->m_cpu_cp_u.GetVal();
4235            cnt++;
4236        }
4237    }
4238
4239    return ( (cnt > 0) ? (tmp / cnt) : 0);
4240
4241}
4242
4243bool CGlobalTRex::sanity_check(){
4244
4245    CFlowGenListPerThread   * lpt;
4246    uint32_t errors=0;
4247    int i;
4248    for (i=0; i<get_cores_tx(); i++) {
4249        lpt = m_fl.m_threads_info[i];
4250        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4251    }
4252
4253    if ( errors ) {
4254        printf(" ERRORs sockets allocation errors! \n");
4255        printf(" you should allocate more clients in the pool \n");
4256        return(true);
4257    }
4258    return ( false);
4259}
4260
4261
4262/* dump the template info */
4263void CGlobalTRex::dump_template_info(std::string & json){
4264    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4265    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4266
4267    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4268    int i;
4269    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4270        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4271        json+="\""+ r->m_name+"\"";
4272        json+=",";
4273    }
4274    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4275    json+="]}" ;
4276}
4277
/* Refresh all counters and dump statistics to fd.
   In interactive table mode (dmpTABLE) the per-port and global sections
   honor the keyboard-selected IO sub-modes; any other format (used at
   exit and by scripts) dumps everything unconditionally. */
void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){

    update_stats();
    get_stats(m_stats);

    if (format==CGlobalStats::dmpTABLE) {
        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
            /* per-port stats section - selected by the 'p' key modes */
            switch (m_io_modes.m_pp_mode ){
            case CTrexGlobalIoMode::ppDISABLE:
                fprintf(fd,"\n+Per port stats disabled \n");
                break;
            case CTrexGlobalIoMode::ppTABLE:
                fprintf(fd,"\n-Per port stats table \n");
                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
                break;
            case CTrexGlobalIoMode::ppSTANDARD:
                fprintf(fd,"\n-Per port stats - standard\n");
                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
                break;
            };

            /* global (all-ports) stats section */
            switch (m_io_modes.m_ap_mode ){
            case   CTrexGlobalIoMode::apDISABLE:
                fprintf(fd,"\n+Global stats disabled \n");
                break;
            case   CTrexGlobalIoMode::apENABLE:
                fprintf(fd,"\n-Global stats enabled \n");
                m_stats.DumpAllPorts(fd);
                break;
            };
        }
    }else{
        /* at exit , always need to dump it in standard mode for scripts*/
        m_stats.Dump(fd,format);
        m_stats.DumpAllPorts(fd);
    }

}
4316
/* Publish the periodic async JSON reports over the ZMQ publisher:
   global stats, generator stats, and mode-specific sections
   (template info for stateful, flow-stat/latency for stateless).
   sync_now forces a counter refresh first; baseline marks the report
   as a reference snapshot. */
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);

    /* template names are only meaningful in stateful mode */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    /* stateless: flow-stat + latency sections are published only when
       dump_json reports there is data */
    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4362
/* Publish a barrier event with the given key so async subscribers can
   synchronize with the publisher stream. */
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    m_zmq_publisher.publish_barrier(key);
}
4367
4368void
4369CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4370    Json::Value data;
4371    data["port_id"] = port_id;
4372    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4373
4374    _attr->to_json(data["attr"]);
4375
4376    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4377}
4378
/* Master-core slow path (runs roughly every SLOWPATH_DELAY_MS):
   refreshes link state, handles keyboard IO modes, runs sanity checks,
   then dumps and publishes periodic statistics per the selected mode. */
void
CGlobalTRex::handle_slow_path() {
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard handling; a quit request triggers shutdown */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    /* stop the test on client-pool allocation errors */
    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* clear the terminal (ANSI escapes) when live output is enabled */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        /* clear the screen once right after the display was disabled */
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    /* memory-pool view - refresh only every 4th slow-path tick */
    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* latency / rx-check sections - stateful mode with RX enabled only */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            switch (m_io_modes.m_l_mode) {
            case CTrexGlobalIoMode::lDISABLE:
                fprintf(stdout,"\n+Latency stats disabled \n");
                break;
            case CTrexGlobalIoMode::lENABLE:
                fprintf(stdout,"\n-Latency stats enabled \n");
                m_mg.DumpShort(stdout);
                break;
            case CTrexGlobalIoMode::lENABLE_Extended:
                fprintf(stdout,"\n-Latency stats extended \n");
                m_mg.Dump(stdout);
                break;
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    /* NAT flow table view - only meaningful in TCP-ACK learn mode */
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4492
4493
4494void
4495CGlobalTRex::handle_fast_path() {
4496    /* check from messages from DP */
4497    check_for_dp_messages();
4498
4499    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4500    for (int i = 0; i < 1000; i++) {
4501        m_fl.UpdateFast();
4502
4503        if (get_is_stateless()) {
4504            m_rx_sl.update_cpu_util();
4505        }else{
4506            m_mg.update_fast();
4507        }
4508
4509        rte_pause();
4510    }
4511
4512
4513    if ( is_all_cores_finished() ) {
4514        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4515    }
4516}
4517
4518
4519/**
4520 * shutdown sequence
4521 *
4522 */
/* Graceful shutdown: report the cause, stop the watchdog, stop the
   stateless CP, stop all DP cores, latency manager and ports.
   On any cause other than normal test end, exits the process. */
void CGlobalTRex::shutdown() {
    std::stringstream ss;
    ss << " *** TRex is shutting down - cause: '";

    switch (m_mark_for_shutdown) {

    case SHUTDOWN_TEST_ENDED:
        ss << "test has ended'";
        break;

    case SHUTDOWN_CTRL_C:
        ss << "CTRL + C detected'";
        break;

    case SHUTDOWN_SIGINT:
        ss << "received signal SIGINT'";
        break;

    case SHUTDOWN_SIGTERM:
        ss << "received signal SIGTERM'";
        break;

    case SHUTDOWN_RPC_REQ:
        ss << "server received RPC 'shutdown' request'";
        break;

    default:
        assert(0);
    }

    /* report */
    std::cout << ss.str() << "\n";

    /* first stop the WD */
    TrexWatchDog::getInstance().stop();

    /* stateless shutdown */
    if (get_is_stateless()) {
        m_trex_stateless->shutdown();
    }

    if (!is_all_cores_finished()) {
        try_stop_all_cores();
    }

    m_mg.stop();

    delay(1000);

    /* shutdown drivers */
    for (int i = 0; i < m_max_ports; i++) {
        m_ports[i].stop();
    }

    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
        /* we should stop latency and exit to stop agents */
        Delete();
        utl_termio_reset();
        exit(-1);
    }
}
4584
4585
/* Master-core main loop: alternates the fast path (every
   FASTPATH_DELAY_MS) with the slow path (every SLOWPATH_DELAY_MS),
   releasing m_cp_lock around the delay so RPC handlers can run.
   Exits the loop when a shutdown is requested, then shuts down. */
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    /* register this thread with the watchdog (2 sec timeout) */
    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }

        /* let other control-plane threads take the lock during the delay */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.tickle();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4635
4636
4637
4638int CGlobalTRex::run_in_rx_core(void){
4639
4640    rte_thread_setname(pthread_self(), "TRex RX");
4641
4642    if (get_is_stateless()) {
4643        m_sl_rx_running = true;
4644        m_rx_sl.start();
4645        m_sl_rx_running = false;
4646    } else {
4647        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4648            m_sl_rx_running = false;
4649            m_mg.start(0, true);
4650        }
4651    }
4652
4653    return (0);
4654}
4655
/* Entry point of a DP (data-plane) core. Names the thread, registers a
   watchdog monitor, and runs the stateless daemon or the stateful
   generator until completion, then signals the master via m_signal. */
int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
    std::stringstream ss;

    ss << "Trex DP core " << int(virt_core_id);
    rte_thread_setname(pthread_self(), ss.str().c_str());

    /* in single-core mode only core 1 generates traffic; core 2 is idle */
    CPreviewMode *lp=&CGlobalInfo::m_options.preview;
    if ( lp->getSingleCore() &&
         (virt_core_id==2 ) &&
         (lp-> getCores() ==1) ){
        printf(" bypass this core \n");
        m_signal[virt_core_id]=1;
        return (0);
    }


    assert(m_fl_was_init);
    CFlowGenListPerThread   * lpt;

    /* virt core ids are 1-based; thread info is 0-based */
    lpt = m_fl.m_threads_info[virt_core_id-1];

    /* register a watchdog handle on current core */
    lpt->m_monitor.create(ss.str(), 1);
    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);

    if (get_is_stateless()) {
        lpt->start_stateless_daemon(*lp);
    }else{
        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
    }

    /* done - remove this from the watchdog (we might wait on join for a long time) */
    lpt->m_monitor.disable();

    m_signal[virt_core_id]=1;
    return (0);
}
4693
4694
/* Final report at the end of a run: dump per-core/per-interface stats,
   generator histograms, latency and rx-check summaries, then the
   post-test statistics. */
int CGlobalTRex::stop_master(){

    delay(1000);
    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");
    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    fprintf(stdout," ==================\n");
    fprintf(stdout," \n\n");

    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");

    CFlowGenListPerThread   * lpt;
    uint64_t total_tx_rx_check=0;

    int i;
    /* per-core interface stats; accumulate rx-check packet totals for
       the verification dump below */
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        CCoreEthIF * erf_vif = m_cores_vif[i+1];

        erf_vif->DumpCoreStats(stdout);
        erf_vif->DumpIfStats(stdout);
        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
    }

    fprintf(stdout," ==================\n");
    fprintf(stdout," generators \n");
    fprintf(stdout," ==================\n");
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        lpt->m_node_gen.DumpHist(stdout);
        lpt->DumpStats(stdout);
    }
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf(stdout," ==================\n");
        fprintf(stdout," latency \n");
        fprintf(stdout," ==================\n");
        m_mg.DumpShort(stdout);
        m_mg.Dump(stdout);
        m_mg.DumpShortRxCheck(stdout);
        m_mg.DumpRxCheck(stdout);
        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
    }

    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    dump_post_test_stats(stdout);

    return (0);
}
4747
4748bool CGlobalTRex::is_all_cores_finished() {
4749    int i;
4750    for (i=0; i<get_cores_tx(); i++) {
4751        if ( m_signal[i+1]==0){
4752            return false;
4753        }
4754    }
4755    if (m_sl_rx_running)
4756        return false;
4757
4758    return true;
4759}
4760
4761
4762int CGlobalTRex::start_master_stateless(){
4763    int i;
4764    for (i=0; i<BP_MAX_CORES; i++) {
4765        m_signal[i]=0;
4766    }
4767    m_fl.Create();
4768    m_expected_pps = 0;
4769    m_expected_cps = 0;
4770    m_expected_bps = 0;
4771
4772    m_fl.generate_p_thread_info(get_cores_tx());
4773    CFlowGenListPerThread   * lpt;
4774
4775    for (i=0; i<get_cores_tx(); i++) {
4776        lpt = m_fl.m_threads_info[i];
4777        CVirtualIF * erf_vif = m_cores_vif[i+1];
4778        lpt->set_vif(erf_vif);
4779        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
4780    }
4781    m_fl_was_init=true;
4782
4783    return (0);
4784}
4785
/* Prepare the flow-gen infrastructure for stateful mode: load the
   traffic YAML (and optional client config), verify options, compute
   expected rates, configure the latency manager's IP ranges, and bind
   each DP thread to its virtual interface and NUMA socket. */
int CGlobalTRex::start_master_statefull() {
    int i;
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected rates, derived from the traffic profile */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* latency manager uses the first client/server pool ranges */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode: dump the parsed profile as CSV */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
4855
4856
4857////////////////////////////////////////////
4858static CGlobalTRex g_trex;
4859
/* Refresh this port's extended HW counters, subtract traffic the RX
   core sent on our behalf (e.g. ARP) so it is not counted as test
   traffic, and update the smoothed BPS/PPS rate trackers. */
void CPhyEthIF::update_counters() {
    get_ex_drv()->get_extended_stats(this, &m_stats);
    CRXCoreIgnoreStat ign_stats;
    /* 'true' presumably clears the ignore counters after reading -
       TODO confirm against get_ignore_stats() */
    g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
    m_stats.obytes -= ign_stats.get_tx_bytes();
    m_stats.opackets -= ign_stats.get_tx_pkts();
    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();

    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
}
4875
4876bool CPhyEthIF::Create(uint8_t portid) {
4877    m_port_id      = portid;
4878    m_last_rx_rate = 0.0;
4879    m_last_tx_rate = 0.0;
4880    m_last_tx_pps  = 0.0;
4881    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
4882
4883
4884    uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[m_port_id].get_ip();
4885    if (src_ipv4) {
4886        m_port_attr->set_src_ipv4(src_ipv4);
4887    }
4888
4889    /* for now set as unresolved IPv4 destination */
4890    uint32_t dest_ipv4 = CGlobalInfo::m_options.m_ip_cfg[m_port_id].get_def_gw();
4891    if (dest_ipv4) {
4892        m_port_attr->get_dest().set_dest(dest_ipv4);
4893    }
4894
4895    return true;
4896}
4897
4898const std::vector<std::pair<uint8_t, uint8_t>> &
4899CPhyEthIF::get_core_list() {
4900
4901    /* lazy find */
4902    if (m_core_id_list.size() == 0) {
4903
4904        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
4905
4906            /* iterate over all the directions*/
4907            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
4908                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
4909                    m_core_id_list.push_back(std::make_pair(core_id, dir));
4910                }
4911            }
4912        }
4913    }
4914
4915    return m_core_id_list;
4916
4917}
4918
4919int CPhyEthIF::reset_hw_flow_stats() {
4920    if (get_ex_drv()->hw_rx_stat_supported()) {
4921        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
4922    } else {
4923        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
4924    }
4925    return 0;
4926}
4927
4928// get/reset flow director counters
4929// return 0 if OK. -1 if operation not supported.
4930// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
4931// min, max - minimum, maximum counters range to get
4932// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    if (hw_rx_stat_supported) {
        /* HW path: read the deltas accumulated since the previous poll */
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        /* SW path: the RX core owns the counters and fills rx_stats */
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* re-arm the HW baseline for this single counter */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* fold the deltas into the running totals */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
4980
4981int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
4982    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
4983    for (int i = min; i <= max; i++) {
4984        if ( reset ) {
4985            if (tx_stats != NULL) {
4986                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
4987            }
4988        } else {
4989            if (tx_stats != NULL) {
4990                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
4991            }
4992        }
4993    }
4994
4995    return 0;
4996}
4997
4998// If needed, send packets to rx core for processing.
4999// This is relevant only in VM case, where we receive packets to the working DP core (only 1 DP core in this case)
/* Decide whether an RX packet must be forwarded to the RX core, and if
   so enqueue it. Returns true when the mbuf was handed off (caller must
   not free it), false when the caller keeps ownership. */
bool CCoreEthIF::process_rx_pkt(pkt_dir_t dir, rte_mbuf_t * m) {
    CFlowStatParser parser;
    uint32_t ip_id;

    if (parser.parse(rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m)) != 0) {
        return false;
    }
    bool send=false;

    // e1000 on ESXI hands us the packet with the ethernet FCS
    if (parser.get_pkt_size() < rte_pktmbuf_pkt_len(m)) {
        rte_pktmbuf_trim(m, rte_pktmbuf_pkt_len(m) - parser.get_pkt_size());
    }

    if ( get_is_stateless() ) {
        // In stateless RX, we only care about flow stat packets
        if ((parser.get_ip_id(ip_id) == 0) && ((ip_id & 0xff00) == IP_ID_RESERVE_BASE)) {
            send = true;
        }
    } else {
        /* stateful: forward latency probe packets ... */
        CLatencyPktMode *c_l_pkt_mode = g_trex.m_mg.c_l_pkt_mode;
        bool is_lateancy_pkt =  c_l_pkt_mode->IsLatencyPkt((IPHeader *)parser.get_l4()) &
            CCPortLatency::IsLatencyPkt(parser.get_l4() + c_l_pkt_mode->l4_header_len());

        if (is_lateancy_pkt) {
            send = true;
        } else {
            /* ... and, with rx-check enabled, packets whose TTL matches
               the marked rx-check range */
            if ( get_is_rx_filter_enable() ) {
                uint8_t max_ttl = 0xff - get_rx_check_hops();
                uint8_t pkt_ttl = parser.get_ttl();
                if ( (pkt_ttl==max_ttl) || (pkt_ttl==(max_ttl-1) ) ) {
                    send=true;
                }
            }
        }
    }


    if (send) {
        /* wrap the mbuf in a latency-pkt node and enqueue it to the RX
           core; on any failure fall back to keeping ownership here */
        CGenNodeLatencyPktInfo * node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if ( node ) {
            node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
            node->m_dir      = dir;
            node->m_latency_offset = 0xdead;
            node->m_pkt      = m;
            if ( m_ring_to_rx->Enqueue((CGenNode*)node)==0 ){
            }else{
                CGlobalInfo::free_node((CGenNode *)node);
                send=false;
            }

#ifdef LATENCY_QUEUE_TRACE_
            printf("rx to cp --\n");
            rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));
#endif
        }else{
            send=false;
        }
    }
    return (send);
}
5061
/* accessor to the global stateless object (owned by g_trex) */
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
5065
/* accessor to the global stateless RX core object (owned by g_trex) */
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
5069
5070static int latency_one_lcore(__attribute__((unused)) void *dummy)
5071{
5072    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5073    physical_thread_id_t  phy_id =rte_lcore_id();
5074
5075    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5076        g_trex.run_in_rx_core();
5077    }else{
5078
5079        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5080            g_trex.run_in_master();
5081            delay(1);
5082        }else{
5083            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5084            /* this core has stopped */
5085            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5086        }
5087    }
5088    return 0;
5089}
5090
5091
5092
5093static int slave_one_lcore(__attribute__((unused)) void *dummy)
5094{
5095    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5096    physical_thread_id_t  phy_id =rte_lcore_id();
5097
5098    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5099        g_trex.run_in_rx_core();
5100    }else{
5101        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5102            g_trex.run_in_master();
5103            delay(1);
5104        }else{
5105            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5106        }
5107    }
5108    return 0;
5109}
5110
5111
5112
/**
 * Build a 32-bit core mask for 'cores' cores.
 * Bit 0 (the master core) is always set; the remaining (cores - 1)
 * bits are set consecutively starting at bit (offset + 1).
 *
 * Fixes vs. the original: 'i < cores - 1' underflowed when cores == 0
 * (unsigned wrap -> near-infinite loop), and '1 << (offset + 1)' was a
 * signed shift (UB once the shifted bit reaches bit 31). Uses an
 * unsigned literal and an overflow-free loop bound instead.
 */
uint32_t get_cores_mask(uint32_t cores,int offset){
    uint32_t res = 1; /* master core - bit 0 is always set */

    /* set bits offset+1 .. offset+cores-1 for the remaining cores */
    for (uint32_t i = 0; i + 1 < cores; i++) {
        res |= (1u << (offset + 1 + (int)i));
    }
    return (res);
}
5125
5126
/* executable path (argv[0]), saved by main() at startup */
static char *g_exe_name;

/* return the executable name as it was given on the command line */
const char *get_exe_name() {
    return g_exe_name;
}
5131
5132
/* process entry point: save argv[0] for later reporting and delegate
   all real work to main_test() */
int main(int argc , char * argv[]){
    g_exe_name = argv[0];

    return ( main_test(argc , argv));
}
5138
5139
/* Apply the parsed platform YAML (global_platform_cfg_info) to the
   global options: socket layout, ports, ZMQ/telnet ports, per-port
   MAC/IP config, and the memory pool sizing factor. */
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the per-port MAC/IP info from the file */

        int port_size=cg->m_mac_info.size();

        /* silently clamp to the supported number of ports */
        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
        }
    }

    /* mul by interface type - memory scales with port speed (10G units),
       number of dual-port pairs, and the mbuf factor option */
    float mul=1.0;
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5206
5207extern "C" int eal_cpu_detected(unsigned lcore_id);
5208// return mask representing available cores
5209int core_mask_calc() {
5210    uint32_t mask = 0;
5211    int lcore_id;
5212
5213    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5214        if (eal_cpu_detected(lcore_id)) {
5215            mask |= (1 << lcore_id);
5216        }
5217    }
5218
5219    return mask;
5220}
5221
5222// Return number of set bits in i
uint32_t num_set_bits(uint32_t i)
{
    // Population count via Kernighan's method: each iteration clears the
    // lowest set bit, so the loop runs once per set bit.
    uint32_t count = 0;
    while (i != 0) {
        i &= (i - 1);
        count++;
    }
    return count;
}
5229
5230// sanity check if the cores we want to use really exist
5231int core_mask_sanity(uint32_t wanted_core_mask) {
5232    uint32_t calc_core_mask = core_mask_calc();
5233    uint32_t wanted_core_num, calc_core_num;
5234
5235    wanted_core_num = num_set_bits(wanted_core_mask);
5236    calc_core_num = num_set_bits(calc_core_mask);
5237
5238    if (calc_core_num == 1) {
5239        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
5240        printf("        If you are running on VM, consider adding more cores if possible\n");
5241        return -1;
5242    }
5243    if (wanted_core_num > calc_core_num) {
5244        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
5245        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
5246               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
5247               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
5248        if (CGlobalInfo::m_options.preview.getCores() > 1)
5249            printf("       Maybe try smaller -c <num>.\n");
5250        printf("       If you are running on VM, consider adding more cores if possible\n");
5251        return -1;
5252    }
5253
5254    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
5255        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
5256        return -1;
5257    }
5258
5259    return 0;
5260}
5261
/* Build the argv-style argument list (global_dpdk_args) that is later passed
   to rte_eal_init(). Also validates the socket/core configuration.
   Returns 0 on success, -1 on a configuration error. */
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    /* propagate thread layout into the socket model, then sanity-check it */
    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    /* render the core mask as hex and verify the cores really exist */
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    /* argv[0] placeholder ("xx"), then -c <coremask> -n <memory channels> */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    /* DPDK log level: quiet (4) by default, verbosity+1 in verbose mode */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list: -w <pci addr> per interface. In dump-info mode the
       list comes from the command line, otherwise from the platform cfg. */
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    /* multi-instance support: distinct hugepage file prefix plus a memory
       limit (-m), defaulting to 1024 MB when the cfg does not specify one */
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    /* in verbose mode, echo the final EAL argument vector */
    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5346
5347
5348int sim_load_list_of_cap_files(CParserOption * op){
5349
5350    CFlowGenList fl;
5351    fl.Create();
5352    fl.load_from_yaml(op->cfg_file,1);
5353    if ( op->preview.getVMode() >0 ) {
5354        fl.DumpCsv(stdout);
5355    }
5356    uint32_t start=    os_get_time_msec();
5357
5358    CErfIF erf_vif;
5359
5360    fl.generate_p_thread_info(1);
5361    CFlowGenListPerThread   * lpt;
5362    lpt=fl.m_threads_info[0];
5363    lpt->set_vif(&erf_vif);
5364
5365    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5366        lpt->start_generate_stateful(op->out_file,op->preview);
5367    }
5368
5369    lpt->m_node_gen.DumpHist(stdout);
5370
5371    uint32_t stop=    os_get_time_msec();
5372    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5373    fl.Delete();
5374    return (0);
5375}
5376
5377void dump_interfaces_info() {
5378    printf("Showing interfaces info.\n");
5379    uint8_t m_max_ports = rte_eth_dev_count();
5380    struct ether_addr mac_addr;
5381    char mac_str[ETHER_ADDR_FMT_SIZE];
5382    struct rte_pci_addr pci_addr;
5383
5384    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
5385        // PCI, MAC and Driver
5386        pci_addr = rte_eth_devices[port_id].pci_dev->addr;
5387        rte_eth_macaddr_get(port_id, &mac_addr);
5388        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
5389        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
5390            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
5391            rte_eth_devices[port_id].pci_dev->driver->name);
5392    }
5393}
5394
/* Main entry point after arg parsing: parses options, initializes DPDK EAL,
   configures ports, and launches the master/DP/latency threads.
   Returns 0 on clean shutdown, negative on error (may also exit()). */
int main_test(int argc , char * argv[]){


    utl_termio_init();

    int ret;
    unsigned lcore_id;
    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);

    CGlobalInfo::m_options.preview.clean();

    /* first pass over CLI options (pre-platform-file) */
    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
        exit(-1);
    }

    /* enable core dump if requested */
    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
        utl_set_coredump_size(-1);
    }
    else {
        utl_set_coredump_size(0);
    }


    update_global_info_from_platform_file();

    /* It is not a mistake. Give the user higher priority over the configuration file */
    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
        exit(-1);
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        CGlobalInfo::m_options.dump(stdout);
        CGlobalInfo::m_memory_cfg.Dump(stdout);
    }

    /* build the EAL argv (core mask, whitelist, prefix, ...) */
    if (update_dpdk_args() < 0) {
        return -1;
    }

    CParserOption * po=&CGlobalInfo::m_options;


    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        rte_set_log_level(1);

    }
    /* DPDK requires root privileges (hugepages, PCI access) */
    uid_t uid;
    uid = geteuid ();
    if ( uid != 0 ) {
        printf("ERROR you must run with superuser priviliges \n");
        printf("User id   : %d \n",uid);
        printf("try 'sudo' %s \n",argv[0]);
        return (-1);
    }

    /* set affinity to the master core as default */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
    if (ret < 0){
        printf(" You might need to run ./trex-cfg  once  \n");
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    }
    /* --dump-interfaces mode: print port info and quit */
    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
        dump_interfaces_info();
        exit(0);
    }
    /* make DPDK port numbering match the order in /etc/trex_cfg.yaml */
    reorder_dpdk_ports();
    time_init();

    /* check if we are in simulation mode */
    if ( CGlobalInfo::m_options.out_file != "" ){
        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
    }

    if ( !g_trex.Create() ){
        exit(1);
    }

    /* clamp the rx-check sample rate to the driver's minimum */
    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
        po->m_rx_check_sample = get_min_sample_rate();
        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
    }

    /* set dump mode */
    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);

    /* disable WD if needed */
    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
    TrexWatchDog::getInstance().init(wd_enable);

    g_trex.m_sl_rx_running = false;
    if ( get_is_stateless() ) {
        g_trex.start_master_stateless();

    }else{
        g_trex.start_master_statefull();
    }

    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports);
        int ret;   // NOTE(review): intentionally shadows the outer 'ret'

        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
            // Unit test: toggle many times between receive all and stateless/stateful modes,
            // to test resiliency of add/delete fdir filters
            printf("Starting receive all/normal mode toggle unit test\n");
            for (int i = 0; i < 100; i++) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
                if (ret != 0) {
                    printf("Iteration %d: Receive all mode failed\n", i);
                    exit(ret);
                }

                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
                }

                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
                if (ret != 0) {
                    printf("Iteration %d: Normal mode failed\n", i);
                    exit(ret);
                }

                printf("Iteration %d OK\n", i);
            }
            exit(0);
        } else {
            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->set_rcv_all(pif, true);
                }
            }
            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
            exit(ret);
        }
    }

    // in case of client config, we already run pretest
    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        g_trex.pre_test();
    }

    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
    g_trex.ixgbe_rx_queue_flush();
    for (int i = 0; i < g_trex.m_max_ports; i++) {
        CPhyEthIF *_if = &g_trex.m_ports[i];
        _if->stop_rx_drop_queue();
    }

    /* optional latency warm-up phase before measurements start */
    if ( CGlobalInfo::m_options.is_latency_enabled()
         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
            CGlobalInfo::m_options.m_latency_rate;
        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
        g_trex.m_mg.start(pkts, NULL);
        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
        printf("Finished \n");
        g_trex.m_mg.reset();
    }

    /* latency-only mode: run the latency loop on all cores and finish */
    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
        g_trex.stop_master();

        return (0);
    }

    /* single-core debug mode */
    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
        g_trex.run_in_core(1);
        g_trex.stop_master();
        return (0);
    }

    /* normal mode: launch the DP loop on every slave core and wait */
    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    g_trex.stop_master();
    g_trex.Delete();
    utl_termio_reset();

    return (0);
}
5599
5600void wait_x_sec(int sec) {
5601    int i;
5602    printf(" wait %d sec ", sec);
5603    fflush(stdout);
5604    for (i=0; i<sec; i++) {
5605        delay(1000);
5606        printf(".");
5607        fflush(stdout);
5608    }
5609    printf("\n");
5610    fflush(stdout);
5611}
5612
5613/*
5614Changes the order of rte_eth_devices array elements
5615to be consistent with our /etc/trex_cfg.yaml
5616*/
void reorder_dpdk_ports() {
    // Scratch copy of the global device table; reordering is done by raw
    // memcpy of rte_eth_dev entries, so the table must not be in use yet.
    rte_eth_dev rte_eth_devices_temp[RTE_MAX_ETHPORTS];
    uint8_t m_port_map[RTE_MAX_ETHPORTS];   // DPDK port id -> TRex cfg index
    struct rte_pci_addr addr;
    uint8_t port_id;

    // gather port relation information and save current array to temp
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices_temp[i], &rte_eth_devices[i], sizeof rte_eth_devices[i]);
        // accept either BDF ("bus:dev.fn") or Domain-BDF notation
        if (eal_parse_pci_BDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0 && eal_parse_pci_DomBDF(global_platform_cfg_info.m_if_list[i].c_str(), &addr) != 0) {
            printf("Failed mapping TRex port id to DPDK id: %d\n", i);
            exit(1);
        }
        rte_eth_dev_get_port_by_addr(&addr, &port_id);
        m_port_map[port_id] = i;
        // print the relation in verbose mode
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            printf("TRex cfg port id: %d <-> DPDK port id: %d\n", i, port_id);
        }
    }

    // actual reorder
    for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
        memcpy(&rte_eth_devices[m_port_map[i]], &rte_eth_devices_temp[i], sizeof rte_eth_devices_temp[i]);
    }
}
5643
5644//////////////////////////////////////////////////////////////////////////////////////////////
5645//////////////////////////////////////////////////////////////////////////////////////////////
5646// driver section
5647//////////////////////////////////////////////////////////////////////////////////////////////
5648int CTRexExtendedDriverBase::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5649    uint8_t port_id=_if->get_rte_port_id();
5650    return (rte_eth_dev_rx_queue_stop(port_id, q_num));
5651}
5652
5653int CTRexExtendedDriverBase::wait_for_stable_link() {
5654    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
5655    return 0;
5656}
5657
5658void CTRexExtendedDriverBase::wait_after_link_up() {
5659    wait_x_sec(CGlobalInfo::m_options.m_wait_before_traffic);
5660}
5661
5662CFlowStatParser *CTRexExtendedDriverBase::get_flow_stat_parser() {
5663    CFlowStatParser *parser = new CFlowStatParser();
5664    assert (parser);
5665    return parser;
5666}
5667
5668// in 1G we need to wait if links became ready to soon
5669void CTRexExtendedDriverBase1G::wait_after_link_up(){
5670    wait_x_sec(6 + CGlobalInfo::m_options.m_wait_before_traffic);
5671}
5672
5673int CTRexExtendedDriverBase1G::wait_for_stable_link(){
5674    wait_x_sec(9 + CGlobalInfo::m_options.m_wait_before_traffic);
5675    return(0);
5676}
5677
// Set TX-descriptor prefetch/host/write-back thresholds for 1G devices.
// wthresh must be 0 on e1000 (see TX_PTHRESH_1G usage elsewhere).
void CTRexExtendedDriverBase1G::update_configuration(port_cfg_t * cfg){

    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH_1G;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = 0;
}
5684
// Intentionally empty: 1G devices have no DPDK fdir support here.
// Configuration is done in configure_rx_filter_rules by writing to registers
void CTRexExtendedDriverBase1G::update_global_config_fdir(port_cfg_t * cfg){
}
5688
5689#define E1000_RXDCTL_QUEUE_ENABLE	0x02000000
5690// e1000 driver does not support the generic stop/start queue API, so we need to implement ourselves
5691int CTRexExtendedDriverBase1G::stop_queue(CPhyEthIF * _if, uint16_t q_num) {
5692    uint32_t reg_val = _if->pci_reg_read( E1000_RXDCTL(q_num));
5693    reg_val &= ~E1000_RXDCTL_QUEUE_ENABLE;
5694    _if->pci_reg_write( E1000_RXDCTL(q_num), reg_val);
5695    return 0;
5696}
5697
5698int CTRexExtendedDriverBase1G::configure_rx_filter_rules(CPhyEthIF * _if){
5699    if ( get_is_stateless() ) {
5700        return configure_rx_filter_rules_stateless(_if);
5701    } else {
5702        return configure_rx_filter_rules_statefull(_if);
5703    }
5704
5705    return 0;
5706}
5707
/* Program e1000 flexible host filters (FHFT) so that stateful latency/rx-check
   packets (identified by TTL/protocol, or HopLimit/NextHdr for IPv6) land in
   RX queue 1. Always returns 0. */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
    uint16_t hops = get_rx_check_hops();
    // TTL byte sits in the high byte of the compared 16-bit word for IPv4
    uint16_t v4_hops = (hops << 8)&0xff00;
    uint8_t protocol;

    // latency packet protocol depends on the -l packet mode
    if (CGlobalInfo::m_options.m_l_pkt_mode == 0) {
        protocol = IPPROTO_SCTP;
    } else {
        protocol = IPPROTO_ICMP;
    }
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);
    _if->pci_reg_write( E1000_TTQF(0),   protocol
                        | 0x00008100 /* enable */
                        | 0xE0010000 /* RX queue is 1 */
                        );


    /* 16  :   12 MAC , (2)0x0800,2      | DW0 , DW1
       6 bytes , TTL , PROTO     | DW2=0 , DW3=0x0000FF06
    */
    int i;
    // IPv4: bytes being compared are {TTL, Protocol}
    uint16_t ff_rules_v4[6]={
        (uint16_t)(0xFF06 - v4_hops),
        (uint16_t)(0xFE11 - v4_hops),
        (uint16_t)(0xFF11 - v4_hops),
        (uint16_t)(0xFE06 - v4_hops),
        (uint16_t)(0xFF01 - v4_hops),
        (uint16_t)(0xFE01 - v4_hops),
    }  ;
    // IPv6: bytes being compared are {NextHdr, HopLimit}
    uint16_t ff_rules_v6[2]={
        (uint16_t)(0x3CFF - hops),
        (uint16_t)(0x3CFE - hops),
    }  ;
    uint16_t *ff_rules;
    uint16_t num_rules;
    uint32_t mask=0;      // accumulates one enable bit per programmed rule
    int  rule_id;

    if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
        ff_rules = &ff_rules_v6[0];
        num_rules = sizeof(ff_rules_v6)/sizeof(ff_rules_v6[0]);
    }else{
        ff_rules = &ff_rules_v4[0];
        num_rules = sizeof(ff_rules_v4)/sizeof(ff_rules_v4[0]);
    }

    clear_rx_filter_rules(_if);

    uint8_t len = 24;     // filter length in bytes programmed into FHFT
    for (rule_id=0; rule_id<num_rules; rule_id++ ) {
        /* clear rule all */
        for (i=0; i<0xff; i+=4) {
            _if->pci_reg_write( (E1000_FHFT(rule_id)+i) , 0);
        }

        // byte offset of the compared field shifts by 4 when a VLAN tag
        // is present, hence the four variants below
        if (  CGlobalInfo::m_options.preview.get_vlan_mode_enable() ){
            len += 8;
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6 VLAN: NextHdr/HopLimit offset = 0x18
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x03); /* MASK */
            }else{
                // IPv4 VLAN: TTL/Protocol offset = 0x1A
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+0) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(3*16)+8) , 0x0C); /* MASK */
            }
        }else{
            if (  CGlobalInfo::m_options.preview.get_ipv6_mode_enable() ){
                // IPv6: NextHdr/HopLimit offset = 0x14
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , PKT_NTOHS(ff_rules[rule_id]) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0x30); /* MASK */
            }else{
                // IPv4: TTL/Protocol offset = 0x16
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+4) , (PKT_NTOHS(ff_rules[rule_id])<<16) );
                _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)+8) , 0xC0); /* MASK */
            }
        }

        // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
        _if->pci_reg_write( (E1000_FHFT(rule_id)+0xFC) , (1<<16) | (1<<8)  | len);

        mask |=(1<<rule_id);
    }

    /* enable all rules */
    _if->pci_reg_write(E1000_WUFC, (mask<<16) | (1<<14) );

    return (0);
}
5801
5802// Sadly, DPDK has no support for i350 filters, so we need to implement by writing to registers.
/* Program e1000 flexible host filters for stateless flow-stat packets:
   four rules covering IPv4 / IPv4-in-VLAN (matched on IP ID msb == 0xff)
   and IPv6 / IPv6-in-VLAN (matched on flow-label byte == 0xff), all
   directed to RX queue 1. Always returns 0. */
int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _if) {
    /* enable filter to pass packet to rx queue 1 */
    _if->pci_reg_write( E1000_IMIR(0), 0x00020000);
    _if->pci_reg_write( E1000_IMIREXT(0), 0x00081000);

    uint8_t len = 24;       // filter length in bytes
    uint32_t mask = 0;      // accumulates one enable bit per rule
    int rule_id;

    clear_rx_filter_rules(_if);

    // Rule 0: plain IPv4 flow-stat packets
    rule_id = 0;
    mask |= 0x1 << rule_id;
    // filter for byte 18 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x04); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000008);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    // same as 0, but with vlan. type should be vlan. Inside vlan, should be IP with lsb of IP ID equals 0xff
    rule_id = 1;
    mask |= 0x1 << rule_id;
    // filter for byte 22 of packet (msb of IP ID) should equal ff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x00ff0000);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x40 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x00000008);
    // Was written together with IP ID filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    // Rule 2: plain IPv6 flow-stat packets
    rule_id = 2;
    mask |= 0x1 << rule_id;
    // ipv6 flow stat
    // filter for byte 16 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16)) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x01); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate IPv6.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x0000dd86);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    rule_id = 3;
    mask |= 0x1 << rule_id;
    // same as 2, with vlan. Type is vlan. Inside vlan, IPv6 with flow label second bits 4-11 equals 0xff
    // filter for byte 20 of packet (part of flow label) should equal 0xff
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 4) ,  0x000000ff);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x10 | 0x03); /* MASK */
    // + bytes 12 + 13 (ether type) should indicate VLAN.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x00000081);
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
    // + bytes 16 + 17 (vlan type) should indicate IP.
    _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) ,  0x0000dd86);
    // Was written together with flow label filter
    // _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
    // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
    _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | len);

    /* enable rules */
    _if->pci_reg_write(E1000_WUFC, (mask << 16) | (1 << 14) );

    return (0);
}
5874
5875// clear registers of rules
5876void CTRexExtendedDriverBase1G::clear_rx_filter_rules(CPhyEthIF * _if) {
5877    for (int rule_id = 0 ; rule_id < 8; rule_id++) {
5878        for (int i = 0; i < 0xff; i += 4) {
5879            _if->pci_reg_write( (E1000_FHFT(rule_id) + i) , 0);
5880        }
5881    }
5882}
5883
5884int CTRexExtendedDriverBase1G::set_rcv_all(CPhyEthIF * _if, bool set_on) {
5885    // byte 12 equals 08 - for IPv4 and ARP
5886    //                86 - For IPv6
5887    //                81 - For VLAN
5888    //                88 - For MPLS
5889    uint8_t eth_types[] = {0x08, 0x86, 0x81, 0x88};
5890    uint32_t mask = 0;
5891
5892    clear_rx_filter_rules(_if);
5893
5894    if (set_on) {
5895        for (int rule_id = 0; rule_id < sizeof(eth_types); rule_id++) {
5896            mask |= 0x1 << rule_id;
5897            // Filter for byte 12 of packet
5898            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) ,  0x000000 | eth_types[rule_id]);
5899            _if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x10); /* MASK */
5900            // FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1, len = 24
5901            _if->pci_reg_write( (E1000_FHFT(rule_id) + 0xFC) , (1 << 16) | (1 << 8) | 24);
5902        }
5903    } else {
5904        configure_rx_filter_rules(_if);
5905    }
5906
5907    return 0;
5908}
5909
5910void CTRexExtendedDriverBase1G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
5911
5912    stats->ipackets     +=  _if->pci_reg_read(E1000_GPRC) ;
5913
5914    stats->ibytes       +=  (_if->pci_reg_read(E1000_GORCL) );
5915    stats->ibytes       +=  (((uint64_t)_if->pci_reg_read(E1000_GORCH))<<32);
5916
5917
5918    stats->opackets     +=  _if->pci_reg_read(E1000_GPTC);
5919    stats->obytes       +=  _if->pci_reg_read(E1000_GOTCL) ;
5920    stats->obytes       +=  ( (((uint64_t)_if->pci_reg_read(IXGBE_GOTCH))<<32) );
5921
5922    stats->f_ipackets   +=  0;
5923    stats->f_ibytes     += 0;
5924
5925
5926    stats->ierrors      +=  ( _if->pci_reg_read(E1000_RNBC) +
5927                              _if->pci_reg_read(E1000_CRCERRS) +
5928                              _if->pci_reg_read(E1000_ALGNERRC ) +
5929                              _if->pci_reg_read(E1000_SYMERRS ) +
5930                              _if->pci_reg_read(E1000_RXERRC ) +
5931
5932                              _if->pci_reg_read(E1000_ROC)+
5933                              _if->pci_reg_read(E1000_RUC)+
5934                              _if->pci_reg_read(E1000_RJC) +
5935
5936                              _if->pci_reg_read(E1000_XONRXC)+
5937                              _if->pci_reg_read(E1000_XONTXC)+
5938                              _if->pci_reg_read(E1000_XOFFRXC)+
5939                              _if->pci_reg_read(E1000_XOFFTXC)+
5940                              _if->pci_reg_read(E1000_FCRUC)
5941                              );
5942
5943    stats->oerrors      +=  0;
5944    stats->imcasts      =  0;
5945    stats->rx_nombuf    =  0;
5946}
5947
// Intentionally empty for 1G: no explicit clear needed here.
void CTRexExtendedDriverBase1G::clear_extended_stats(CPhyEthIF * _if){
}
5950
5951#if 0
5952int CTRexExtendedDriverBase1G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
5953                                            ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
5954    uint32_t port_id = _if->get_port_id();
5955    return g_trex.m_rx_sl.get_rx_stats(port_id, pkts, prev_pkts, bytes, prev_bytes, min, max);
5956}
5957#endif
5958
// Read RXNFGPC and discard the value — presumably the register is
// clear-on-read, so this resets it. NOTE(review): confirm against the
// ixgbe datasheet.
void CTRexExtendedDriverBase10G::clear_extended_stats(CPhyEthIF * _if){
    _if->pci_reg_read(IXGBE_RXNFGPC);
}
5962
// Delegate flow-director global config to the 10G-specific port_cfg helper.
void CTRexExtendedDriverBase10G::update_global_config_fdir(port_cfg_t * cfg){
    cfg->update_global_config_fdir_10g();
}
5966
// Set TX-descriptor prefetch/host/write-back thresholds for 10G devices.
void CTRexExtendedDriverBase10G::update_configuration(port_cfg_t * cfg){
    cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
    cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
    cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
}
5972
5973int CTRexExtendedDriverBase10G::configure_rx_filter_rules(CPhyEthIF * _if) {
5974    set_rcv_all(_if, false);
5975    if ( get_is_stateless() ) {
5976        return configure_rx_filter_rules_stateless(_if);
5977    } else {
5978        return