main_dpdk.cpp revision fa8792d5
1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2017 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
// Sampling rates for the rx-check feature: sample 1 of every N packets.
#define RX_CHECK_MIX_SAMPLE_RATE 8
#define RX_CHECK_MIX_SAMPLE_RATE_1G 2

// Maximum number of packets pulled from an RX queue in a single burst.
#define MAX_PKT_BURST   32

// Upper bound on data-plane cores and TX queues per port.
#define BP_MAX_CORES 32
#define BP_MAX_TX_QUEUE 16
// Two extra service threads: master + latency (presumably; confirm at usage sites).
#define BP_MASTER_AND_LATENCY 2

// RX/TX descriptor ring sizes, per queue (entries, not bytes).
#define RX_DESC_NUM_DROP_Q 64
#define RX_DESC_NUM_DATA_Q 1024
#define RX_DESC_NUM_DROP_Q_MLX 8
#define RX_DESC_NUM_DATA_Q_VM 512
#define TX_DESC_NUM 512
99
100typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
101struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
102extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
103void set_driver();
104void reorder_dpdk_ports();
105
106static int max_stat_hw_id_seen = 0;
107static int max_stat_hw_id_seen_payload = 0;
108
109static inline int get_is_rx_thread_enabled() {
110    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
111}
112
struct port_cfg_t;

// argv storage handed to rte_eal_init(). The pointers in global_dpdk_args
// presumably reference the static string buffers below, so both must stay
// alive through EAL initialization — confirm in the argument-building code.
#define MAX_DPDK_ARGS 50
static CPlatformYamlInfo global_platform_cfg_info;
static int global_dpdk_args_num ;
static char * global_dpdk_args[MAX_DPDK_ARGS];
static char global_cores_str[100];
static char global_prefix_str[100];
static char global_loglevel_str[20];
static char global_master_id_str[10];
static char global_mlx5_so_id_str[50];
static char global_mlx4_so_id_str[50];
static char global_image_postfix[10];
#define TREX_NAME "_t-rex-64"
127
// Abstract base for all per-NIC driver wrappers. Each concrete driver sets
// m_cap (capability flags) in its constructor and implements the pure-virtual
// configuration/stats hooks. Virtual member order must not change (vtable).
class CTRexExtendedDriverBase {
protected:
    enum {
        // Is there HW support for dropping packets arriving to certain queue?
        TREX_DRV_CAP_DROP_Q = 0x1,
        /* Does this NIC type support automatic packet dropping in case of a link down?
           in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
           this interface is used as a workaround to let TRex work without link in stateless mode, driver that
           does not support that will fail at init time, since otherwise it would hang and trigger the watchdog */
        TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN = 0x2,
        // Does the driver support changing MAC address?
        TREX_DRV_CAP_MAC_ADDR_CHG = 0x4,
        /* Mellanox driver does not work well with the DPDK port reorder we do */
        TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE = 0x8,
    } trex_drv_cap;

public:
    // Minimal rx-check sample rate this NIC supports (1 of every N packets).
    virtual int get_min_sample_rate(void)=0;
    // Fill/patch the DPDK port configuration for this NIC type.
    virtual void update_configuration(port_cfg_t * cfg)=0;
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;
    // Install the RX filter rules that steer latency/flow-stat packets.
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return 0;}
    bool is_hardware_support_drop_queue() {
        return ((m_cap & TREX_DRV_CAP_DROP_Q) != 0);
    }
    bool hardware_support_mac_change() {
        return ((m_cap & TREX_DRV_CAP_MAC_ADDR_CHG) != 0);
    }
    bool drop_packets_incase_of_linkdown() {
        return ((m_cap & TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN) != 0);
    }
    bool supports_port_reorder() {
        // Since only Mellanox does not support, logic here is reversed compared to other flags.
        // Put this only if not supported.
        return ((m_cap & TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE) == 0);
    }
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    // Shared stats helper: applies fixed per-packet byte adjustments (fix_i/fix_o).
    void get_extended_stats_fixed(CPhyEthIF * _if, CPhyEthIFStats *stats, int fix_i, int fix_o);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    // True when flow-stat counters are maintained by the NIC itself.
    virtual bool hw_rx_stat_supported(){return false;}
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual int get_stat_counters_num() {return 0;}
    virtual int get_rx_stat_capabilities() {return 0;}
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    // Enable/disable "receive all" (promiscuous-like) filtering mode.
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;

    // Default mbuf pool for RX: the 9k pool (supports jumbo frames).
    virtual rte_mempool_t * get_rx_mem_pool(int socket_id) {
        return CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k;
    }
    // Default queue/descriptor layout; VM and Mellanox drivers override this.
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
            p.rx_drop_q_num = 0;
        } else {
            p.rx_drop_q_num = 1;
        }
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q;
        p.tx_desc_num = TX_DESC_NUM;
    }

protected:
    // flags describing interface capabilities (bitwise OR of the enum above)
    uint32_t m_cap;
};
202
203
// Driver wrapper for Intel 1G NICs (igb / net_e1000_igb).
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    // Factory used by the driver registry.
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
242
// Base for all virtual drivers. No constructor. Should not create object from this type.
class CTRexExtendedDriverVirtBase : public CTRexExtendedDriverBase {
public:
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg) {}

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    // Virtual NICs: single data queue, no drop queue, smaller rings.
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        p.rx_drop_q_num = 0;
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q_VM;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q;
        p.tx_desc_num = TX_DESC_NUM;
    }
    virtual rte_mempool_t * get_rx_mem_pool(int socket_id) {
        // In VMs there is usually less memory available
        return CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048;
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    // Virtual drivers have no HW filters to toggle — nothing to do.
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
    CFlowStatParser *get_flow_stat_parser();
};
279
// virtio-net paravirtual driver: one TX/RX queue, no HW capabilities.
class CTRexExtendedDriverVirtio : public CTRexExtendedDriverVirtBase {
public:
    CTRexExtendedDriverVirtio() {
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */ 0;
    }
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverVirtio() );
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
};
291
// VMware vmxnet3 paravirtual driver: one TX/RX queue, no HW capabilities.
class CTRexExtendedDriverVmxnet3 : public CTRexExtendedDriverVirtBase {
public:
    CTRexExtendedDriverVmxnet3(){
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG*/0;
    }

    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverVmxnet3() );
    }
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void update_configuration(port_cfg_t * cfg);
};
305
// Intel X710/XL710 virtual function (i40evf) driver.
class CTRexExtendedDriverI40evf : public CTRexExtendedDriverVirtBase {
public:
    CTRexExtendedDriverI40evf(){
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
    virtual void get_extended_stats(CPhyEthIF * _if, CPhyEthIFStats *stats) {
        // 4/4 are fixed per-packet byte adjustments for in/out counters.
        get_extended_stats_fixed(_if, stats, 4, 4);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverI40evf() );
    }
};
320
// Intel 82599 virtual function (ixgbevf) driver.
// NOTE(review): inherits from I40evf — presumably to reuse its configuration
// and stats adjustments; confirm this is intentional rather than VirtBase.
class CTRexExtendedDriverIxgbevf : public CTRexExtendedDriverI40evf {

public:
    CTRexExtendedDriverIxgbevf(){
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
    virtual void get_extended_stats(CPhyEthIF * _if, CPhyEthIFStats *stats) {
        get_extended_stats_fixed(_if, stats, 4, 4);
    }

    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverIxgbevf() );
    }
};
336
// Emulated e1000 (net_e1000_em) driver; constructor is private — create() only.
class CTRexExtendedDriverBaseE1000 : public CTRexExtendedDriverVirtBase {
    CTRexExtendedDriverBaseE1000() {
        // E1000 driver is only relevant in VM in our case
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
public:
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverBaseE1000() );
    }
    // e1000 driver hands us packets with the Ethernet CRC, so we need to chop it off
    virtual void update_configuration(port_cfg_t * cfg);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);

};
352
// Driver wrapper for Intel 10G NICs (82599 / net_ixgbe).
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    // Factory used by the driver registry.
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    // Add or remove an ethertype-based filter rule on the given interface.
    int add_del_eth_filter(CPhyEthIF * _if, bool is_add, uint16_t ethertype);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
388
// Driver wrapper for Intel 40G NICs (X710/XL710 / net_i40e).
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase40G(){
        // Since we support only 128 counters per if, it is OK to configure here 4 statically.
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG | TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    // Factory used by the driver registry.
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        uint32_t ret = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
        // HW counters on x710 do not support counting bytes; byte counting is
        // only available when flow stats are done in software (queue modes below).
        if ( CGlobalInfo::m_options.preview.get_disable_hw_flow_stat()
             || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
             || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
            ret |= TrexPlatformApi::IF_STAT_RX_BYTES_COUNT;
        }
        return ret;
    }
    virtual int wait_for_stable_link();
    // HW flow-stat counters are used unless explicitly disabled or in a SW queue mode.
    virtual bool hw_rx_stat_supported(){
        if (CGlobalInfo::m_options.preview.get_disable_hw_flow_stat()
            || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
            || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
            return false;
        } else {
            return true;
        }
    }
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

private:
    // Number of interfaces per physical card (fixed at 4, see constructor note).
    uint8_t m_if_per_card;
};
456
// Driver wrapper for Cisco VIC NICs (net_enic).
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
        m_cap = TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    // Factory used by the driver registry.
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    void clear_extended_stats(CPhyEthIF * _if);
    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    // Note: no IF_STAT_RX_BYTES_COUNT here — byte counters are not reported.
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t id
                               , uint8_t l4_proto, uint8_t tos, int queue);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

};
502
503
// Driver wrapper for Mellanox ConnectX-4/5 NICs (net_mlx5).
// Port reorder is not possible with this driver (see capability flag).
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseMlnx5G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG | TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE;
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_MANY_DROP_Q);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // as with 40G, disabling flow control using the DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    // Factory used by the driver registry.
    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        /* Mellanox ConnectX-4 can drop only 35MPPS per Rx queue.
         * to workaround this issue we will create multi rx queue and enable RSS. for Queue1 we will disable RSS
         * return zero for disable patch and rx queues number for enable.
        */
        p.rx_drop_q_num = 4;
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q_MLX;
        p.tx_desc_num = TX_DESC_NUM;
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual int get_rx_stat_capabilities() {
        return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
    }
    virtual int wait_for_stable_link();
    // disabling flow control using the DPDK API causes the interface to malfunction
    virtual bool flow_control_disable_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
                               , int queue);
    virtual int add_del_rx_filter_rules(CPhyEthIF * _if, bool set_on);
};
559
// Factory function type: creates a concrete driver wrapper instance.
typedef CTRexExtendedDriverBase * (*create_object_t) (void);


// Registry record: maps a DPDK driver name to its factory function.
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;
    create_object_t     m_constructor;
};
568
// Singleton registry of all supported driver wrappers. The active driver is
// selected once (set_driver_name) based on the DPDK driver name detected at
// init time; get_drv() asserts if called before that selection happens.
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    // True if a driver with the given DPDK name is registered.
    bool is_driver_exists(std::string name);



    // Select and instantiate the active driver; aborts on unknown name.
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    // Access the active driver object; asserts if no driver was selected yet.
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    // Singleton accessor (lazily constructs the registry).
    static CTRexExtendedDriverDb * Ins();

private:
    // Invoke the factory registered under 'name'; NULL if not found.
    CTRexExtendedDriverBase * create_driver(std::string name);

    CTRexExtendedDriverDb(){
        register_driver(std::string("net_ixgbe"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("net_e1000_igb"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("net_i40e"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("net_enic"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("net_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);

        /* virtual devices */
        register_driver(std::string("net_e1000_em"), CTRexExtendedDriverBaseE1000::create);
        register_driver(std::string("net_vmxnet3"), CTRexExtendedDriverVmxnet3::create);
        register_driver(std::string("net_virtio"), CTRexExtendedDriverVirtio::create);
        register_driver(std::string("net_i40e_vf"), CTRexExtendedDriverI40evf::create);
        register_driver(std::string("net_ixgbe_vf"), CTRexExtendedDriverIxgbevf::create);

        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;
    bool        m_driver_was_set;
    std::string m_driver_name;
    CTRexExtendedDriverBase * m_drv;
    std::vector <CTRexExtendedDriverRec*>     m_list;

};
631
// Singleton instance storage (zero-initialized; created lazily in Ins()).
CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
633
634
635void CTRexExtendedDriverDb::register_driver(std::string name,
636                                            create_object_t func){
637    CTRexExtendedDriverRec * rec;
638    rec = new CTRexExtendedDriverRec();
639    rec->m_driver_name=name;
640    rec->m_constructor=func;
641    m_list.push_back(rec);
642}
643
644
645bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
646    int i;
647    for (i=0; i<(int)m_list.size(); i++) {
648        if (m_list[i]->m_driver_name == name) {
649            return (true);
650        }
651    }
652    return (false);
653}
654
655
656CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
657    int i;
658    for (i=0; i<(int)m_list.size(); i++) {
659        if (m_list[i]->m_driver_name == name) {
660            return ( m_list[i]->m_constructor() );
661        }
662    }
663    return( (CTRexExtendedDriverBase *)0);
664}
665
666
667
668CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
669    if (!m_ins) {
670        m_ins = new CTRexExtendedDriverDb();
671    }
672    return (m_ins);
673}
674
675static CTRexExtendedDriverBase *  get_ex_drv(){
676
677    return ( CTRexExtendedDriverDb::Ins()->get_drv());
678}
679
680static inline int get_min_sample_rate(void){
681    return ( get_ex_drv()->get_min_sample_rate());
682}
683
684// cores =0==1,1*2,2,3,4,5,6
685// An enum for all the option types
686enum { OPT_HELP,
687       OPT_MODE_BATCH,
688       OPT_MODE_INTERACTIVE,
689       OPT_NODE_DUMP,
690       OPT_DUMP_INTERFACES,
691       OPT_UT,
692       OPT_CORES,
693       OPT_SINGLE_CORE,
694       OPT_FLIP_CLIENT_SERVER,
695       OPT_FLOW_FLIP_CLIENT_SERVER,
696       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
697       OPT_RATE_MULT,
698       OPT_DURATION,
699       OPT_PLATFORM_FACTOR,
700       OPT_PUB_DISABLE,
701       OPT_LIMT_NUM_OF_PORTS,
702       OPT_PLAT_CFG_FILE,
703       OPT_MBUF_FACTOR,
704       OPT_LATENCY,
705       OPT_NO_CLEAN_FLOW_CLOSE,
706       OPT_LATENCY_MASK,
707       OPT_ONLY_LATENCY,
708       OPT_LATENCY_PREVIEW ,
709       OPT_WAIT_BEFORE_TRAFFIC,
710       OPT_PCAP,
711       OPT_RX_CHECK,
712       OPT_IO_MODE,
713       OPT_IPV6,
714       OPT_LEARN,
715       OPT_LEARN_MODE,
716       OPT_LEARN_VERIFY,
717       OPT_L_PKT_MODE,
718       OPT_NO_FLOW_CONTROL,
719       OPT_NO_HW_FLOW_STAT,
720       OPT_X710_RESET_THRESHOLD,
721       OPT_VLAN,
722       OPT_RX_CHECK_HOPS,
723       OPT_CLIENT_CFG_FILE,
724       OPT_NO_KEYBOARD_INPUT,
725       OPT_VIRT_ONE_TX_RX_QUEUE,
726       OPT_PREFIX,
727       OPT_SEND_DEBUG_PKT,
728       OPT_NO_WATCHDOG,
729       OPT_ALLOW_COREDUMP,
730       OPT_CHECKSUM_OFFLOAD,
731       OPT_CLOSE,
732       OPT_ARP_REF_PER,
733       OPT_NO_OFED_CHECK,
734       OPT_NO_SCAPY_SERVER,
735       OPT_ACTIVE_FLOW,
736       OPT_RT,
737       OPT_MLX4_SO,
738       OPT_MLX5_SO
739};
740
/* Command-line option table for CSimpleOpt. Argument types:
   SO_NONE --    no argument needed
   SO_REQ_SEP -- single required argument
   SO_MULTI --   multiple arguments needed
   An option id may appear more than once to register aliases
   (e.g. --client_cfg / --client-cfg map to OPT_CLIENT_CFG_FILE).
*/
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE    },
        { OPT_HELP,                   "-h",                SO_NONE    },
        { OPT_HELP,                   "--help",            SO_NONE    },
        { OPT_UT,                     "--ut",              SO_NONE    },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP },
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE    },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP },
        { OPT_SINGLE_CORE,            "-s",                SO_NONE    },
        { OPT_FLIP_CLIENT_SERVER,     "--flip",            SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",                SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE, "-e",          SO_NONE    },
        { OPT_NO_CLEAN_FLOW_CLOSE,    "--nc",              SO_NONE    },
        { OPT_LIMT_NUM_OF_PORTS,      "--limit-ports",     SO_REQ_SEP },
        { OPT_CORES,                  "-c",                SO_REQ_SEP },
        { OPT_NODE_DUMP,              "-v",                SO_REQ_SEP },
        { OPT_DUMP_INTERFACES,        "--dump-interfaces", SO_MULTI   },
        { OPT_LATENCY,                "-l",                SO_REQ_SEP },
        { OPT_DURATION,               "-d",                SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR,        "-pm",               SO_REQ_SEP },
        { OPT_PUB_DISABLE,            "-pubd",             SO_NONE    },
        { OPT_RATE_MULT,              "-m",                SO_REQ_SEP },
        { OPT_LATENCY_MASK,           "--lm",              SO_REQ_SEP },
        { OPT_ONLY_LATENCY,           "--lo",              SO_NONE    },
        { OPT_LATENCY_PREVIEW,        "-k",                SO_REQ_SEP },
        { OPT_WAIT_BEFORE_TRAFFIC,    "-w",                SO_REQ_SEP },
        { OPT_PCAP,                   "--pcap",            SO_NONE    },
        { OPT_RX_CHECK,               "--rx-check",        SO_REQ_SEP },
        { OPT_IO_MODE,                "--iom",             SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS,          "--hops",            SO_REQ_SEP },
        { OPT_IPV6,                   "--ipv6",            SO_NONE    },
        { OPT_LEARN,                  "--learn",           SO_NONE    },
        { OPT_LEARN_MODE,             "--learn-mode",      SO_REQ_SEP },
        { OPT_LEARN_VERIFY,           "--learn-verify",    SO_NONE    },
        { OPT_L_PKT_MODE,             "--l-pkt-mode",      SO_REQ_SEP },
        { OPT_NO_FLOW_CONTROL,        "--no-flow-control-change", SO_NONE },
        { OPT_NO_HW_FLOW_STAT,        "--no-hw-flow-stat", SO_NONE },
        { OPT_X710_RESET_THRESHOLD,   "--x710-reset-threshold", SO_REQ_SEP },
        { OPT_VLAN,                   "--vlan",            SO_NONE    },
        { OPT_CLIENT_CFG_FILE,        "--client_cfg",      SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE,        "--client-cfg",      SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT,      "--no-key",          SO_NONE    },
        { OPT_VIRT_ONE_TX_RX_QUEUE,   "--software",        SO_NONE    },
        { OPT_PREFIX,                 "--prefix",          SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT,         "--send-debug-pkt",  SO_REQ_SEP },
        { OPT_MBUF_FACTOR,            "--mbuf-factor",     SO_REQ_SEP },
        { OPT_NO_WATCHDOG,            "--no-watchdog",     SO_NONE    },
        { OPT_ALLOW_COREDUMP,         "--allow-coredump",  SO_NONE    },
        { OPT_CHECKSUM_OFFLOAD,       "--checksum-offload", SO_NONE   },
        { OPT_ACTIVE_FLOW,            "--active-flows",   SO_REQ_SEP  },
        { OPT_MLX5_SO,                "--mlx5-so", SO_NONE    },
        { OPT_MLX4_SO,                "--mlx4-so", SO_NONE    },
        { OPT_CLOSE,                  "--close-at-end",    SO_NONE    },
        { OPT_ARP_REF_PER,            "--arp-refresh-period", SO_REQ_SEP },
        { OPT_NO_OFED_CHECK,          "--no-ofed-check",   SO_NONE    },
        { OPT_NO_SCAPY_SERVER,        "--no-scapy-server", SO_NONE    },
        { OPT_RT,                     "--rt",              SO_NONE    },
        SO_END_OF_OPTIONS
    };
806
/**
 * Print the command-line help screen to stdout: run modes, all supported
 * options, usage examples, the license summary and build/version info.
 *
 * @return 0 always (callers typically return/exit right after).
 */
static int usage(){

    printf(" Usage: t-rex-64 [mode] <options>\n\n");
    printf(" mode is one of:\n");
    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
    printf("   -i        : Run TRex in 'stateless' mode\n");
    printf("\n");

    /* Options are listed in (roughly) alphabetical order. */
    printf(" Available options are:\n");
    printf(" --active-flows             : An experimental switch to scale up or down the number of active flows.  \n");
    printf("                              It is not accurate due to the quantization of flow scheduler and in some case does not work. \n");
    printf("                              Example --active-flows 500000 wil set the ballpark of the active flow to be ~0.5M \n");
    printf(" --allow-coredump           : Allow creation of core dump \n");
    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
    printf("                               This it temporary option. Will be removed in the future \n");
    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
    printf(" --ipv6                     : Work in ipv6 mode \n");
    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
    printf("      0 (default)    send SCTP packets  \n");
    printf("      1              Send ICMP request packets  \n");
    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
    printf("    Rate of zero means no latency check \n");
    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
    printf(" --learn-mode [1-3]         : Used for working in NAT environments. Dynamically learn the NAT translation done by the DUT \n");
    printf("      1    In case of TCP flow, use TCP ACK in first SYN to pass NAT translation information. Initial SYN packet must be first packet in the TCP flow \n");
    printf("           In case of UDP stream, NAT translation information will pass in IP ID field of first packet in flow. This means that this field is changed by TRex\n");
    printf("      2    Add special IP option to pass NAT translation information to first packet of each flow. Will not work on certain firewalls if they drop packets with IP options \n");
    printf("      3    Like 1, but without support for sequence number randomization in server->client direction. Performance (flow/second) better than 1 \n");
    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
    printf(" --lm                       : Hex mask of cores that should send traffic \n");
    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
    printf(" --lo                       : Only run latency test \n");
    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
    printf(" --mbuf-factor              : Factor for packet memory \n");
    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
    printf(" --no-hw-flow-stat          : Relevant only for Intel x710 stateless mode. Do not use HW counters for flow stats\n");
    printf("                            : Enabling this will support lower traffic rate, but will also report RX byte count statistics. See manual for more details\n");
    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
    printf(" --no-ofed-check            : Disable the check of OFED version \n");
    printf(" --no-scapy-server          : Disable Scapy server implicit start at stateless \n");
    printf(" --no-watchdog              : Disable watchdog \n");
    printf(" --rt                       : Run TRex DP/RX cores in realtime priority \n");
    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
    printf(" -pubd                      : Disable monitors publishers \n");
    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
    printf(" -s                         : Single core. Run only one data path core. For debug \n");
    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
    printf(" --software                 : Do not configure any hardware rules. In this mode we use 1 core, and one RX queue and one TX queue per port\n");
    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");

    printf("\n");
    printf(" Examples: ");
    printf(" basic trex run for 20 sec and multiplier of 10 \n");
    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
    printf("\n\n");
    /* License boilerplate, shown as part of the help screen. */
    printf(" Copyright (c) 2015-2017 Cisco Systems, Inc.    \n");
    printf("                                                                  \n");
    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
    printf(" you may not use this file except in compliance with the License. \n");
    printf(" You may obtain a copy of the License at                          \n");
    printf("                                                                  \n");
    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
    printf("                                                                  \n");
    printf(" Unless required by applicable law or agreed to in writing, software \n");
    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
    printf(" See the License for the specific language governing permissions and      \n");
    printf(" limitations under the License.                                           \n");
    printf(" \n");
    printf(" Open Source Components / Libraries \n");
    printf(" DPDK       (BSD)       \n");
    printf(" YAML-CPP   (BSD)       \n");
    printf(" JSONCPP    (MIT)       \n");
    printf(" \n");
    printf(" Open Source Binaries \n");
    printf(" ZMQ        (LGPL v3plus) \n");
    printf(" \n");
    /* Build information baked in at compile time (see ../linux_dpdk/version.h). */
    printf(" Version : %s   \n",VERSION_BUILD_NUM);
    printf(" DPDK version : %s   \n",rte_version());
    printf(" User    : %s   \n",VERSION_USER);
    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
    printf(" Uuid    : %s    \n",VERSION_UIID);
    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
    return (0);
}
914
915
916int gtest_main(int argc, char **argv) ;
917
/* Report a command-line parsing error on stdout and terminate the process
   with a failure status. Never returns. */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n*** " << msg << "\n\n";
    exit(-1);
}
922
923static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
924    CSimpleOpt args(argc, argv, parser_options);
925
926    bool latency_was_set=false;
927    (void)latency_was_set;
928    char ** rgpszArg = NULL;
929    bool opt_vlan_was_set = false;
930
931    int a=0;
932    int node_dump=0;
933
934    po->preview.setFileWrite(true);
935    po->preview.setRealTime(true);
936    uint32_t tmp_data;
937    float tmp_double;
938
939    po->m_run_mode = CParserOption::RUN_MODE_INVALID;
940
941    while ( args.Next() ){
942        if (args.LastError() == SO_SUCCESS) {
943            switch (args.OptionId()) {
944
945            case OPT_UT :
946                parse_err("Supported only in simulation");
947                break;
948
949            case OPT_HELP:
950                usage();
951                return -1;
952
953            case OPT_MODE_BATCH:
954                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
955                    parse_err("Please specify single run mode");
956                }
957                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
958                po->cfg_file = args.OptionArg();
959                break;
960
961            case OPT_MODE_INTERACTIVE:
962                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
963                    parse_err("Please specify single run mode");
964                }
965                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
966                break;
967
968            case OPT_NO_KEYBOARD_INPUT  :
969                po->preview.set_no_keyboard(true);
970                break;
971
972            case OPT_CLIENT_CFG_FILE :
973                po->client_cfg_file = args.OptionArg();
974                break;
975
976            case OPT_PLAT_CFG_FILE :
977                po->platform_cfg_file = args.OptionArg();
978                break;
979
980            case OPT_SINGLE_CORE :
981                po->preview.setSingleCore(true);
982                break;
983
984            case OPT_IPV6:
985                po->preview.set_ipv6_mode_enable(true);
986                break;
987
988            case OPT_RT:
989                po->preview.set_rt_prio_mode(true);
990                break;
991
992            case OPT_MLX5_SO:
993                po->preview.set_mlx5_so_mode(true);
994                break;
995
996            case OPT_MLX4_SO:
997                po->preview.set_mlx4_so_mode(true);
998                break;
999
1000            case OPT_LEARN :
1001                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
1002                break;
1003
1004            case OPT_LEARN_MODE :
1005                sscanf(args.OptionArg(),"%d", &tmp_data);
1006                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
1007                    exit(-1);
1008                }
1009                po->m_learn_mode = (uint8_t)tmp_data;
1010                break;
1011
1012            case OPT_LEARN_VERIFY :
1013                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
1014                if (po->m_learn_mode == 0) {
1015                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
1016                }
1017                po->preview.set_learn_and_verify_mode_enable(true);
1018                break;
1019
1020            case OPT_L_PKT_MODE :
1021                sscanf(args.OptionArg(),"%d", &tmp_data);
1022                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
1023                    exit(-1);
1024                }
1025                po->m_l_pkt_mode=(uint8_t)tmp_data;
1026                break;
1027
1028            case OPT_NO_HW_FLOW_STAT:
1029                po->preview.set_disable_hw_flow_stat(true);
1030                break;
1031            case OPT_NO_FLOW_CONTROL:
1032                po->preview.set_disable_flow_control_setting(true);
1033                break;
1034            case OPT_X710_RESET_THRESHOLD:
1035                po->set_x710_fdir_reset_threshold(atoi(args.OptionArg()));
1036                break;
1037            case OPT_VLAN:
1038                opt_vlan_was_set = true;
1039                break;
1040            case OPT_LIMT_NUM_OF_PORTS :
1041                po->m_expected_portd =atoi(args.OptionArg());
1042                break;
1043            case  OPT_CORES  :
1044                po->preview.setCores(atoi(args.OptionArg()));
1045                break;
1046            case OPT_FLIP_CLIENT_SERVER :
1047                po->preview.setClientServerFlip(true);
1048                break;
1049            case OPT_NO_CLEAN_FLOW_CLOSE :
1050                po->preview.setNoCleanFlowClose(true);
1051                break;
1052            case OPT_FLOW_FLIP_CLIENT_SERVER :
1053                po->preview.setClientServerFlowFlip(true);
1054                break;
1055            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
1056                po->preview.setClientServerFlowFlipAddr(true);
1057                break;
1058            case OPT_NODE_DUMP:
1059                a=atoi(args.OptionArg());
1060                node_dump=1;
1061                po->preview.setFileWrite(false);
1062                break;
1063            case OPT_DUMP_INTERFACES:
1064                if (first_time) {
1065                    rgpszArg = args.MultiArg(1);
1066                    while (rgpszArg != NULL) {
1067                        po->dump_interfaces.push_back(rgpszArg[0]);
1068                        rgpszArg = args.MultiArg(1);
1069                    }
1070                }
1071                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
1072                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
1073                }
1074                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
1075                break;
1076            case OPT_MBUF_FACTOR:
1077                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
1078                break;
1079            case OPT_RATE_MULT :
1080                sscanf(args.OptionArg(),"%f", &po->m_factor);
1081                break;
1082            case OPT_DURATION :
1083                sscanf(args.OptionArg(),"%f", &po->m_duration);
1084                break;
1085            case OPT_PUB_DISABLE:
1086                po->preview.set_zmq_publish_enable(false);
1087                break;
1088            case OPT_PLATFORM_FACTOR:
1089                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
1090                break;
1091            case OPT_LATENCY :
1092                latency_was_set=true;
1093                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
1094                break;
1095            case OPT_LATENCY_MASK :
1096                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
1097                break;
1098            case OPT_ONLY_LATENCY :
1099                po->preview.setOnlyLatency(true);
1100                break;
1101            case OPT_NO_WATCHDOG :
1102                po->preview.setWDDisable(true);
1103                break;
1104            case OPT_ALLOW_COREDUMP :
1105                po->preview.setCoreDumpEnable(true);
1106                break;
1107            case  OPT_LATENCY_PREVIEW :
1108                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
1109                break;
1110            case  OPT_WAIT_BEFORE_TRAFFIC :
1111                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
1112                break;
1113            case OPT_PCAP:
1114                po->preview.set_pcap_mode_enable(true);
1115                break;
1116            case OPT_ACTIVE_FLOW:
1117                sscanf(args.OptionArg(),"%f", &tmp_double);
1118                po->m_active_flows=(uint32_t)tmp_double;
1119                break;
1120            case OPT_RX_CHECK :
1121                sscanf(args.OptionArg(),"%d", &tmp_data);
1122                po->m_rx_check_sample=(uint16_t)tmp_data;
1123                po->preview.set_rx_check_enable(true);
1124                break;
1125            case OPT_RX_CHECK_HOPS :
1126                sscanf(args.OptionArg(),"%d", &tmp_data);
1127                po->m_rx_check_hops = (uint16_t)tmp_data;
1128                break;
1129            case OPT_IO_MODE :
1130                sscanf(args.OptionArg(),"%d", &tmp_data);
1131                po->m_io_mode=(uint16_t)tmp_data;
1132                break;
1133
1134            case OPT_VIRT_ONE_TX_RX_QUEUE:
1135                CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
1136                po->preview.setCores(1); // Only one TX core supported in software mode currently
1137                break;
1138
1139            case OPT_PREFIX:
1140                po->prefix = args.OptionArg();
1141                break;
1142
1143            case OPT_SEND_DEBUG_PKT:
1144                sscanf(args.OptionArg(),"%d", &tmp_data);
1145                po->m_debug_pkt_proto = (uint8_t)tmp_data;
1146                break;
1147
1148            case OPT_CHECKSUM_OFFLOAD:
1149                po->preview.setChecksumOffloadEnable(true);
1150                break;
1151
1152            case OPT_CLOSE:
1153                po->preview.setCloseEnable(true);
1154                break;
1155            case  OPT_ARP_REF_PER:
1156                sscanf(args.OptionArg(),"%d", &tmp_data);
1157                po->m_arp_ref_per=(uint16_t)tmp_data;
1158                break;
1159            case OPT_NO_OFED_CHECK:
1160                break;
1161            case OPT_NO_SCAPY_SERVER:
1162                break;
1163
1164            default:
1165                printf("Error: option %s is not handled.\n\n", args.OptionText());
1166                return -1;
1167                break;
1168            } // End of switch
1169        }// End of IF
1170        else {
1171            if (args.LastError() == SO_OPT_INVALID) {
1172                printf("Error: option %s is not recognized.\n\n", args.OptionText());
1173            } else if (args.LastError() == SO_ARG_MISSING) {
1174                printf("Error: option %s is expected to have argument.\n\n", args.OptionText());
1175            }
1176            usage();
1177            return -1;
1178        }
1179    } // End of while
1180
1181
1182    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
1183        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
1184    }
1185
1186    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
1187        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
1188                  "If you think it is important, please open a defect or write to TRex mailing list\n");
1189    }
1190
1191    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
1192        || (CGlobalInfo::m_options.m_arp_ref_per != 0)
1193        || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
1194        || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
1195        po->set_rx_enabled();
1196    }
1197
1198    if ( node_dump ){
1199        po->preview.setVMode(a);
1200    }
1201
1202    /* if we have a platform factor we need to devided by it so we can still work with normalized yaml profile  */
1203    po->m_factor = po->m_factor/po->m_platform_factor;
1204
1205    uint32_t cores=po->preview.getCores();
1206    if ( cores > ((BP_MAX_CORES)/2-1) ) {
1207        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
1208        return -1;
1209    }
1210
1211
1212    if ( first_time ){
1213        /* only first time read the configuration file */
1214        if ( po->platform_cfg_file.length() >0  ) {
1215            if ( node_dump ){
1216                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
1217            }
1218            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
1219            if ( node_dump ){
1220                global_platform_cfg_info.Dump(stdout);
1221            }
1222        }else{
1223            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
1224                if ( node_dump ){
1225                    printf("Using configuration file /etc/trex_cfg.yaml \n");
1226                }
1227                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
1228                if ( node_dump ){
1229                    global_platform_cfg_info.Dump(stdout);
1230                }
1231            }
1232        }
1233    }
1234
1235    if ( get_is_stateless() ) {
1236        if ( opt_vlan_was_set ) {
1237            // Only purpose of this in stateless is for configuring the 82599 rules correctly
1238            po->preview.set_vlan_mode(CPreviewMode::VLAN_MODE_NORMAL);
1239        }
1240        if (CGlobalInfo::m_options.client_cfg_file != "") {
1241            parse_err("Client config file is not supported with interactive (stateless) mode ");
1242        }
1243        if ( po->m_duration ) {
1244            parse_err("Duration is not supported with interactive (stateless) mode ");
1245        }
1246
1247        if ( po->preview.get_is_rx_check_enable() ) {
1248            parse_err("Rx check is not supported with interactive (stateless) mode ");
1249        }
1250
1251        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
1252            parse_err("Latency check is not supported with interactive (stateless) mode ");
1253        }
1254
1255        if ( po->preview.getSingleCore() ){
1256            parse_err("Single core is not supported with interactive (stateless) mode ");
1257        }
1258
1259    } else {
1260        if ( !po->m_duration ) {
1261            po->m_duration = 3600.0;
1262        }
1263        if ( global_platform_cfg_info.m_tw.m_info_exist ){
1264
1265            CTimerWheelYamlInfo *lp=&global_platform_cfg_info.m_tw;
1266            std::string  err;
1267            if (!lp->Verify(err)){
1268                parse_err(err);
1269            }
1270
1271            po->set_tw_bucket_time_in_usec(lp->m_bucket_time_usec);
1272            po->set_tw_buckets(lp->m_buckets);
1273            po->set_tw_levels(lp->m_levels);
1274        }
1275    }
1276
1277    return 0;
1278}
1279
1280static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1281    // copy, as arg parser sometimes changes the argv
1282    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1283    for(int i=0; i<argc; i++) {
1284        argv_copy[i] = strdup(argv[i]);
1285    }
1286    int ret = parse_options(argc, argv_copy, po, first_time);
1287
1288    // free
1289    for(int i=0; i<argc; i++) {
1290        free(argv_copy[i]);
1291    }
1292    free(argv_copy);
1293    return ret;
1294}
1295
1296int main_test(int argc , char * argv[]);
1297
1298
/* Default RX/TX descriptor-ring threshold register values used when
 * configuring DPDK queues (see port_cfg_t below). */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

/* Alternate TX thresholds, presumably for 1G NICs — TODO confirm where used. */
#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1314
1315
/* Aggregates the DPDK per-port configuration structures (port, RX queue,
 * drop-enabled RX queue, TX queue) with TRex's default values. A driver
 * object can then override fields via update_var()/update_global_config_fdir(). */
struct port_cfg_t {
public:
    port_cfg_t(){
        // Start from all-zero DPDK config structs, then set only what we need.
        memset(&m_port_conf,0,sizeof(m_port_conf));
        memset(&m_rx_conf,0,sizeof(m_rx_conf));
        memset(&m_tx_conf,0,sizeof(m_tx_conf));
        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));

        // Normal RX queue thresholds (defaults tuned for ixgbe, see above).
        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
        m_rx_conf.rx_free_thresh =32;

        // RX queue variant with rx_drop_en set: drop packets when no
        // descriptors are available instead of back-pressuring the NIC.
        m_rx_drop_conf.rx_thresh.pthresh = 0;
        m_rx_drop_conf.rx_thresh.hthresh = 0;
        m_rx_drop_conf.rx_thresh.wthresh = 0;
        m_rx_drop_conf.rx_free_thresh =32;
        m_rx_drop_conf.rx_drop_en=1;

        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;

        // Accept jumbo frames up to 9K payload (+22 bytes of headers).
        m_port_conf.rxmode.jumbo_frame=1;
        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
        m_port_conf.rxmode.hw_strip_crc=1;
    }

    // Let the NIC-specific driver adjust the configuration.
    inline void update_var(void){
        get_ex_drv()->update_configuration(this);
    }

    // Let the NIC-specific driver adjust flow-director global config.
    inline void update_global_config_fdir(void){
        get_ex_drv()->update_global_config_fdir(this);
    }

    struct rte_eth_conf     m_port_conf;     // port-level config
    struct rte_eth_rxconf   m_rx_conf;       // default RX queue config
    struct rte_eth_rxconf   m_rx_drop_conf;  // RX queue config with drop enabled
    struct rte_eth_txconf   m_tx_conf;       // TX queue config
};
1357
1358
1359/* this object is per core / per port / per queue
1360   each core will have 2 ports to send to
1361
1362
1363   port0                                port1
1364
1365   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1366
1367*/
1368
1369
/* (register offset, printable name) pair used when dumping extended
 * NIC statistics registers — see CPhyEthIF::dump_stats_extended(). */
typedef struct cnt_name_ {
    uint32_t offset;  // PCI register offset to read
    char * name;      // register name; points at a string literal
}cnt_name_t ;

/* Build a cnt_name_t entry from a register macro, stringifying the macro's
 * own name for the printable label. */
#define MY_REG(a) {a,(char *)#a}
1376
1377void CPhyEthIFStats::Clear() {
1378    ipackets = 0;
1379    ibytes = 0;
1380    f_ipackets = 0;
1381    f_ibytes = 0;
1382    opackets = 0;
1383    obytes = 0;
1384    ierrors = 0;
1385    oerrors = 0;
1386    imcasts = 0;
1387    rx_nombuf = 0;
1388    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1389    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1390    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1391}
1392
// dump all counters (even ones that equal 0)
// NOTE(review): the 'fd' parameter is ignored — DP_A/DP_A4 print via printf
// to stdout. Confirm whether output should go to 'fd'; the same macros are
// also used by Dump() and CPhyEthIgnoreStats::dump() below.
// DP_A4 prints a counter unconditionally; DP_A prints it only when non-zero.
void CPhyEthIFStats::DumpAll(FILE *fd) {
#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
}
1404
// dump all non zero counters
// NOTE(review): 'fd' is unused — DP_A (defined in DumpAll above) prints to
// stdout via printf.
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1418
// Dump the "ignored" packet counters (including the ARP tx/rx counts).
// NOTE(review): 'fd' is unused — DP_A4 (defined in DumpAll above) prints to
// stdout via printf.
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1427
1428// Clear the RX queue of an interface, dropping all packets
1429void CPhyEthIF::flush_rx_queue(void){
1430
1431    rte_mbuf_t * rx_pkts[32];
1432    int j=0;
1433    uint16_t cnt=0;
1434
1435    while (true) {
1436        j++;
1437        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1438        if ( cnt ) {
1439            int i;
1440            for (i=0; i<(int)cnt;i++) {
1441                rte_mbuf_t * m=rx_pkts[i];
1442                /*printf("rx--\n");
1443                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1444                rte_pktmbuf_free(m);
1445            }
1446        }
1447        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1448            break;
1449        }
1450    }
1451    if (cnt>0) {
1452        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1453    }
1454}
1455
1456
1457void CPhyEthIF::dump_stats_extended(FILE *fd){
1458
1459    cnt_name_t reg[]={
1460        MY_REG(IXGBE_GPTC), /* total packet */
1461        MY_REG(IXGBE_GOTCL), /* total bytes */
1462        MY_REG(IXGBE_GOTCH),
1463
1464        MY_REG(IXGBE_GPRC),
1465        MY_REG(IXGBE_GORCL),
1466        MY_REG(IXGBE_GORCH),
1467
1468
1469
1470        MY_REG(IXGBE_RXNFGPC),
1471        MY_REG(IXGBE_RXNFGBCL),
1472        MY_REG(IXGBE_RXNFGBCH),
1473        MY_REG(IXGBE_RXDGPC  ),
1474        MY_REG(IXGBE_RXDGBCL ),
1475        MY_REG(IXGBE_RXDGBCH  ),
1476        MY_REG(IXGBE_RXDDGPC ),
1477        MY_REG(IXGBE_RXDDGBCL ),
1478        MY_REG(IXGBE_RXDDGBCH  ),
1479        MY_REG(IXGBE_RXLPBKGPC ),
1480        MY_REG(IXGBE_RXLPBKGBCL),
1481        MY_REG(IXGBE_RXLPBKGBCH ),
1482        MY_REG(IXGBE_RXDLPBKGPC ),
1483        MY_REG(IXGBE_RXDLPBKGBCL),
1484        MY_REG(IXGBE_RXDLPBKGBCH ),
1485        MY_REG(IXGBE_TXDGPC      ),
1486        MY_REG(IXGBE_TXDGBCL     ),
1487        MY_REG(IXGBE_TXDGBCH     ),
1488        MY_REG(IXGBE_FDIRUSTAT ),
1489        MY_REG(IXGBE_FDIRFSTAT ),
1490        MY_REG(IXGBE_FDIRMATCH ),
1491        MY_REG(IXGBE_FDIRMISS )
1492
1493    };
1494    fprintf (fd," extended counters \n");
1495    int i;
1496    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1497        cnt_name_t *lp=&reg[i];
1498        uint32_t c=pci_reg_read(lp->offset);
1499        // xl710 bug. Counter values are -559038737 when they should be 0
1500        if (c && c != -559038737 ) {
1501            fprintf (fd," %s  : %d \n",lp->name,c);
1502        }
1503    }
1504}
1505
// Thin wrapper: report the driver-specific RX flow-stat capability bitmask.
int CPhyEthIF::get_rx_stat_capabilities() {
    return get_ex_drv()->get_rx_stat_capabilities();
}
1509
1510
1511
/* Configure the underlying DPDK device with the requested number of rx/tx
   queues. Exits the whole process on failure (port configuration is
   mandatory for operation). When checksum offload is requested on the
   command line, verifies the NIC actually supports UDP and TCP checksum
   offload and aborts otherwise. */
void CPhyEthIF::configure(uint16_t nb_rx_queue,
                          uint16_t nb_tx_queue,
                          const struct rte_eth_conf *eth_conf){
    int ret;
    ret = rte_eth_dev_configure(m_port_id,
                                nb_rx_queue,
                                nb_tx_queue,
                                eth_conf);

    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Cannot configure device: "
                 "err=%d, port=%u\n",
                 ret, m_port_id);

    /* get device info (caches offload capabilities in m_dev_info) */
    rte_eth_dev_info_get(m_port_id, &m_dev_info);

    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
        /* check if the device supports TCP and UDP checksum offloading */
        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
                     "port=%u\n",
                     m_port_id);
        }
        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
                     "port=%u\n",
                     m_port_id);
        }
    }
}
1543
1544/*
1545  rx-queue 0 is the default queue. All traffic not going to queue 1
1546  will be dropped as queue 0 is disabled
1547  rx-queue 1 - Latency measurement packets and other features that need software processing will go here.
1548*/
1549void CPhyEthIF::configure_rx_duplicate_rules(){
1550    if ( get_is_rx_filter_enable() ){
1551        get_ex_drv()->configure_rx_filter_rules(this);
1552    }
1553}
1554
1555int CPhyEthIF::set_port_rcv_all(bool is_rcv) {
1556    // In these modes we are always receiving all packets anyway.
1557    switch (CGlobalInfo::get_queues_mode()) {
1558    case CGlobalInfo::Q_MODE_ONE_QUEUE:
1559        // In this mode we are always receiving all packets anyway.
1560        break;
1561    case CGlobalInfo::Q_MODE_RSS:
1562        //todo: need to send announcment to all tx cores
1563        //todo: need new function set_all_ports rcv all, to be able to send less tx messages
1564        break;
1565    default:
1566        get_ex_drv()->set_rcv_all(this, is_rcv);
1567        break;
1568    }
1569
1570    return 0;
1571}
1572
/* Stop the RX "drop" queue so unwanted traffic is discarded in hardware.
   No-op in non-normal queue modes or packet-debug mode, where all packets
   must remain visible. Aborts if latency (rx) is enabled but the driver
   cannot drop in hardware. */
void CPhyEthIF::stop_rx_drop_queue() {
    // In debug mode, we want to see all packets. Don't want to disable any queue.
    if ( (CGlobalInfo::get_queues_mode() != CGlobalInfo::Q_MODE_NORMAL)
         || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
        return;
    }
    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
            printf(" ERROR latency feature is not supported with current hardware  \n");
            exit(1);
        }
    }
    // OK to only stop MAIN_DPDK_DROP_Q here. The only driver in which there are
    // more than 1 drop q is Mellanox. stop_queue does not work in this case anyway.
    get_ex_drv()->stop_queue(this, MAIN_DPDK_DROP_Q);
}
1589
1590
1591void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1592                               uint16_t nb_rx_desc,
1593                               unsigned int socket_id,
1594                               const struct rte_eth_rxconf *rx_conf,
1595                               struct rte_mempool *mb_pool){
1596
1597    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1598                                     nb_rx_desc,
1599                                     socket_id,
1600                                     rx_conf,
1601                                     mb_pool);
1602    if (ret < 0)
1603        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1604                 "err=%d, port=%u\n",
1605                 ret, m_port_id);
1606}
1607
1608
1609
1610void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1611                               uint16_t nb_tx_desc,
1612                               unsigned int socket_id,
1613                               const struct rte_eth_txconf *tx_conf){
1614
1615    int ret = rte_eth_tx_queue_setup( m_port_id,
1616                                      tx_queue_id,
1617                                      nb_tx_desc,
1618                                      socket_id,
1619                                      tx_conf);
1620    if (ret < 0)
1621        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1622                 "err=%d, port=%u queue=%u\n",
1623                 ret, m_port_id, tx_queue_id);
1624
1625}
1626
1627void CPhyEthIF::stop(){
1628    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1629        rte_eth_dev_stop(m_port_id);
1630        rte_eth_dev_close(m_port_id);
1631    }
1632}
1633
1634void CPhyEthIF::start(){
1635
1636    get_ex_drv()->clear_extended_stats(this);
1637
1638    int ret;
1639
1640    m_bw_tx.reset();
1641    m_bw_rx.reset();
1642
1643    m_stats.Clear();
1644    int i;
1645    for (i=0;i<10; i++ ) {
1646        ret = rte_eth_dev_start(m_port_id);
1647        if (ret==0) {
1648            return;
1649        }
1650        delay(1000);
1651    }
1652    if (ret < 0)
1653        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1654                 "err=%d, port=%u\n",
1655                 ret, m_port_id);
1656
1657}
1658
1659// Disabling flow control on interface
1660void CPhyEthIF::disable_flow_control(){
1661    int ret;
1662    // see trex-64 issue with loopback on the same NIC
1663    struct rte_eth_fc_conf fc_conf;
1664    memset(&fc_conf,0,sizeof(fc_conf));
1665    fc_conf.mode=RTE_FC_NONE;
1666    fc_conf.autoneg=1;
1667    fc_conf.pause_time=100;
1668    int i;
1669    for (i=0; i<5; i++) {
1670        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1671        if (ret==0) {
1672            break;
1673        }
1674        delay(1000);
1675    }
1676    if (ret < 0)
1677        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1678                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1679                 ret, m_port_id);
1680}
1681
1682/*
1683Get user friendly devices description from saved env. var
1684Changes certain attributes based on description
1685*/
void DpdkTRexPortAttr::update_description(){
    struct rte_pci_addr pci_addr;
    char pci[16];
    char * envvar;
    std::string pci_envvar_name;
    // NOTE(review): assumes device and devargs are non-NULL (i.e. a PCI
    // device with devargs populated) - would crash on a vdev; verify callers.
    pci_addr = rte_eth_devices[m_port_id].device->devargs->pci.addr;
    // canonical DDDD:BB:DD.F form, e.g. "0000:03:00.0"
    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
    intf_info_st.pci_addr = pci;
    // Description is looked up from an env var named "pci<addr>" with ':'
    // and '.' replaced by '_' (e.g. pci0000_03_00_0).
    pci_envvar_name = "pci" + intf_info_st.pci_addr;
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
    envvar = std::getenv(pci_envvar_name.c_str());
    if (envvar) {
        intf_info_st.description = envvar;
    } else {
        intf_info_st.description = "Unknown";
    }
    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
        flag_is_link_change_supported = false;
    }
    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
        flag_is_fc_change_supported = false;
        flag_is_led_change_supported = false;
    }
    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
    }
}
1714
1715int DpdkTRexPortAttr::set_led(bool on){
1716    if (on) {
1717        return rte_eth_led_on(m_port_id);
1718    }else{
1719        return rte_eth_led_off(m_port_id);
1720    }
1721}
1722
1723int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1724    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1725    if (ret) {
1726        mode = -1;
1727        return ret;
1728    }
1729    mode = (int) fc_conf_tmp.mode;
1730    return 0;
1731}
1732
1733int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1734    if (!flag_is_fc_change_supported) {
1735        return -ENOTSUP;
1736    }
1737    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1738    if (ret) {
1739        return ret;
1740    }
1741    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1742    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1743}
1744
// Reset the port's extended (driver-specific) statistics counters.
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1748
/* Fetch the port's extended statistics values.
   First call with NULL queries the count; second call fills the values.
   Returns 0 on success, a negative DPDK error code otherwise. */
int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
    if (size < 0) {
        return size;
    }
    xstats_values_tmp.resize(size);
    xstats_values.resize(size);
    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
    if (size < 0) {
        return size;
    }
    for (int i=0; i<size; i++) {
        // NOTE(review): indexes the output by each entry's .id - assumes
        // every id is < size (true for DPDK port xstats, where ids are
        // array positions); verify if this ever changes.
        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
    }
    return 0;
}
1765
1766int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1767    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1768    if (size < 0) {
1769        return size;
1770    }
1771    xstats_names_tmp.resize(size);
1772    xstats_names.resize(size);
1773    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1774    if (size < 0) {
1775        return size;
1776    }
1777    for (int i=0; i<size; i++) {
1778        xstats_names[i] = xstats_names_tmp[i].name;
1779    }
1780    return 0;
1781}
1782
1783void DpdkTRexPortAttr::dump_link(FILE *fd){
1784    fprintf(fd,"port : %d \n",(int)m_port_id);
1785    fprintf(fd,"------------\n");
1786
1787    fprintf(fd,"link         : ");
1788    if (m_link.link_status) {
1789        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1790                (unsigned) m_link.link_speed,
1791                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1792                ("full-duplex") : ("half-duplex\n"));
1793    } else {
1794        fprintf(fd," Link Down\n");
1795    }
1796    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1797}
1798
// Refresh the cached rte_eth_dev_info (dev_info) from DPDK.
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1802
1803void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1804    uint32_t speed_capa = dev_info.speed_capa;
1805    if (speed_capa & ETH_LINK_SPEED_1G)
1806        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1807    if (speed_capa & ETH_LINK_SPEED_10G)
1808        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1809    if (speed_capa & ETH_LINK_SPEED_40G)
1810        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1811    if (speed_capa & ETH_LINK_SPEED_100G)
1812        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1813}
1814
// Refresh the cached link state (m_link); may block until link info is ready.
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1818
1819bool DpdkTRexPortAttr::update_link_status_nowait(){
1820    rte_eth_link new_link;
1821    bool changed = false;
1822    rte_eth_link_get_nowait(m_port_id, &new_link);
1823
1824    if (new_link.link_speed != m_link.link_speed ||
1825                new_link.link_duplex != m_link.link_duplex ||
1826                    new_link.link_autoneg != m_link.link_autoneg ||
1827                        new_link.link_status != m_link.link_status) {
1828        changed = true;
1829
1830        /* in case of link status change - notify the dest object */
1831        if (new_link.link_status != m_link.link_status) {
1832            on_link_down();
1833        }
1834    }
1835
1836    m_link = new_link;
1837    return changed;
1838}
1839
1840int DpdkTRexPortAttr::add_mac(char * mac){
1841    struct ether_addr mac_addr;
1842    for (int i=0; i<6;i++) {
1843        mac_addr.addr_bytes[i] =mac[i];
1844    }
1845
1846    if ( get_ex_drv()->hardware_support_mac_change() ) {
1847        if ( rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0) != 0) {
1848            printf("Failed setting MAC for port %d \n", m_port_id);
1849            exit(-1);
1850        }
1851    }
1852
1853    return 0;
1854}
1855
1856int DpdkTRexPortAttr::set_promiscuous(bool enable){
1857    if (enable) {
1858        rte_eth_promiscuous_enable(m_port_id);
1859    }else{
1860        rte_eth_promiscuous_disable(m_port_id);
1861    }
1862    return 0;
1863}
1864
1865int DpdkTRexPortAttr::set_multicast(bool enable){
1866    if (enable) {
1867        rte_eth_allmulticast_enable(m_port_id);
1868    }else{
1869        rte_eth_allmulticast_disable(m_port_id);
1870    }
1871    return 0;
1872}
1873
1874int DpdkTRexPortAttr::set_link_up(bool up){
1875    if (up) {
1876        return rte_eth_dev_set_link_up(m_port_id);
1877    }else{
1878        return rte_eth_dev_set_link_down(m_port_id);
1879    }
1880}
1881
1882bool DpdkTRexPortAttr::get_promiscuous(){
1883    int ret=rte_eth_promiscuous_get(m_port_id);
1884    if (ret<0) {
1885        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1886                 "err=%d, port=%u\n",
1887                 ret, m_port_id);
1888
1889    }
1890    return ( ret?true:false);
1891}
1892
1893bool DpdkTRexPortAttr::get_multicast(){
1894    int ret=rte_eth_allmulticast_get(m_port_id);
1895    if (ret<0) {
1896        rte_exit(EXIT_FAILURE, "rte_eth_allmulticast_get: "
1897                 "err=%d, port=%u\n",
1898                 ret, m_port_id);
1899
1900    }
1901    return ( ret?true:false);
1902}
1903
1904
// Copy the port's burned-in (default) MAC address into mac_addr.
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1908
// Thin wrapper: delegate flow-director stats dump to the driver layer.
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1912
/* Dump the non-zero fields of an ixgbe hardware-stats snapshot to fd.
   DP_A1 prints one scalar field; DP_A2 prints each non-zero element of an
   m-sized array field. Commented-out lines are counters deliberately
   excluded from the report. */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    // error / drop counters
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    // good packet / byte counters
    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    // tx packet-size histogram
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    // per-queue counters
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    // flow-director counters
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    // FCoE counters
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
1999
/* Snapshot the counters accumulated during pre-test (ARP resolution etc.)
   into m_ignore_stats, so they can be subtracted from the real test
   results, and zero the corresponding live counters. */
void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
    // reading m_stats, so drivers saving prev in m_stats will be updated.
    // Actually, we want m_stats to be cleared
    get_ex_drv()->get_extended_stats(this, &m_stats);

    // move the pre-test totals into the "ignore" baseline ...
    m_ignore_stats.ipackets = m_stats.ipackets;
    m_ignore_stats.ibytes = m_stats.ibytes;
    m_ignore_stats.opackets = m_stats.opackets;
    m_ignore_stats.obytes = m_stats.obytes;
    // ... and restart the live counters from zero
    m_stats.ipackets = 0;
    m_stats.opackets = 0;
    m_stats.ibytes = 0;
    m_stats.obytes = 0;

    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;

    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
        m_ignore_stats.dump(stdout);
    }
}
2022
/* Refresh counters and print a per-port summary (all counters plus the
   last measured TX rate). */
void CPhyEthIF::dump_stats(FILE *fd){

    update_counters();

    fprintf(fd,"port : %d \n",(int)m_port_id);
    fprintf(fd,"------------\n");
    m_stats.DumpAll(fd);
    //m_stats.Dump(fd);
    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
}
2034
// Reset both the hardware counters (via DPDK) and the software mirror.
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
2039
2040class CCorePerPort  {
2041public:
2042    CCorePerPort (){
2043        m_tx_queue_id=0;
2044        m_len=0;
2045        int i;
2046        for (i=0; i<MAX_PKT_BURST; i++) {
2047            m_table[i]=0;
2048        }
2049        m_port=0;
2050    }
2051    uint8_t                 m_tx_queue_id;
2052    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
2053    uint16_t                m_len;
2054    rte_mbuf_t *            m_table[MAX_PKT_BURST];
2055    CPhyEthIF  *            m_port;
2056};
2057
2058
2059#define MAX_MBUF_CACHE 100
2060
2061
/* per core/gbe queue port for transmit */
2063class CCoreEthIF : public CVirtualIF {
2064public:
2065    enum {
2066     INVALID_Q_ID = 255
2067    };
2068
2069public:
2070
2071    CCoreEthIF(){
2072        m_mbuf_cache=0;
2073    }
2074
2075    bool Create(uint8_t             core_id,
2076                uint8_t            tx_client_queue_id,
2077                CPhyEthIF  *        tx_client_port,
2078                uint8_t            tx_server_queue_id,
2079                CPhyEthIF  *        tx_server_port,
2080                uint8_t             tx_q_id_lat);
2081    void Delete();
2082
2083    virtual int open_file(std::string file_name){
2084        return (0);
2085    }
2086
2087    virtual int close_file(void){
2088        return (flush_tx_queue());
2089    }
2090    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
2091                                                       , CCorePerPort *  lp_port
2092                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
2093    virtual int send_node(CGenNode * node);
2094    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
2095    virtual int flush_tx_queue(void);
2096    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
2097
2098    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
2099
2100    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
2101
2102    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
2103
2104    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
2105    void GetCoreCounters(CVirtualIFPerSideStats *stats);
2106    void DumpCoreStats(FILE *fd);
2107    void DumpIfStats(FILE *fd);
2108    static void DumpIfCfgHeader(FILE *fd);
2109    void DumpIfCfg(FILE *fd);
2110
2111    socket_id_t get_socket_id(){
2112        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
2113    }
2114
2115    const CCorePerPort * get_ports() {
2116        return m_ports;
2117    }
2118
2119protected:
2120
2121    int send_burst(CCorePerPort * lp_port,
2122                   uint16_t len,
2123                   CVirtualIFPerSideStats  * lp_stats);
2124    int send_pkt(CCorePerPort * lp_port,
2125                 rte_mbuf_t *m,
2126                 CVirtualIFPerSideStats  * lp_stats);
2127    int send_pkt_lat(CCorePerPort * lp_port,
2128                 rte_mbuf_t *m,
2129                 CVirtualIFPerSideStats  * lp_stats);
2130
2131protected:
2132    uint8_t      m_core_id;
2133    uint16_t     m_mbuf_cache;
2134    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
2135    CNodeRing *  m_ring_to_rx;
2136
2137} __rte_cache_aligned; ;
2138
/* Stateless-mode specialization of CCoreEthIF: adds fast/slow-path
   send_node variants and flow-stat packet generation. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);

    /**
     * fast path version
     */
    virtual int send_node(CGenNode *node);

    /**
     * slow path version
     */
    virtual int send_node_service_mode(CGenNode *node);

protected:
    /* shared implementation; SERVICE_MODE selects slow-path handling */
    template <bool SERVICE_MODE> inline int send_node_common(CGenNode *no);

    inline rte_mbuf_t * generate_node_pkt(CGenNodeStateless *node_sl)   __attribute__ ((always_inline));
    inline int send_node_packet(CGenNodeStateless      *node_sl,
                                rte_mbuf_t             *m,
                                CCorePerPort           *lp_port,
                                CVirtualIFPerSideStats *lp_stats)   __attribute__ ((always_inline));

    rte_mbuf_t * generate_slow_path_node_pkt(CGenNodeStateless *node_sl);
};
2165
2166bool CCoreEthIF::Create(uint8_t             core_id,
2167                        uint8_t             tx_client_queue_id,
2168                        CPhyEthIF  *        tx_client_port,
2169                        uint8_t             tx_server_queue_id,
2170                        CPhyEthIF  *        tx_server_port,
2171                        uint8_t tx_q_id_lat ) {
2172    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
2173    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
2174    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2175    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
2176    m_ports[SERVER_SIDE].m_port        = tx_server_port;
2177    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2178    m_core_id = core_id;
2179
2180    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
2181    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
2182    assert( m_ring_to_rx);
2183    return (true);
2184}
2185
2186int CCoreEthIF::flush_tx_queue(void){
2187    /* flush both sides */
2188    pkt_dir_t dir;
2189    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2190        CCorePerPort * lp_port = &m_ports[dir];
2191        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2192        if ( likely(lp_port->m_len > 0) ) {
2193            send_burst(lp_port, lp_port->m_len, lp_stats);
2194            lp_port->m_len = 0;
2195        }
2196    }
2197
2198    return 0;
2199}
2200
2201void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2202    stats->Clear();
2203    pkt_dir_t   dir ;
2204    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2205        stats->Add(&m_stats[dir]);
2206    }
2207}
2208
2209void CCoreEthIF::DumpCoreStats(FILE *fd){
2210    fprintf (fd,"------------------------ \n");
2211    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2212    fprintf (fd,"------------------------ \n");
2213
2214    CVirtualIFPerSideStats stats;
2215    GetCoreCounters(&stats);
2216    stats.Dump(stdout);
2217}
2218
// Print the column header matching DumpIfCfg()'s per-core rows.
void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
    fprintf (fd," ------------------------------------------\n");
}
2223
// Print one row of core/port/queue assignments (see DumpIfCfgHeader).
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2233
2234
2235void CCoreEthIF::DumpIfStats(FILE *fd){
2236
2237    fprintf (fd,"------------------------ \n");
2238    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2239    fprintf (fd,"------------------------ \n");
2240
2241    const char * t[]={"client","server"};
2242    pkt_dir_t   dir ;
2243    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2244        CCorePerPort * lp=&m_ports[dir];
2245        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2246        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2247        fprintf (fd," ---------------------------- \n");
2248        lpstats->Dump(fd);
2249    }
2250}
2251
2252#define DELAY_IF_NEEDED
2253
/* Transmit `len` mbufs from lp_port->m_table on the port's data tx queue.
   With DELAY_IF_NEEDED defined (the default here), retries forever with a
   1us back-off until the NIC accepts everything, counting each retry in
   m_tx_queue_full; otherwise leftover packets are dropped and counted in
   m_tx_drop. Always returns 0. */
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

#ifdef DEBUG_SEND_BURST
    if (CGlobalInfo::m_options.preview.getVMode() > 10) {
        fprintf(stdout, "send_burst port:%d queue:%d len:%d\n", lp_port->m_port->get_rte_port_id()
                , lp_port->m_tx_queue_id, len);
        for (int i = 0; i < lp_port->m_len; i++) {
            fprintf(stdout, "packet %d:\n", i);
            rte_mbuf_t *m = lp_port->m_table[i];
            utl_DumpBuffer(stdout, rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m), 0);
        }
    }
#endif

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    // Busy-retry the unsent tail until the hardware queue drains.
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2294
2295
2296int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2297                         rte_mbuf_t      *m,
2298                         CVirtualIFPerSideStats  * lp_stats
2299                         ){
2300
2301    uint16_t len = lp_port->m_len;
2302    lp_port->m_table[len]=m;
2303    len++;
2304
2305    /* enough pkts to be sent */
2306    if (unlikely(len == MAX_PKT_BURST)) {
2307        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2308        len = 0;
2309    }
2310    lp_port->m_len = len;
2311
2312    return (0);
2313}
2314
/* Transmit a single latency packet immediately on the dedicated latency
   tx queue (no batching, so timestamps stay accurate). With
   DELAY_IF_NEEDED, retries with 1us back-off until sent; otherwise the
   packet is dropped on a full queue. Returns the number of packets sent. */
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2339
2340void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2341                              rte_mbuf_t      *m){
2342    CCorePerPort *  lp_port=&m_ports[dir];
2343    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2344    send_pkt(lp_port,m,lp_stats);
2345    /* flush */
2346    send_burst(lp_port,lp_port->m_len,lp_stats);
2347    lp_port->m_len = 0;
2348}
2349
/* Send one packet that belongs to a flow-stat rule, updating the per-flow
   tx counters. Payload-rule packets (hw_id >= MAX_FLOW_STATS) get a
   latency header (seq/hw_id/flow_seq/magic + timestamp) appended and are
   sent on the latency queue; ip-id-rule packets go out unchanged on the
   data queue. Always returns 0. */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // clone/extend the packet with a latency payload header
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        // deliberately corrupt sequence numbers to exercise error counters
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // timestamp as late as possible for latency accuracy
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2403
2404inline rte_mbuf_t *
2405CCoreEthIFStateless::generate_node_pkt(CGenNodeStateless *node_sl) {
2406    if (unlikely(node_sl->get_is_slow_path())) {
2407        return generate_slow_path_node_pkt(node_sl);
2408    }
2409
2410    /* check that we have mbuf  */
2411    rte_mbuf_t *m;
2412
2413    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2414        m = node_sl->cache_mbuf_array_get_cur();
2415        rte_pktmbuf_refcnt_update(m,1);
2416    }else{
2417        m = node_sl->get_cache_mbuf();
2418
2419        if (m) {
2420            /* cache case */
2421            rte_pktmbuf_refcnt_update(m,1);
2422        }else{
2423            m=node_sl->alloc_node_with_vm();
2424            assert(m);
2425        }
2426    }
2427
2428    return m;
2429}
2430
2431inline int
2432CCoreEthIFStateless::send_node_packet(CGenNodeStateless      *node_sl,
2433                                      rte_mbuf_t             *m,
2434                                      CCorePerPort           *lp_port,
2435                                      CVirtualIFPerSideStats *lp_stats) {
2436
2437    if (unlikely(node_sl->is_stat_needed())) {
2438        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2439            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2440            // assert here just to make sure.
2441            assert(1);
2442        }
2443        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2444    } else {
2445        return send_pkt(lp_port, m, lp_stats);
2446    }
2447}
2448
2449int CCoreEthIFStateless::send_node(CGenNode *node) {
2450    return send_node_common<false>(node);
2451}
2452
2453int CCoreEthIFStateless::send_node_service_mode(CGenNode *node) {
2454    return send_node_common<true>(node);
2455}
2456
2457/**
2458 * this is the common function and it is templated
2459 * for two compiler evaluation for performance
2460 *
2461 */
2462template <bool SERVICE_MODE>
2463int CCoreEthIFStateless::send_node_common(CGenNode *node) {
2464    CGenNodeStateless * node_sl = (CGenNodeStateless *) node;
2465
2466    pkt_dir_t dir                     = (pkt_dir_t)node_sl->get_mbuf_cache_dir();
2467    CCorePerPort *lp_port             = &m_ports[dir];
2468    CVirtualIFPerSideStats *lp_stats  = &m_stats[dir];
2469
2470    /* generate packet (can never fail) */
2471    rte_mbuf_t *m = generate_node_pkt(node_sl);
2472
2473    /* template boolean - this will be removed at compile time */
2474    if (SERVICE_MODE) {
2475        TrexStatelessCaptureMngr::getInstance().handle_pkt_tx(m, lp_port->m_port->get_port_id());
2476    }
2477
2478    /* send */
2479    return send_node_packet(node_sl, m, lp_port, lp_stats);
2480}
2481
2482/**
2483 * slow path code goes here
2484 *
2485 */
2486rte_mbuf_t *
2487CCoreEthIFStateless::generate_slow_path_node_pkt(CGenNodeStateless *node_sl) {
2488
2489    if (node_sl->m_type == CGenNode::PCAP_PKT) {
2490        CGenNodePCAP *pcap_node = (CGenNodePCAP *)node_sl;
2491        return pcap_node->get_pkt();
2492    }
2493
2494    /* unhandled case of slow path node */
2495    assert(0);
2496    return (NULL);
2497}
2498
2499void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2500
2501    assert(cfg);
2502
2503    /* take the right direction config */
2504    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2505
2506    /* dst mac */
2507    if (cfg_dir.has_dst_mac_addr()) {
2508        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2509    }
2510
2511    /* src mac */
2512    if (cfg_dir.has_src_mac_addr()) {
2513        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2514    }
2515
2516    /* VLAN */
2517    if (cfg_dir.has_vlan()) {
2518        add_vlan(m, cfg_dir.get_vlan());
2519    }
2520}
2521
2522/**
2523 * slow path features goes here (avoid multiple IFs)
2524 *
2525 */
2526void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2527
2528
2529    /* MAC ovverride */
2530    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2531        /* client side */
2532        if ( node->is_initiator_pkt() ) {
2533            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2534        }
2535    }
2536
2537    /* flag is faster than checking the node pointer (another cacheline) */
2538    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2539        apply_client_cfg(node->m_client_cfg, m, dir, p);
2540    }
2541
2542}
2543
2544int CCoreEthIF::send_node(CGenNode * node) {
2545
2546#ifdef OPT_REPEAT_MBUF
2547
2548    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2549        pkt_dir_t       dir;
2550        rte_mbuf_t *    m=node->get_cache_mbuf();
2551        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2552        CCorePerPort *  lp_port=&m_ports[dir];
2553        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2554        rte_pktmbuf_refcnt_update(m,1);
2555        send_pkt(lp_port,m,lp_stats);
2556        return (0);
2557    }
2558#endif
2559
2560    CFlowPktInfo *  lp=node->m_pkt_info;
2561    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2562
2563    pkt_dir_t       dir;
2564    bool            single_port;
2565
2566    dir         = node->cur_interface_dir();
2567    single_port = node->get_is_all_flow_from_same_dir() ;
2568
2569
2570    if ( unlikely(CGlobalInfo::m_options.preview.get_vlan_mode()
2571                  != CPreviewMode::VLAN_MODE_NONE) ) {
2572        uint16_t vlan_id=0;
2573
2574        if (CGlobalInfo::m_options.preview.get_vlan_mode()
2575            == CPreviewMode::VLAN_MODE_LOAD_BALANCE) {
2576            /* which vlan to choose 0 or 1*/
2577            uint8_t vlan_port = (node->m_src_ip & 1);
2578            vlan_id = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2579            if (likely( vlan_id > 0 ) ) {
2580                dir = dir ^ vlan_port;
2581            } else {
2582                /* both from the same dir but with VLAN0 */
2583                vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2584            }
2585        } else if (CGlobalInfo::m_options.preview.get_vlan_mode()
2586            == CPreviewMode::VLAN_MODE_NORMAL) {
2587            CCorePerPort *lp_port = &m_ports[dir];
2588            uint8_t port_id = lp_port->m_port->get_port_id();
2589            vlan_id = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();
2590        }
2591
2592        add_vlan(m, vlan_id);
2593    }
2594
2595    CCorePerPort *lp_port = &m_ports[dir];
2596    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2597
2598    if (unlikely(m==0)) {
2599        lp_stats->m_tx_alloc_error++;
2600        return(0);
2601    }
2602
2603    /* update mac addr dest/src 12 bytes */
2604    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2605    uint8_t p_id = lp_port->m_port->get_port_id();
2606
2607    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2608
2609     /* when slowpath features are on */
2610    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2611        handle_slowpath_features(node, m, p, dir);
2612    }
2613
2614
2615    if ( unlikely( node->is_rx_check_enabled() ) ) {
2616        lp_stats->m_tx_rx_check_pkt++;
2617        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2618        lp_stats->m_template.inc_template( node->get_template_id( ));
2619    }else{
2620
2621#ifdef OPT_REPEAT_MBUF
2622        // cache only if it is not sample as this is more complex mbuf struct
2623        if ( unlikely( node->can_cache_mbuf() ) ) {
2624            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2625                m_mbuf_cache++;
2626                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2627                    /* limit the number of object to cache */
2628                    node->set_mbuf_cache_dir( dir);
2629                    node->set_cache_mbuf(m);
2630                    rte_pktmbuf_refcnt_update(m,1);
2631                }
2632            }
2633        }
2634#endif
2635
2636    }
2637
2638    /*printf("send packet -- \n");
2639      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2640
2641    /* send the packet */
2642    send_pkt(lp_port,m,lp_stats);
2643    return (0);
2644}
2645
2646
2647int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2648    assert(p);
2649    assert(dir<2);
2650
2651    CCorePerPort *  lp_port=&m_ports[dir];
2652    uint8_t p_id=lp_port->m_port->get_port_id();
2653    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2654    return (0);
2655}
2656
2657pkt_dir_t
2658CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2659
2660    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2661        if (m_ports[dir].m_port->get_port_id() == port_id) {
2662            return dir;
2663        }
2664    }
2665
2666    return (CS_INVALID);
2667}
2668
2669class CLatencyHWPort : public CPortLatencyHWBase {
2670public:
2671    void Create(CPhyEthIF  * p,
2672                uint8_t tx_queue,
2673                uint8_t rx_queue){
2674        m_port=p;
2675        m_tx_queue_id=tx_queue;
2676        m_rx_queue_id=rx_queue;
2677    }
2678
2679    virtual int tx(rte_mbuf_t *m) {
2680        rte_mbuf_t *tx_pkts[2];
2681
2682        tx_pkts[0] = m;
2683        uint8_t vlan_mode = CGlobalInfo::m_options.preview.get_vlan_mode();
2684        if ( likely( vlan_mode != CPreviewMode::VLAN_MODE_NONE) ) {
2685            if ( vlan_mode == CPreviewMode::VLAN_MODE_LOAD_BALANCE ) {
2686                add_vlan(m, CGlobalInfo::m_options.m_vlan_port[0]);
2687            } else if (vlan_mode == CPreviewMode::VLAN_MODE_NORMAL) {
2688                uint8_t port_id = m_port->get_rte_port_id();
2689                add_vlan(m, CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
2690            }
2691        }
2692        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
2693        if ( res == 0 ) {
2694            rte_pktmbuf_free(m);
2695            //printf(" queue is full for latency packet !!\n");
2696            return (-1);
2697
2698        }
2699#if 0
2700        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
2701        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
2702        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
2703        utl_DumpBuffer(stdout,p1,pkt_size1,0);
2704#endif
2705
2706        return (0);
2707    }
2708
2709
2710    /* nothing special with HW implementation */
2711    virtual int tx_latency(rte_mbuf_t *m) {
2712        return tx(m);
2713    }
2714
2715    virtual rte_mbuf_t * rx(){
2716        rte_mbuf_t * rx_pkts[1];
2717        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
2718        if (cnt) {
2719            return (rx_pkts[0]);
2720        }else{
2721            return (0);
2722        }
2723    }
2724
2725
2726    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
2727                              uint16_t nb_pkts){
2728        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
2729        return (cnt);
2730    }
2731
2732
2733private:
2734    CPhyEthIF  * m_port;
2735    uint8_t      m_tx_queue_id ;
2736    uint8_t      m_rx_queue_id;
2737};
2738
2739
/* Latency port for software-RX ("VM") mode: instead of writing directly to a
   HW TX queue, packets are wrapped in a latency node and enqueued to the DP
   core's ring; RX still polls queue 0 of the physical port. */
class CLatencyVmPort : public CPortLatencyHWBase {
public:
    void Create(uint8_t port_index,
                CNodeRing *ring,
                CLatencyManager *mgr,
                CPhyEthIF  *p) {

        // even port index -> dir 0, odd -> dir 1
        m_dir        = (port_index % 2);
        m_ring_to_dp = ring;
        m_mgr        = mgr;
        m_port       = p;
    }


    // plain packet: no TX timestamp fixup required
    virtual int tx(rte_mbuf_t *m) {
        return tx_common(m, false);
    }

    // latency packet: DP core must stamp the TX time right before sending
    virtual int tx_latency(rte_mbuf_t *m) {
        return tx_common(m, true);
    }

    // poll a single packet from RX queue 0; NULL when empty
    virtual rte_mbuf_t * rx() {
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, 1);
        if (cnt) {
            return (rx_pkts[0]);
        } else {
            return (0);
        }
    }

    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts) {
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, nb_pkts);
        return (cnt);
    }

private:
      // Common TX: apply VLAN tagging, wrap the mbuf in a LATENCY_PKT node and
      // enqueue it to the DP core. Returns 0 on success, -1 on failure.
      virtual int tx_common(rte_mbuf_t *m, bool fix_timestamp) {


        uint8_t vlan_mode = CGlobalInfo::m_options.preview.get_vlan_mode();
        if ( likely( vlan_mode != CPreviewMode::VLAN_MODE_NONE) ) {
            if ( vlan_mode == CPreviewMode::VLAN_MODE_LOAD_BALANCE ) {
                add_vlan(m, CGlobalInfo::m_options.m_vlan_port[0]);
            } else if (vlan_mode == CPreviewMode::VLAN_MODE_NORMAL) {
                uint8_t port_id = m_port->get_rte_port_id();
                add_vlan(m, CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }

        /* allocate node */
        CGenNodeLatencyPktInfo *node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if (!node) {
            // NOTE(review): m is not freed on this failure path - presumably
            // the caller owns it; confirm against callers before changing
            return (-1);
        }

        node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
        node->m_dir      = m_dir;
        node->m_pkt      = m;

        if (fix_timestamp) {
            // tell the DP core where the latency header lives so it can
            // refresh the timestamp just before TX
            node->m_latency_offset = m_mgr->get_latency_header_offset();
            node->m_update_ts = 1;
        } else {
            node->m_update_ts = 0;
        }

        if ( m_ring_to_dp->Enqueue((CGenNode*)node) != 0 ){
            // NOTE(review): node (and m) appear to leak when the ring is
            // full - verify whether any cleanup covers this path
            return (-1);
        }

        return (0);
    }

    CPhyEthIF  * m_port;
    uint8_t                          m_dir;
    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
    CLatencyManager *                m_mgr;
};
2820
2821
2822
/* Aggregated per-port counters used by the console dump and the JSON
   publisher (see CGlobalStats::dump_json / Dump below). */
class CPerPortStats {
public:
    uint64_t opackets;   // total packets transmitted
    uint64_t obytes;     // total bytes transmitted
    uint64_t ipackets;   // total packets received
    uint64_t ibytes;     // total bytes received
    uint64_t ierrors;    // receive errors
    uint64_t oerrors;    // transmit errors
    // per flow-stat rule TX counters: current values and previous snapshot
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;   // TX rate, bits/sec
    float     m_total_tx_pps;   // TX rate, packets/sec

    float     m_total_rx_bps;   // RX rate, bits/sec
    float     m_total_rx_pps;   // RX rate, packets/sec

    float     m_cpu_util;                // CPU utilization attributed to this port
    bool      m_link_up = true;          // current link state
    bool      m_link_was_down = false;   // latched: link was observed down at least once
};
2844
/* Snapshot of all global + per-port counters, with helpers to render it as
   human-readable console text or as a JSON record for the publisher. */
class CGlobalStats {
public:
    // console rendering styles for Dump()
    enum DumpFormat {
        dmpSTANDARD,   // one section per port
        dmpTABLE       // ports as table columns
    };

    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    uint64_t  m_total_alloc_error;   // mbuf allocation failures
    uint64_t  m_total_queue_full;    // TX queue-full retries
    uint64_t  m_total_queue_drop;    // packets dropped on full queue

    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    // NAT / learn-mode counters
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;   // per-template packet counters

    float     m_socket_util;

    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;        // smoothed DP core utilization
    float m_cpu_util_raw;    // unsmoothed DP core utilization
    float m_rx_cpu_util;
    float m_bw_per_core;
    uint8_t m_threads;

    uint32_t      m_num_of_ports;           // number of valid entries in m_port
    CPerPortStats m_port[TREX_MAX_PORTS];
public:
    // render to console (see DumpFormat)
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    // render as one JSON record; baseline marks a reference snapshot
    void dump_json(std::string & json, bool baseline);
private:
    // helpers producing a single "name":value, JSON fragment
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2908
2909std::string CGlobalStats::get_field(const char *name, float &f){
2910    char buff[200];
2911    if(f <= -10.0 or f >= 10.0)
2912        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2913    else
2914        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2915    return (std::string(buff));
2916}
2917
2918std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2919    char buff[200];
2920    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2921    return (std::string(buff));
2922}
2923
2924std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2925    char buff[200];
2926    if(f <= -10.0 or f >= 10.0)
2927        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2928    else
2929        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2930    return (std::string(buff));
2931}
2932
2933std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2934    char buff[200];
2935    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2936    return (std::string(buff));
2937}
2938
2939
/* Serialize the whole stats snapshot into one "trex-global" JSON record.
 * Every get_field* helper emits a trailing comma, so the object is closed
 * with a dummy "unknown":0 field at the end. */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    // high-resolution timestamp + its frequency, for rate computation on the consumer side
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

// stringify the member name itself as the JSON key
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    // per-port fields, keyed as "<name>-<port>"
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    // dummy terminal field absorbs the trailing comma and closes the object
    json+="\"unknown\":0}}"  ;
}
3013
/* Print the global (all-ports) summary block to fd: CPU, TX/RX rates,
 * expected rates, flow/socket counts and error counters. When learn (NAT)
 * mode is active, the NAT counters are appended to the right of each line. */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            // in TCP-ACK learn mode, split out timeouts still waiting for syn+ack
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    // error counters are printed only when non-zero to keep the dump compact
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
3098
3099
/* Print per-port counters to fd, in one of two layouts:
 *  dmpSTANDARD - one section per port, non-zero error counters only;
 *  dmpTABLE    - one column per port, fixed set of 7 rows.
 * Output is limited to the first 4 ports. */
void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
    int i;
    int port_to_show=m_num_of_ports;
    if (port_to_show>4) {
        port_to_show=4;
        fprintf (fd," per port - limited to 4   \n");
    }


    if ( mode== dmpSTANDARD ){
        fprintf (fd," --------------- \n");
        for (i=0; i<(int)port_to_show; i++) {
            CPerPortStats * lp=&m_port[i];
            fprintf(fd,"port : %d ",(int)i);
            if ( ! lp->m_link_up ) {
                fprintf(fd," (link DOWN)");
            }
            fprintf(fd,"\n------------\n");
// A4: always print the field; A: print only when non-zero
#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
            GS_DP_A4(opackets);
            GS_DP_A4(obytes);
            GS_DP_A4(ipackets);
            GS_DP_A4(ibytes);
            GS_DP_A(ierrors);
            GS_DP_A(oerrors);
            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
        }
    }else{
        // table header: one column per port, annotated when the link is down
        fprintf(fd," %10s ","ports");
        for (i=0; i<(int)port_to_show; i++) {
            CPerPortStats * lp=&m_port[i];
            if ( lp->m_link_up ) {
                fprintf(fd,"| %15d ",i);
            } else {
                std::string port_with_state = "(link DOWN) " + std::to_string(i);
                fprintf(fd,"| %15s ",port_with_state.c_str());
            }
        }
        fprintf(fd,"\n");
        fprintf(fd," -----------------------------------------------------------------------------------------\n");
        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
        };
        // one row per field; the switch below selects the matching counter
        for (i=0; i<7; i++) {
            fprintf(fd," %10s ",names[i].c_str());
            int j=0;
            for (j=0; j<port_to_show;j++) {
                CPerPortStats * lp=&m_port[j];
                uint64_t cnt;
                switch (i) {
                case 0:
                    cnt=lp->opackets;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 1:
                    cnt=lp->obytes;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 2:
                    cnt=lp->ipackets;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 3:
                    cnt=lp->ibytes;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 4:
                    cnt=lp->ierrors;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 5:
                    cnt=lp->oerrors;
                    fprintf(fd,"| %15lu ",cnt);

                    break;
                case 6:
                    // last row is the TX bandwidth, rendered as a human string
                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
                    break;
                default:
                    // unreachable: i is bounded by 7 above
                    cnt=0xffffff;
                }
            } /* ports */
            fprintf(fd, "\n");
        }/* fields*/
    }


}
3193
3194class CGlobalTRex  {
3195
3196public:
3197
3198    /**
3199     * different types of shutdown causes
3200     */
3201    typedef enum {
3202        SHUTDOWN_NONE,
3203        SHUTDOWN_TEST_ENDED,
3204        SHUTDOWN_CTRL_C,
3205        SHUTDOWN_SIGINT,
3206        SHUTDOWN_SIGTERM,
3207        SHUTDOWN_RPC_REQ
3208    } shutdown_rc_e;
3209
3210    CGlobalTRex (){
3211        m_max_ports=4;
3212        m_max_cores=1;
3213        m_cores_to_dual_ports=0;
3214        m_max_queues_per_port=0;
3215        m_fl_was_init=false;
3216        m_expected_pps=0.0;
3217        m_expected_cps=0.0;
3218        m_expected_bps=0.0;
3219        m_trex_stateless = NULL;
3220        m_mark_for_shutdown = SHUTDOWN_NONE;
3221    }
3222
3223    bool Create();
3224    void Delete();
3225    int  ixgbe_prob_init();
3226    int  cores_prob_init();
3227    int  queues_prob_init();
3228    int  ixgbe_start();
3229    int  ixgbe_rx_queue_flush();
3230    void rx_stf_conf();
3231    void rx_sl_configure();
3232    bool is_all_links_are_up(bool dump=false);
3233    void pre_test();
3234
3235    /**
3236     * mark for shutdown
3237     * on the next check - the control plane will
3238     * call shutdown()
3239     */
3240    void mark_for_shutdown(shutdown_rc_e rc) {
3241
3242        if (is_marked_for_shutdown()) {
3243            return;
3244        }
3245
3246        m_mark_for_shutdown = rc;
3247    }
3248
3249private:
3250    void register_signals();
3251
3252    /* try to stop all datapath cores and RX core */
3253    void try_stop_all_cores();
3254    /* send message to all dp cores */
3255    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
3256    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
3257    void check_for_dp_message_from_core(int thread_id);
3258
3259    bool is_marked_for_shutdown() const {
3260        return (m_mark_for_shutdown != SHUTDOWN_NONE);
3261    }
3262
3263    /**
3264     * shutdown sequence
3265     *
3266     */
3267    void shutdown();
3268
3269public:
3270    void check_for_dp_messages();
3271    int start_master_statefull();
3272    int start_master_stateless();
3273    int run_in_core(virtual_thread_id_t virt_core_id);
3274    int core_for_rx(){
3275        if ( (! get_is_rx_thread_enabled()) ) {
3276            return -1;
3277        }else{
3278            return m_max_cores - 1;
3279        }
3280    }
3281    int run_in_rx_core();
3282    int run_in_master();
3283
3284    void handle_fast_path();
3285    void handle_slow_path();
3286
3287    int stop_master();
3288    /* return the minimum number of dp cores needed to support the active ports
3289       this is for c==1 or  m_cores_mul==1
3290    */
3291    int get_base_num_cores(){
3292        return (m_max_ports>>1);
3293    }
3294
3295    int get_cores_tx(){
3296        /* 0 - master
3297           num_of_cores -
3298           last for latency */
3299        if ( (! get_is_rx_thread_enabled()) ) {
3300            return (m_max_cores - 1 );
3301        } else {
3302            return (m_max_cores - BP_MASTER_AND_LATENCY );
3303        }
3304    }
3305
3306private:
3307    bool is_all_cores_finished();
3308
3309public:
3310
3311    void publish_async_data(bool sync_now, bool baseline = false);
3312    void publish_async_barrier(uint32_t key);
3313    void publish_async_port_attr_changed(uint8_t port_id);
3314
3315    void dump_stats(FILE *fd,
3316                    CGlobalStats::DumpFormat format);
3317    void dump_template_info(std::string & json);
3318    bool sanity_check();
3319    void update_stats(void);
3320    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
3321    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
3322    void get_stats(CGlobalStats & stats);
3323    float get_cpu_util_per_interface(uint8_t port_id);
3324    void dump_post_test_stats(FILE *fd);
3325    void dump_config(FILE *fd);
3326    void dump_links_status(FILE *fd);
3327
3328    bool lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id);
3329
3330public:
3331    port_cfg_t  m_port_cfg;
3332    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
3333    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
3334    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
3335    uint32_t    m_max_queues_per_port; // Number of TX queues per port
3336    uint32_t    m_cores_to_dual_ports; /* number of TX cores allocated for each port pair */
3337    uint16_t    m_rx_core_tx_q_id; /* TX q used by rx core */
3338    // statistic
3339    CPPSMeasure  m_cps;
3340    float        m_expected_pps;
3341    float        m_expected_cps;
3342    float        m_expected_bps;//bps
3343    float        m_last_total_cps;
3344
3345    CPhyEthIF   m_ports[TREX_MAX_PORTS];
3346    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
3347    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
3348    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];
3349    CParserOption m_po ;
3350    CFlowGenList  m_fl;
3351    bool          m_fl_was_init;
3352    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
3353    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
3354    CLatencyManager     m_mg; // statefull RX core
3355    CRxCoreStateless    m_rx_sl; // stateless RX core
3356    CTrexGlobalIoMode   m_io_modes;
3357    CTRexExtendedDriverBase * m_drv;
3358
3359private:
3360    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
3361    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
3362    CLatencyPktInfo     m_latency_pkt;
3363    TrexPublisher       m_zmq_publisher;
3364    CGlobalStats        m_stats;
3365    uint32_t            m_stats_cnt;
3366    std::mutex          m_cp_lock;
3367
3368    TrexMonitor         m_monitor;
3369    shutdown_rc_e       m_mark_for_shutdown;
3370
3371public:
3372    TrexStateless       *m_trex_stateless;
3373
3374};
3375
3376// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
// Pre-traffic phase: send gratuitous ARP for our configured IPs and resolve
// destination MACs (next hops / default gateways). Seeds the latency manager
// with grat-ARP sources and, for stateless, sets each port's L2/L3 mode.
void CGlobalTRex::pre_test() {
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);
    CPretest pretest(m_max_ports, dpdk_p.rx_data_q_num + dpdk_p.rx_drop_q_num);
    bool resolve_needed = false;
    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
    // Per-port flag: send grat ARP only for ports with a configured src IP.
    // NOTE(review): only written in the non-client-cfg branch below and only
    // read in the matching non-client-cfg branch later, so it is never read
    // uninitialized - but the coupling is implicit; confirm before refactoring.
    bool need_grat_arp[TREX_MAX_PORTS];

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        std::vector<ClientCfgCompactEntry *> conf;
        m_fl.get_client_cfg_ip_list(conf);

        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
        }

        // Register every client-config destination as a next hop to resolve,
        // and every source IP as an address we answer ARP for.
        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
            uint8_t port = (*it)->get_port();
            uint16_t vlan = (*it)->get_vlan();
            uint32_t count = (*it)->get_count();
            uint32_t dst_ip = (*it)->get_dst_ip();
            uint32_t src_ip = (*it)->get_src_ip();

            for (int i = 0; i < count; i++) {
                //??? handle ipv6;
                if ((*it)->is_ipv4()) {
                    pretest.add_next_hop(port, dst_ip + i, vlan);
                }
            }
            // Fall back to the per-port IP from the TRex config file when the
            // client config entry carries no source IP.
            if (!src_ip) {
                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
                if (!src_ip) {
                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
                            , port, ip_to_str(dst_ip).c_str(), vlan);
                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
                    exit(1);
                }
            }
            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
            m_mg.add_grat_arp_src(ipv4);

            delete *it;
        }
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            fprintf(stdout, "*******Pretest for client cfg********\n");
            pretest.dump(stdout);
            }
    } else {
        // No client config: resolve the default gateway per port, but only
        // when no explicit dest MAC was given in the config file.
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                resolve_needed = true;
            } else {
                resolve_needed = false;
            }

            need_grat_arp[port_id] = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip() != 0;

            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);

            if (resolve_needed) {
                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }
    }

    // Temporarily accept all traffic in software so ARP replies reach us.
    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        CPhyEthIF *pif = &m_ports[port_id];
        // Configure port to send all packets to software
        pif->set_port_rcv_all(true);
    }

    pretest.send_grat_arp_all();
    bool ret;
    int count = 0;
    bool resolve_failed = false;
    // Retry resolution up to 10 times before declaring failure.
    do {
        ret = pretest.resolve_all();
        count++;
    } while ((ret != true) && (count < 10));
    if (ret != true) {
        resolve_failed = true;
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
        fprintf(stdout, "*******Pretest after resolving ********\n");
        pretest.dump(stdout);
    }

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        CManyIPInfo pretest_result;
        pretest.get_results(pretest_result);
        if (resolve_failed) {
            // Dump the unresolved entries and abort - client config traffic
            // cannot run without the resolved MACs.
            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
                   ip = pretest_result.get_next()) {
                if (ip->resolve_needed()) {
                    ip->dump(stderr, "  ");
                }
            }
            exit(1);
        }
        m_fl.set_client_config_resolved_macs(pretest_result);
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            m_fl.dump_client_config(stdout);
        }

        bool port_found[TREX_MAX_PORTS];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            port_found[port_id] = false;
        }
        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
            // Use first MAC/vlan we see on each port
            uint8_t port_id = ip->get_port();
            uint16_t vlan = ip->get_vlan();
            if ( ! port_found[port_id]) {
                port_found[port_id] = true;
                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
            }
        }
    } else {
        uint8_t mac[ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                // we don't have dest MAC. Get it from what we resolved.
                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();

                if (!pretest.get_mac(port_id, ip, vlan, mac)) {
                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);

                    // Stateless can still start (user may set MAC later via API);
                    // stateful cannot run without a resolved dest MAC.
                    if (get_is_stateless()) {
                        continue;
                    } else {
                        exit(1);
                    }
                }



                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
                                      , port_id);
                    m_mg.add_grat_arp_src(ipv4);
                }
            }

            // update statistics baseline, so we can ignore what happened in pre test phase
            CPhyEthIF *pif = &m_ports[port_id];
            CPreTestStats pre_stats = pretest.get_stats(port_id);
            pif->set_ignore_stats_base(pre_stats);
            // Configure port back to normal mode. Only relevant packets handled by software.
            pif->set_port_rcv_all(false);
        }
    }

    /* for stateless only - set port mode */
    if (get_is_stateless()) {
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip();
            uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
            const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;

            /* L3 mode */
            if (src_ipv4 && dg) {
                // dest MAC may still be unresolved (stateless 'continue' above);
                // in that case configure L3 without it.
                if (memcmp(dst_mac, empty_mac, 6) == 0) {
                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg);
                } else {
                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg, dst_mac);
                }

            /* L2 mode */
            } else if (CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.is_set) {
                m_trex_stateless->get_port_by_id(port_id)->set_l2_mode(dst_mac);
            }
        }
    }


}
3570
3571/**
3572 * check for a single core
3573 *
3574 * @author imarom (19-Nov-15)
3575 *
3576 * @param thread_id
3577 */
3578void
3579CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3580
3581    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3582
3583    /* fast path check */
3584    if ( likely ( ring->isEmpty() ) ) {
3585        return;
3586    }
3587
3588    while ( true ) {
3589        CGenNode * node = NULL;
3590        if (ring->Dequeue(node) != 0) {
3591            break;
3592        }
3593        assert(node);
3594
3595        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3596        msg->handle();
3597        delete msg;
3598    }
3599
3600}
3601
3602/**
3603 * check for messages that arrived from DP to CP
3604 *
3605 */
3606void
3607CGlobalTRex::check_for_dp_messages() {
3608
3609    /* for all the cores - check for a new message */
3610    for (int i = 0; i < get_cores_tx(); i++) {
3611        check_for_dp_message_from_core(i);
3612    }
3613}
3614
3615bool CGlobalTRex::is_all_links_are_up(bool dump){
3616    bool all_link_are=true;
3617    int i;
3618    for (i=0; i<m_max_ports; i++) {
3619        CPhyEthIF * _if=&m_ports[i];
3620        _if->get_port_attr()->update_link_status();
3621        if ( dump ){
3622            _if->dump_stats(stdout);
3623        }
3624        if ( _if->get_port_attr()->is_link_up() == false){
3625            all_link_are=false;
3626            break;
3627        }
3628    }
3629    return (all_link_are);
3630}
3631
3632void CGlobalTRex::try_stop_all_cores(){
3633
3634    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3635    send_message_all_dp(dp_msg);
3636    delete dp_msg;
3637
3638    if (get_is_stateless()) {
3639        TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3640        send_message_to_rx(rx_msg);
3641    }
3642
3643    // no need to delete rx_msg. Deleted by receiver
3644    bool all_core_finished = false;
3645    int i;
3646    for (i=0; i<20; i++) {
3647        if ( is_all_cores_finished() ){
3648            all_core_finished =true;
3649            break;
3650        }
3651        delay(100);
3652    }
3653    if ( all_core_finished ){
3654        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3655        printf(" All cores stopped !! \n");
3656    }else{
3657        printf(" ERROR one of the DP core is stucked !\n");
3658    }
3659}
3660
3661
3662int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3663
3664    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3665    int i;
3666
3667    for (i=0; i<max_threads; i++) {
3668        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3669        ring->Enqueue((CGenNode*)msg->clone());
3670    }
3671    return (0);
3672}
3673
3674int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3675    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3676    ring->Enqueue((CGenNode *) msg);
3677
3678    return (0);
3679}
3680
3681
3682int  CGlobalTRex::ixgbe_rx_queue_flush(){
3683    int i;
3684    for (i=0; i<m_max_ports; i++) {
3685        CPhyEthIF * _if=&m_ports[i];
3686        _if->flush_rx_queue();
3687    }
3688    return (0);
3689}
3690
3691
3692// init stateful rx core
// Configure the stateful RX (latency) core: choose the latency packet rate,
// wire each port to either a direct-HW latency vport or a VM (one-queue)
// vport, then create the latency manager.
void CGlobalTRex::rx_stf_conf(void) {
    int i;
    CLatencyManagerCfg mg_cfg;
    mg_cfg.m_max_ports = m_max_ports;

    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;

    if ( latency_rate ) {
        mg_cfg.m_cps = (double)latency_rate ;
    } else {
        // If RX core needed, we need something to make the scheduler running.
        // If nothing configured, send 1 CPS latency measurement packets.
        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
            mg_cfg.m_cps = 1.0;
        } else {
            mg_cfg.m_cps = 0;
        }
    }

    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
        /* vm mode, indirect queues  */
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if = &m_ports[i];
            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();

            // ports i and i+1 share the DP thread i/2
            uint8_t thread_id = (i>>1);

            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg, _if);

            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
        }

    }else{
        for (i=0; i<m_max_ports; i++) {
            CPhyEthIF * _if=&m_ports[i];
            // NOTE(review): dumps port stats to stdout here - looks like
            // leftover debug output; confirm intent before removing.
            _if->dump_stats(stdout);
            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);

            mg_cfg.m_ports[i] =&m_latency_vports[i];
        }
    }


    m_mg.Create(&mg_cfg);
    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
}
3740
3741// init m_rx_sl object for stateless rx core
3742void CGlobalTRex::rx_sl_configure(void) {
3743    CRxSlCfg rx_sl_cfg;
3744    int i;
3745
3746    rx_sl_cfg.m_max_ports = m_max_ports;
3747    rx_sl_cfg.m_tx_cores  = get_cores_tx();
3748
3749    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
3750        /* vm mode, indirect queues  */
3751        for (i=0; i < m_max_ports; i++) {
3752            CPhyEthIF * _if = &m_ports[i];
3753            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3754            uint8_t thread_id = (i >> 1);
3755            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3756            m_latency_vm_vports[i].Create(i, r, &m_mg, _if);
3757            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3758        }
3759    } else {
3760        for (i = 0; i < m_max_ports; i++) {
3761            CPhyEthIF * _if = &m_ports[i];
3762            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3763            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3764        }
3765    }
3766
3767    m_rx_sl.create(rx_sl_cfg);
3768}
3769
// Bring up all ports (queues, stats, flow control, MAC), verify links are up,
// flush stale RX packets, configure the stateful RX core when needed, and
// wire every TX core to its dual-port interface object. Returns 0; exits the
// process when a link stays down and the driver cannot drop on link-down.
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {
        // mbuf pool for this port's NUMA socket must already exist
        socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
        assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        _if->conf_queues();
        _if->stats_clear();
        _if->start();
        _if->configure_rx_duplicate_rules();

        // flow control is disabled by default unless the user opted out
        // or the NIC cannot change it
        if ( ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up() /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " One of the links is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    ixgbe_rx_queue_flush();

    // stateful mode uses the latency manager RX core; stateless configures
    // its RX core later via rx_sl_configure()
    if (! get_is_stateless()) {
        rx_stf_conf();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    // latency TX queue: in one-queue (VM) mode everything shares queue 0;
    // otherwise it is the queue right after the per-core data queues
    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
        lat_q_id = 0;
    } else {
        lat_q_id = get_cores_tx() / get_base_num_cores() + 1;
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        // pick the stateless or stateful per-core interface object
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    fprintf(stdout, "RX core uses TX queue number %d on all ports\n", m_rx_core_tx_q_id);
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3866
3867static void trex_termination_handler(int signum);
3868
3869void CGlobalTRex::register_signals() {
3870    struct sigaction action;
3871
3872    /* handler */
3873    action.sa_handler = trex_termination_handler;
3874
3875    /* blocked signals during handling */
3876    sigemptyset(&action.sa_mask);
3877    sigaddset(&action.sa_mask, SIGINT);
3878    sigaddset(&action.sa_mask, SIGTERM);
3879
3880    /* no flags */
3881    action.sa_flags = 0;
3882
3883    /* register */
3884    sigaction(SIGINT,  &action, NULL);
3885    sigaction(SIGTERM, &action, NULL);
3886}
3887
// Top-level initialization: signals, YAML config (stateful), ZMQ publisher,
// port/core/queue probing, CP-DP rings, mbuf pools, port startup, and - in
// stateless mode - the TrexStateless object and its RX core.
// Returns false only when the ZMQ publisher cannot be created.
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    // stateful mode loads the traffic YAML up front (needed for VLAN flags)
    if (!get_is_stateless()) {
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            CGlobalInfo::m_options.dump(stdout);
            CGlobalInfo::m_memory_cfg.Dump(stdout);
        }
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_verify(CPreviewMode::VLAN_MODE_LOAD_BALANCE);
    }
    /* End update pre flags */

    // probe hardware and compute the core/queue layout
    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    // message structs are cast to CGenNode on the rings, so their sizes
    // must match exactly
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);

    // pool size covers all RX descriptors (data + drop queues) on all ports
    CGlobalInfo::init_pools(m_max_ports *
                            (dpdk_p.rx_data_q_num * dpdk_p.rx_desc_num_data_q +
                             dpdk_p.rx_drop_q_num * dpdk_p.rx_desc_num_drop_q));
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);

        rx_sl_configure();
    }

    return (true);

}
3962void CGlobalTRex::Delete(){
3963
3964    m_zmq_publisher.Delete();
3965
3966    if (m_trex_stateless) {
3967        delete m_trex_stateless;
3968        m_trex_stateless = NULL;
3969    }
3970
3971    m_fl.Delete();
3972
3973}
3974
3975
3976
// Probe DPDK ethernet devices: validate the port count against the config,
// require a uniform driver across all ports, verify firmware versions, and
// apply driver/queue-mode constraints. Exits the process on any violation;
// returns 0 on success.
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "Error: Could not find supported ethernet ports. You are probably trying to use unsupported NIC \n");

    printf(" Number of ports found: %d \n",m_max_ports);

    // ports are always handled in pairs (dual-port interfaces)
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports in config file is %d. It should be even. Please use --limit-ports, or change 'port_limit:' in the config file\n",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum number of ports supported is %d. You are trying to use %d. Please use --limit-ports, or change 'port_limit:' in the config file\n"
                 ,TREX_MAX_PORTS, CGlobalInfo::m_options.get_expected_ports());
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports available. You are trying to use %d. Please use --limit-ports, or change 'port_limit:' in the config file\n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    // query port 0 as the reference device for the uniform-driver check
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : 0x%x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : 0x%x \n",dev_info.tx_offload_capa);
        printf("rss reta_size   : %d \n",dev_info.reta_size);
        printf("flow_type_rss   : 0x%lx \n",dev_info.flow_type_rss_offloads);
    }

    int i;
    struct rte_eth_dev_info dev_info1;

    // all ports must use the same driver as port 0
    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf("Error: the number of cores should be 1 when the driver support only one tx queue and one rx queue. Please use -c 1 \n");
            exit(1);
        }
    }
    return (0);
}
4061
4062int  CGlobalTRex::cores_prob_init(){
4063    m_max_cores = rte_lcore_count();
4064    assert(m_max_cores>0);
4065    return (0);
4066}
4067
4068int  CGlobalTRex::queues_prob_init(){
4069
4070    if (m_max_cores < 2) {
4071        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
4072    }
4073
4074    assert((m_max_ports>>1) <= get_cores_tx() );
4075
4076    m_cores_mul = CGlobalInfo::m_options.preview.getCores();
4077
4078    m_cores_to_dual_ports  = m_cores_mul;
4079
4080    /* core 0 - control
4081       -core 1 - port 0/1
4082       -core 2 - port 2/3
4083       -core 3 - port 0/1
4084       -core 4 - port 2/3
4085
4086       m_cores_to_dual_ports = 2;
4087    */
4088
4089    // One q for each core allowed to send on this port + 1 for latency q (Used in stateless) + 1 for RX core.
4090    m_max_queues_per_port  = m_cores_to_dual_ports + 2;
4091
4092    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
4093        rte_exit(EXIT_FAILURE,
4094                 "Error: Number of TX queues exceeds %d. Try running with lower -c <val> \n",BP_MAX_TX_QUEUE);
4095    }
4096
4097    assert(m_max_queues_per_port>0);
4098    return (0);
4099}
4100
4101
// Print a short summary of the port/core/queue layout to 'fd'.
void CGlobalTRex::dump_config(FILE *fd){
    fprintf(fd," number of ports         : %u \n",m_max_ports);
    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
}
4107
4108
4109void CGlobalTRex::dump_links_status(FILE *fd){
4110    for (int i=0; i<m_max_ports; i++) {
4111        m_ports[i].get_port_attr()->update_link_status_nowait();
4112        m_ports[i].get_port_attr()->dump_link(fd);
4113    }
4114}
4115
4116bool CGlobalTRex::lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id) {
4117    for (int i = 0; i < m_max_ports; i++) {
4118        if (memcmp(m_ports[i].get_port_attr()->get_layer_cfg().get_ether().get_src(), mac, 6) == 0) {
4119            port_id = i;
4120            return true;
4121        }
4122    }
4123
4124    return false;
4125}
4126
/* Print the end-of-run summary: aggregated SW per-core TX counters,
   HW per-port RX/TX counters, ARP counts, drop estimation and (when
   enabled) latency results. */
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    int i;
    /* software-side counters, summed over all TX cores */
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* hardware-side counters, summed over all physical ports */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency probe packets are sent by SW too - add them to the SW totals */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* rx > tx can legitimately happen (e.g. external traffic); report 0 drop
       but warn when the excess is more than 1% */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    for (i=0; i<m_max_ports; i++) {
        if ( m_stats.m_port[i].m_link_was_down ) {
            fprintf (fd, " WARNING: Link was down at port %d during test (at least for some time)!\n", i);
        }
    }
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
4202
4203
4204void CGlobalTRex::update_stats(){
4205
4206    int i;
4207    for (i=0; i<m_max_ports; i++) {
4208        CPhyEthIF * _if=&m_ports[i];
4209        _if->update_counters();
4210    }
4211    uint64_t total_open_flows=0;
4212
4213
4214    CFlowGenListPerThread   * lpt;
4215    for (i=0; i<get_cores_tx(); i++) {
4216        lpt = m_fl.m_threads_info[i];
4217        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4218    }
4219    m_last_total_cps = m_cps.add(total_open_flows);
4220
4221}
4222
4223tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
4224    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4225}
4226
// read stats. Return read value, and clear.
tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
    uint8_t port0;
    CFlowGenListPerThread * lpt;
    tx_per_flow_t ret;

    m_stats.m_port[port].m_tx_per_flow[index].clear();

    // Fold in the per-core counters from every thread driving this port.
    // Each thread owns a dual-port pair; port0 is the even port of that pair.
    for (int i=0; i < get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        port0 = lpt->getDualPortId() * 2;
        if ((port == port0) || (port == port0 + 1)) {
            m_stats.m_port[port].m_tx_per_flow[index] +=
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
            // latency (payload-rule) entries live above MAX_FLOW_STATS,
            // so the per-core latency slot is index - MAX_FLOW_STATS
            if (is_lat)
                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
        }
    }

    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];

    // Since we return diff from prev, following "clears" the stats.
    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];

    return ret;
}
4253
/* Fill 'stats' with a full snapshot: per-port HW counters and rates,
   per-core SW counters (alloc errors, queue full/drop, templates),
   NAT/learn counters, socket utilization and derived global rates.
   Also clears the per-port m_tx_per_flow entries and re-accumulates
   them from the per-core counters. */
void CGlobalTRex::get_stats(CGlobalStats & stats){

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port HW counters and last-sampled rates */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();
        stp->m_link_up        = _if->get_port_attr()->is_link_up();
        /* sticky flag - remembers any down event across snapshots */
        stp->m_link_was_down |= ! _if->get_port_attr()->is_link_up();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* clear flow-stat accumulators; re-filled from per-core data below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* per-core SW counters, summed over all TX cores */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        /* m_stats[0]/m_stats[1] are the client/server sides of the dual port */
        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        /* re-accumulate per-port flow stats from this core's two sides */
        uint8_t port0 = lpt->getDualPortId() *2;
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* treat tiny tx/rx gaps (<10% of tx) as measurement noise, not drops */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    /* scale everything by the configured platform factor */
    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* avoid divide-by-(near)zero when CPU util was not sampled yet */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4431
4432float
4433CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4434    CPhyEthIF * _if = &m_ports[port_id];
4435
4436    float    tmp = 0;
4437    uint8_t  cnt = 0;
4438    for (const auto &p : _if->get_core_list()) {
4439        uint8_t core_id = p.first;
4440        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4441        if (lp->is_port_active(port_id)) {
4442            tmp += lp->m_cpu_cp_u.GetVal();
4443            cnt++;
4444        }
4445    }
4446
4447    return ( (cnt > 0) ? (tmp / cnt) : 0);
4448
4449}
4450
4451bool CGlobalTRex::sanity_check(){
4452
4453    CFlowGenListPerThread   * lpt;
4454    uint32_t errors=0;
4455    int i;
4456    for (i=0; i<get_cores_tx(); i++) {
4457        lpt = m_fl.m_threads_info[i];
4458        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4459    }
4460
4461    if ( errors ) {
4462        printf(" ERRORs sockets allocation errors! \n");
4463        printf(" you should allocate more clients in the pool \n");
4464        return(true);
4465    }
4466    return ( false);
4467}
4468
4469
4470/* dump the template info */
4471void CGlobalTRex::dump_template_info(std::string & json){
4472    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4473    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4474
4475    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4476    int i;
4477    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4478        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4479        json+="\""+ r->m_name+"\"";
4480        json+=",";
4481    }
4482    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4483    json+="]}" ;
4484}
4485
/* Refresh counters and dump them to fd. In interactive table mode the
   per-port / global sections honor the keyboard-selected IO sub-modes;
   any other format dumps everything unconditionally. */
void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){

    update_stats();
    get_stats(m_stats);

    if (format==CGlobalStats::dmpTABLE) {
        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
            /* per-port section - controlled by the 'p' keyboard toggle */
            switch (m_io_modes.m_pp_mode ){
            case CTrexGlobalIoMode::ppDISABLE:
                fprintf(fd,"\n+Per port stats disabled \n");
                break;
            case CTrexGlobalIoMode::ppTABLE:
                fprintf(fd,"\n-Per port stats table \n");
                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
                break;
            case CTrexGlobalIoMode::ppSTANDARD:
                fprintf(fd,"\n-Per port stats - standard\n");
                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
                break;
            };

            /* aggregated (all ports) section - controlled by the 'a' toggle */
            switch (m_io_modes.m_ap_mode ){
            case   CTrexGlobalIoMode::apDISABLE:
                fprintf(fd,"\n+Global stats disabled \n");
                break;
            case   CTrexGlobalIoMode::apENABLE:
                fprintf(fd,"\n-Global stats enabled \n");
                m_stats.DumpAllPorts(fd);
                break;
            };
        }
    }else{
        /* at exit, always need to dump it in standard mode for scripts */
        m_stats.Dump(fd,format);
        m_stats.DumpAllPorts(fd);
    }

}
4524
/* Publish the periodic JSON snapshots on the ZMQ async channel:
   global stats, generator stats and - depending on mode - template
   info, rx-check, latency and stateless flow-stat documents.
   When sync_now is set the counters are refreshed first; 'baseline'
   is forwarded to the JSON dumpers to mark a reference snapshot. */
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);


    /* template info is only meaningful for stateful (YAML-driven) runs */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    /* stateless flow stats + latency, published only when there is data */
    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4570
/* Publish a barrier event carrying the caller-provided key on the
   ZMQ async channel (lets subscribers synchronize with the stream). */
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    m_zmq_publisher.publish_barrier(key);
}
4575
4576void
4577CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4578    Json::Value data;
4579    data["port_id"] = port_id;
4580    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4581
4582    _attr->to_json(data["attr"]);
4583
4584    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4585}
4586
/* Slow-path tick of the master loop (~every SLOWPATH_DELAY_MS):
   polls link state, handles keyboard IO modes, runs sanity checks,
   redraws the console stats screen and publishes async JSON data. */
void
CGlobalTRex::handle_slow_path() {
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* keyboard handling; a 'quit' request marks the instance for shutdown */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* ANSI escapes: clear screen and home the cursor before redrawing */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        /* first tick after display was disabled - clear once and stop drawing */
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    /* memory-pool view; refreshed only every 4th tick to limit output */
    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* latency / rx-check views (stateful mode only) */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            if (CGlobalInfo::m_options.m_latency_rate != 0) {
                switch (m_io_modes.m_l_mode) {
                case CTrexGlobalIoMode::lDISABLE:
                    fprintf(stdout, "\n+Latency stats disabled \n");
                    break;
                case CTrexGlobalIoMode::lENABLE:
                    fprintf(stdout, "\n-Latency stats enabled \n");
                    m_mg.DumpShort(stdout);
                    break;
                case CTrexGlobalIoMode::lENABLE_Extended:
                    fprintf(stdout, "\n-Latency stats extended \n");
                    m_mg.Dump(stdout);
                    break;
                }
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    /* NAT flow-table view (only valid for TCP-ACK learn mode) */
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4702
4703
4704void
4705CGlobalTRex::handle_fast_path() {
4706    /* check from messages from DP */
4707    check_for_dp_messages();
4708
4709    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4710    for (int i = 0; i < 1000; i++) {
4711        m_fl.UpdateFast();
4712
4713        if (get_is_stateless()) {
4714            m_rx_sl.update_cpu_util();
4715        }else{
4716            m_mg.update_fast();
4717        }
4718
4719        rte_pause();
4720    }
4721
4722
4723    if ( is_all_cores_finished() ) {
4724        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4725    }
4726}
4727
4728
4729/**
4730 * shutdown sequence
4731 *
4732 */
4733void CGlobalTRex::shutdown() {
4734    std::stringstream ss;
4735    ss << " *** TRex is shutting down - cause: '";
4736
4737    switch (m_mark_for_shutdown) {
4738
4739    case SHUTDOWN_TEST_ENDED:
4740        ss << "test has ended'";
4741        break;
4742
4743    case SHUTDOWN_CTRL_C:
4744        ss << "CTRL + C detected'";
4745        break;
4746
4747    case SHUTDOWN_SIGINT:
4748        ss << "received signal SIGINT'";
4749        break;
4750
4751    case SHUTDOWN_SIGTERM:
4752        ss << "received signal SIGTERM'";
4753        break;
4754
4755    case SHUTDOWN_RPC_REQ:
4756        ss << "server received RPC 'shutdown' request'";
4757        break;
4758
4759    default:
4760        assert(0);
4761    }
4762
4763    /* report */
4764    std::cout << ss.str() << "\n";
4765
4766    /* first stop the WD */
4767    TrexWatchDog::getInstance().stop();
4768
4769    /* stateless shutdown */
4770    if (get_is_stateless()) {
4771        m_trex_stateless->shutdown();
4772    }
4773
4774    if (!is_all_cores_finished()) {
4775        try_stop_all_cores();
4776    }
4777
4778    m_mg.stop();
4779
4780    delay(1000);
4781
4782    /* shutdown drivers */
4783    for (int i = 0; i < m_max_ports; i++) {
4784        m_ports[i].stop();
4785    }
4786
4787    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4788        /* we should stop latency and exit to stop agents */
4789        Delete();
4790        utl_termio_reset();
4791        exit(-1);
4792    }
4793}
4794
4795
/* Master (control) core main loop. Launches the stateless control
   plane when relevant, registers with the watchdog, then alternates
   fast-path ticks (every FASTPATH_DELAY_MS) with slow-path ticks
   (every SLOWPATH_DELAY_MS) until a shutdown is requested.
   Always returns 0. */
int CGlobalTRex::run_in_master() {

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }

        /* the sleep below is outside the lock; tell the WD not to bark */
        m_monitor.disable(30); //assume we will wake up

        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.enable();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4846
4847
4848
4849int CGlobalTRex::run_in_rx_core(void){
4850
4851    CPreviewMode *lp = &CGlobalInfo::m_options.preview;
4852
4853    rte_thread_setname(pthread_self(), "TRex RX");
4854
4855    /* set RT mode if set */
4856    if (lp->get_rt_prio_mode()) {
4857        struct sched_param param;
4858        param.sched_priority = sched_get_priority_max(SCHED_FIFO);
4859        if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
4860            perror("setting RT priroity mode on RX core failed with error");
4861            exit(EXIT_FAILURE);
4862        }
4863    }
4864
4865    if (get_is_stateless()) {
4866        m_sl_rx_running = true;
4867        m_rx_sl.start();
4868        m_sl_rx_running = false;
4869    } else {
4870        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4871            m_sl_rx_running = false;
4872            m_mg.start(0, true);
4873        }
4874    }
4875
4876    return (0);
4877}
4878
4879int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
4880    std::stringstream ss;
4881    CPreviewMode *lp = &CGlobalInfo::m_options.preview;
4882
4883    ss << "Trex DP core " << int(virt_core_id);
4884    rte_thread_setname(pthread_self(), ss.str().c_str());
4885
4886    /* set RT mode if set */
4887    if (lp->get_rt_prio_mode()) {
4888        struct sched_param param;
4889        param.sched_priority = sched_get_priority_max(SCHED_FIFO);
4890        if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
4891            perror("setting RT priroity mode on DP core failed with error");
4892            exit(EXIT_FAILURE);
4893        }
4894    }
4895
4896
4897    if ( lp->getSingleCore() &&
4898         (virt_core_id==2 ) &&
4899         (lp-> getCores() ==1) ){
4900        printf(" bypass this core \n");
4901        m_signal[virt_core_id]=1;
4902        return (0);
4903    }
4904
4905
4906    assert(m_fl_was_init);
4907    CFlowGenListPerThread   * lpt;
4908
4909    lpt = m_fl.m_threads_info[virt_core_id-1];
4910
4911    /* register a watchdog handle on current core */
4912    lpt->m_monitor.create(ss.str(), 1);
4913    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);
4914
4915    if (get_is_stateless()) {
4916        lpt->start_stateless_daemon(*lp);
4917    }else{
4918        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
4919    }
4920
4921    /* done - remove this from the watchdog (we might wait on join for a long time) */
4922    lpt->m_monitor.disable();
4923
4924    m_signal[virt_core_id]=1;
4925    return (0);
4926}
4927
4928
4929int CGlobalTRex::stop_master(){
4930
4931    delay(1000);
4932    fprintf(stdout," ==================\n");
4933    fprintf(stdout," interface sum \n");
4934    fprintf(stdout," ==================\n");
4935    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4936    fprintf(stdout," ==================\n");
4937    fprintf(stdout," \n\n");
4938
4939    fprintf(stdout," ==================\n");
4940    fprintf(stdout," interface sum \n");
4941    fprintf(stdout," ==================\n");
4942
4943    CFlowGenListPerThread   * lpt;
4944    uint64_t total_tx_rx_check=0;
4945
4946    int i;
4947    for (i=0; i<get_cores_tx(); i++) {
4948        lpt = m_fl.m_threads_info[i];
4949        CCoreEthIF * erf_vif = m_cores_vif[i+1];
4950
4951        erf_vif->DumpCoreStats(stdout);
4952        erf_vif->DumpIfStats(stdout);
4953        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
4954            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
4955    }
4956
4957    fprintf(stdout," ==================\n");
4958    fprintf(stdout," generators \n");
4959    fprintf(stdout," ==================\n");
4960    for (i=0; i<get_cores_tx(); i++) {
4961        lpt = m_fl.m_threads_info[i];
4962        lpt->m_node_gen.DumpHist(stdout);
4963        lpt->DumpStats(stdout);
4964    }
4965    if ( CGlobalInfo::m_options.is_latency_enabled() ){
4966        fprintf(stdout," ==================\n");
4967        fprintf(stdout," latency \n");
4968        fprintf(stdout," ==================\n");
4969        m_mg.DumpShort(stdout);
4970        m_mg.Dump(stdout);
4971        m_mg.DumpShortRxCheck(stdout);
4972        m_mg.DumpRxCheck(stdout);
4973        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
4974    }
4975
4976    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
4977    dump_post_test_stats(stdout);
4978    publish_async_data(false);
4979
4980    return (0);
4981}
4982
4983bool CGlobalTRex::is_all_cores_finished() {
4984    int i;
4985    for (i=0; i<get_cores_tx(); i++) {
4986        if ( m_signal[i+1]==0){
4987            return false;
4988        }
4989    }
4990    if (m_sl_rx_running)
4991        return false;
4992
4993    return true;
4994}
4995
4996
4997int CGlobalTRex::start_master_stateless(){
4998    int i;
4999    for (i=0; i<BP_MAX_CORES; i++) {
5000        m_signal[i]=0;
5001    }
5002    m_fl.Create();
5003    m_expected_pps = 0;
5004    m_expected_cps = 0;
5005    m_expected_bps = 0;
5006
5007    m_fl.generate_p_thread_info(get_cores_tx());
5008    CFlowGenListPerThread   * lpt;
5009
5010    for (i=0; i<get_cores_tx(); i++) {
5011        lpt = m_fl.m_threads_info[i];
5012        CVirtualIF * erf_vif = m_cores_vif[i+1];
5013        lpt->set_vif(erf_vif);
5014        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
5015    }
5016    m_fl_was_init=true;
5017
5018    return (0);
5019}
5020
/* Prepare the flow-generator threads for stateful operation: load the
   YAML traffic profile, apply the optional client-config file, verify
   options, compute expected rates, configure the latency manager IP
   ranges and bind each per-core thread to its virtual interface and
   NUMA socket. Exits the process on config errors. Always returns 0. */
int CGlobalTRex::start_master_statefull() {
    int i;
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    if ( CGlobalInfo::m_options.m_active_flows>0 ) {
        m_fl.update_active_flows(CGlobalInfo::m_options.m_active_flows);
    }

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected rates derived from the loaded profile */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* latency manager needs the client/server IP ranges from pool 0 */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    /* verbose mode: dump the profile as CSV, padded so it scrolls clear */
    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
5094
5095
5096////////////////////////////////////////////
5097static CGlobalTRex g_trex;
5098
/* Configure the port's RX/TX queues according to the driver parameters.
   Three layouts are supported, keyed on the number of drop queues:
     0 drop queues  -> VM mode (single RX queue) or RSS over all RX queues
     1 drop queue   -> normal HW mode: one drop queue + one latency RX queue
     many drop qs   -> Mellanox mode: RSS spreads over drop queues,
                       skipping the dedicated latency RX queue */
void CPhyEthIF::conf_queues() {
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);
    /* single-queue mode forces exactly one TX queue */
    uint16_t num_tx_q = (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) ?
        1 : g_trex.m_max_queues_per_port;
    /* allocate mbufs from the port's NUMA socket */
    socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)m_port_id);
    assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);

    configure(dpdk_p.rx_drop_q_num + dpdk_p.rx_data_q_num, num_tx_q, &g_trex.m_port_cfg.m_port_conf);
    for (uint16_t qid = 0; qid < num_tx_q; qid++) {
        tx_queue_setup(qid, dpdk_p.tx_desc_num , socket_id, &g_trex.m_port_cfg.m_tx_conf);
    }

    switch (dpdk_p.rx_drop_q_num) {
    case 0:
        if (dpdk_p.rx_data_q_num == 1) {
            // 1 rx rcv q. no drop q. VM mode.
            // Only 1 rx queue, so use it for everything
            g_trex.m_rx_core_tx_q_id = 0;
            rx_queue_setup(0, dpdk_p.rx_desc_num_data_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                           get_ex_drv()->get_rx_mem_pool(socket_id));
            set_rx_queue(0);
        } else {
            // no drop q. Many rcv queues. RSS mode.
            // rss on all rcv queues. Do not skip any q.
            configure_rss_redirect_table(dpdk_p.rx_data_q_num, 0xff);
            g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
            for (int queue = 0; queue < dpdk_p.rx_data_q_num; queue++) {
                rx_queue_setup(queue, dpdk_p.rx_desc_num_data_q, socket_id,
                               &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
            }
        }
        break;
    case 1:
        // 1 drop q. 1 or more rx queues. Normal mode.
        // rx core will use largest tx q
        g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
        // configure drop q
        rx_queue_setup(MAIN_DPDK_DROP_Q, dpdk_p.rx_desc_num_drop_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                            CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        set_rx_queue(MAIN_DPDK_RX_Q);
        rx_queue_setup(MAIN_DPDK_RX_Q, dpdk_p.rx_desc_num_data_q, socket_id,
                       &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
        break;
    default:
        // Many drop queues. Mellanox mode.
        g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
        // configure drop queues (all queues but MAIN_DPDK_RX_Q)
        for (int j = 0; j < dpdk_p.rx_drop_q_num + 1; j++) {
            if (j == MAIN_DPDK_RX_Q) {
                continue;
            }
            rx_queue_setup(j, dpdk_p.rx_desc_num_drop_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                           CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        }
        rx_queue_setup(MAIN_DPDK_RX_Q, dpdk_p.rx_desc_num_data_q, socket_id,
                       &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
        // rss on all drop queues. Skip MAIN_DPDK_RX_Q
        configure_rss_redirect_table(dpdk_p.rx_drop_q_num + 1, MAIN_DPDK_RX_Q);
        break;
    }
}
5161
5162void CPhyEthIF::configure_rss_redirect_table(uint16_t numer_of_queues, uint16_t skip_queue) {
5163     struct rte_eth_dev_info dev_info;
5164
5165     rte_eth_dev_info_get(m_port_id,&dev_info);
5166     assert(dev_info.reta_size > 0);
5167     int reta_conf_size = std::max(1, dev_info.reta_size / RTE_RETA_GROUP_SIZE);
5168     struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
5169
5170     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
5171
5172     for (int j = 0; j < reta_conf_size; j++) {
5173         uint16_t skip = 0;
5174         reta_conf[j].mask = ~0ULL;
5175         for (int i = 0; i < RTE_RETA_GROUP_SIZE; i++) {
5176             uint16_t q;
5177             while (true) {
5178                 q=(i + skip) % numer_of_queues;
5179                 if (q != skip_queue) {
5180                     break;
5181                 }
5182                 skip += 1;
5183             }
5184             reta_conf[j].reta[i] = q;
5185         }
5186     }
5187     rte_eth_dev_rss_reta_update(m_port_id, &reta_conf[0], dev_info.reta_size);
5188     rte_eth_dev_rss_reta_query(m_port_id, &reta_conf[0], dev_info.reta_size);
5189
5190#if 0
5191     /* verification */
5192     for (j = 0; j < reta_conf_size; j++) {
5193         for (i = 0; i<RTE_RETA_GROUP_SIZE; i++) {
5194             printf(" R  %d %d %d \n",j,i,reta_conf[j].reta[i]);
5195         }
5196     }
5197#endif
5198}
5199
/* Refresh this port's extended HW statistics and derive smoothed rates.
   Traffic generated internally by TRex (ARP, latency control, ...) is
   subtracted from the user-visible TX counters and accumulated in
   m_ignore_stats instead. */
void CPhyEthIF::update_counters() {
    get_ex_drv()->get_extended_stats(this, &m_stats);
    CRXCoreIgnoreStat ign_stats;

    /* the owner of the ignore-stats depends on the run mode */
    if (get_is_stateless()) {
        g_trex.m_rx_sl.get_ignore_stats(m_port_id, ign_stats, true);
    } else {
        g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
    }

    /* remove internally generated traffic from the user-facing counters */
    m_stats.obytes -= ign_stats.get_tx_bytes();
    m_stats.opackets -= ign_stats.get_tx_pkts();
    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();

    /* feed the moving-average rate trackers with the adjusted totals */
    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
}
5221
5222bool CPhyEthIF::Create(uint8_t portid) {
5223    m_port_id      = portid;
5224    m_last_rx_rate = 0.0;
5225    m_last_tx_rate = 0.0;
5226    m_last_tx_pps  = 0.0;
5227    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
5228
5229    /* set src MAC addr */
5230    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
5231    if (! memcmp( CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
5232        rte_eth_macaddr_get(m_port_id,
5233                            (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src);
5234    }
5235
5236    return true;
5237}
5238
5239const std::vector<std::pair<uint8_t, uint8_t>> &
5240CPhyEthIF::get_core_list() {
5241
5242    /* lazy find */
5243    if (m_core_id_list.size() == 0) {
5244
5245        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
5246
5247            /* iterate over all the directions*/
5248            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
5249                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
5250                    m_core_id_list.push_back(std::make_pair(core_id, dir));
5251                }
5252            }
5253        }
5254    }
5255
5256    return m_core_id_list;
5257
5258}
5259
5260int CPhyEthIF::reset_hw_flow_stats() {
5261    if (get_ex_drv()->hw_rx_stat_supported()) {
5262        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
5263    } else {
5264        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
5265    }
5266    return 0;
5267}
5268
5269// get/reset flow director counters
5270// return 0 if OK. -1 if operation not supported.
5271// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
5272// min, max - minimum, maximum counters range to get
5273// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    /* per-counter deltas since the previous read (HW path only) */
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    if (hw_rx_stat_supported) {
        /* read HW deltas relative to the cached previous values */
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        /* SW path: the RX core tracks per-flow RX stats itself */
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    /* report accumulated + fresh delta, then zero */
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* re-baseline the HW counter for this single rule */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* accumulate deltas into the running totals and report them */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
5321
5322int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
5323    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
5324    for (int i = min; i <= max; i++) {
5325        if ( reset ) {
5326            if (tx_stats != NULL) {
5327                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
5328            }
5329        } else {
5330            if (tx_stats != NULL) {
5331                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
5332            }
5333        }
5334    }
5335
5336    return 0;
5337}
5338
/* Global accessor for the stateless control object (owned by g_trex). */
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
5342
/* Global accessor for the stateless RX core (owned by g_trex). */
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
5346
5347static int latency_one_lcore(__attribute__((unused)) void *dummy)
5348{
5349    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5350    physical_thread_id_t  phy_id =rte_lcore_id();
5351
5352    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5353        g_trex.run_in_rx_core();
5354    }else{
5355
5356        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5357            g_trex.run_in_master();
5358            delay(1);
5359        }else{
5360            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5361            /* this core has stopped */
5362            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5363        }
5364    }
5365    return 0;
5366}
5367
5368
5369
5370static int slave_one_lcore(__attribute__((unused)) void *dummy)
5371{
5372    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5373    physical_thread_id_t  phy_id =rte_lcore_id();
5374
5375    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5376        g_trex.run_in_rx_core();
5377    }else{
5378        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5379            g_trex.run_in_master();
5380            delay(1);
5381        }else{
5382            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5383        }
5384    }
5385    return 0;
5386}
5387
5388
5389
/**
 * Build a bit-mask of 'cores' cores: bit 0 (the master core) is always set,
 * plus bits offset+1 .. offset+cores-1 for the remaining cores.
 * e.g. get_cores_mask(3, 0) -> 0b111, get_cores_mask(2, 3) -> 0b10001.
 */
uint32_t get_cores_mask(uint32_t cores,int offset){
    uint32_t res = 1;                    /* master core (bit 0) */
    uint32_t mask = (1 << (offset + 1)); /* first non-master core bit */

    /* unsigned index with 'i + 1 < cores': the old 'int i < cores-1'
       wrapped around when cores == 0 (signed/unsigned comparison made
       the bound 0xFFFFFFFF, looping ~4G times with UB shifts) */
    for (uint32_t i = 0; i + 1 < cores; i++) {
        res |= mask;
        mask = mask << 1;
    }
    return (res);
}
5402
5403
/* full path of the executable (argv[0]); saved by main() */
static char *g_exe_name;
/* accessor for the executable path captured in main() */
const char *get_exe_name() {
    return g_exe_name;
}
5408
5409
/* Process entry point: remember argv[0] for get_exe_name(), then hand
   everything to main_test(). */
int main(int argc , char * argv[]){
    g_exe_name = argv[0];

    return ( main_test(argc , argv));
}
5415
5416
/* Apply the parsed platform YAML (global_platform_cfg_info) to the global
   options and memory configuration. Always returns 0. */
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    /* ZMQ publisher settings, if present in the file */
    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the per-port MAC/IP info from the config file */

        int port_size=cg->m_mac_info.size();

        /* clamp to the number of ports TRex supports */
        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.is_set = 1;

            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
            // If one of the ports has vlan, work in vlan mode
            if (cg->m_mac_info[i].get_vlan() != 0) {
                CGlobalInfo::m_options.preview.set_vlan_mode_verify(CPreviewMode::VLAN_MODE_NORMAL);
            }
        }
    }

    /* mul by interface type */
    /* memory scale factor: grows with port bandwidth (relative to 10Gb),
       number of ports (relative to 2) and the CLI mbuf factor */
    float mul=1.0;
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5489
5490extern "C" int eal_cpu_detected(unsigned lcore_id);
5491// return mask representing available cores
5492int core_mask_calc() {
5493    uint32_t mask = 0;
5494    int lcore_id;
5495
5496    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5497        if (eal_cpu_detected(lcore_id)) {
5498            mask |= (1 << lcore_id);
5499        }
5500    }
5501
5502    return mask;
5503}
5504
5505// Return number of set bits in i
uint32_t num_set_bits(uint32_t i)
{
    /* Kernighan's method: each iteration clears the lowest set bit,
       so the loop runs once per set bit */
    uint32_t count = 0;
    while (i != 0) {
        i &= i - 1;
        count++;
    }
    return count;
}
5512
5513// sanity check if the cores we want to use really exist
int core_mask_sanity(uint32_t wanted_core_mask) {
    /* compare the cores we want against what the EAL actually detected */
    uint32_t calc_core_mask = core_mask_calc();
    uint32_t wanted_core_num, calc_core_num;

    wanted_core_num = num_set_bits(wanted_core_mask);
    calc_core_num = num_set_bits(calc_core_mask);

    /* TRex needs at least master + one DP core */
    if (calc_core_num == 1) {
        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
        printf("        If you are running on VM, consider adding more cores if possible\n");
        return -1;
    }
    /* not enough cores for the requested -c / port configuration */
    if (wanted_core_num > calc_core_num) {
        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
        if (CGlobalInfo::m_options.preview.getCores() > 1)
            printf("       Maybe try smaller -c <num>.\n");
        printf("       If you are running on VM, consider adding more cores if possible\n");
        return -1;
    }

    /* a wanted core that does not exist at all */
    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
        return -1;
    }

    return 0;
}
5544
/* Build the argv-style argument list (global_dpdk_args) that will be
   passed to rte_eal_init(), based on the parsed TRex options and the
   platform file. Returns 0 on success, -1 on configuration errors. */
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    /* finalize the core/socket layout before computing the core mask */
    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    /* core mask for -c, validated against the detected cores */
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    /* argv[0] placeholder */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";

    /* Mellanox PMDs may be loaded as shared objects (-d) */
    if ( CGlobalInfo::m_options.preview.get_mlx5_so_mode() ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-d";
        snprintf(global_mlx5_so_id_str, sizeof(global_mlx5_so_id_str), "libmlx5-64%s.so",global_image_postfix );
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_mlx5_so_id_str;
    }

    if ( CGlobalInfo::m_options.preview.get_mlx4_so_mode() ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-d";
        snprintf(global_mlx4_so_id_str, sizeof(global_mlx4_so_id_str), "libmlx4-64%s.so",global_image_postfix );
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_mlx4_so_id_str;
    }

    /* core mask and memory channels */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    /* EAL log level: quiet by default, scales with TRex verbosity */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    /* PCI whitelist (-w): either the --dump-interfaces list or the
       interfaces from the platform file */
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    /* hugepage file prefix + memory limit, for multi-instance runs */
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    /* verbose mode: show the final EAL argument list */
    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5642
5643
5644int sim_load_list_of_cap_files(CParserOption * op){
5645
5646    CFlowGenList fl;
5647    fl.Create();
5648    fl.load_from_yaml(op->cfg_file,1);
5649    if ( op->preview.getVMode() >0 ) {
5650        fl.DumpCsv(stdout);
5651    }
5652    uint32_t start=    os_get_time_msec();
5653
5654    CErfIF erf_vif;
5655
5656    fl.generate_p_thread_info(1);
5657    CFlowGenListPerThread   * lpt;
5658    lpt=fl.m_threads_info[0];
5659    lpt->set_vif(&erf_vif);
5660
5661    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5662        lpt->start_generate_stateful(op->out_file,op->preview);
5663    }
5664
5665    lpt->m_node_gen.DumpHist(stdout);
5666
5667    uint32_t stop=    os_get_time_msec();
5668    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5669    fl.Delete();
5670    return (0);
5671}
5672
/* Print PCI address, MAC and driver name for every detected DPDK port
   (used by the --dump-interfaces run mode).
   NOTE(review): rte_eth_devices[port_id].device->devargs is dereferenced
   unconditionally -- assumes every port was whitelisted via devargs;
   verify this holds for all supported device types. */
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        pci_addr = rte_eth_devices[port_id].device->devargs->pci.addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].data->drv_name);
    }
}
5690
5691
/* Extract the image postfix from the executable name: everything after
   TREX_NAME in argv[0] is copied into global_image_postfix (later used to
   build e.g. "libmlx5-64<postfix>.so"). Returns 0.
   NOTE(review): strcpy here is unbounded -- assumes global_image_postfix
   is large enough for any suffix of argv[0]; verify the buffer size. */
int learn_image_postfix(char * image_name){

    char *p = strstr(image_name,TREX_NAME);
    if (p) {
        strcpy(global_image_postfix,p+strlen(TREX_NAME));
    }
    return(0);
}
5700
/* Real program entry: parse options, initialize the DPDK EAL, create the
   global TRex object and launch per-core worker threads. Returns 0 on a
   clean run; exits or returns -1 on errors. */
int main_test(int argc , char * argv[]){

    learn_image_postfix(argv[0]);

    utl_termio_init();

    int ret;
    unsigned lcore_id;
    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);

    CGlobalInfo::m_options.preview.clean();

    /* first CLI parse pass (before the platform file is read) */
    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
        exit(-1);
    }

    /* enable core dump if requested */
    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
        utl_set_coredump_size(-1);
    }
    else {
        utl_set_coredump_size(0);
    }


    update_global_info_from_platform_file();

    /* It is not a mistake. Give the user higher priorty over the configuration file */
    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
        exit(-1);
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        CGlobalInfo::m_options.dump(stdout);
        CGlobalInfo::m_memory_cfg.Dump(stdout);
    }


    /* build the EAL argument list from the options/platform file */
    if (update_dpdk_args() < 0) {
        return -1;
    }

    CParserOption * po=&CGlobalInfo::m_options;


    /* quiet DPDK logging unless verbose mode is on */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        rte_set_log_level(1);

    }
    /* DPDK (hugepages, raw devices) requires root */
    uid_t uid;
    uid = geteuid ();
    if ( uid != 0 ) {
        printf("ERROR you must run with superuser priviliges \n");
        printf("User id   : %d \n",uid);
        printf("try 'sudo' %s \n",argv[0]);
        return (-1);
    }

    /* set affinity to the master core as default */
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);

    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
    if (ret < 0){
        printf(" You might need to run ./trex-cfg  once  \n");
        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
    }
    set_driver();
    /* --dump-interfaces mode: just print port info and exit */
    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
        dump_interfaces_info();
        exit(0);
    }
    reorder_dpdk_ports();
    time_init();

    /* check if we are in simulation mode */
    if ( CGlobalInfo::m_options.out_file != "" ){
        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
    }

    if ( !g_trex.Create() ){
        exit(1);
    }

    /* clamp the rx-check sample rate to the supported minimum */
    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
        po->m_rx_check_sample = get_min_sample_rate();
        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
    }

    /* set dump mode */
    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);

    /* disable WD if needed */
    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
    TrexWatchDog::getInstance().init(wd_enable);

    /* bring up the flow-gen infrastructure for the selected mode */
    g_trex.m_sl_rx_running = false;
    if ( get_is_stateless() ) {
        g_trex.start_master_stateless();

    }else{
        g_trex.start_master_statefull();
    }

    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
        CTrexDpdkParams dpdk_p;
        get_ex_drv()->get_dpdk_drv_params(dpdk_p);
        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports
                                      , dpdk_p.rx_data_q_num + dpdk_p.rx_drop_q_num);
        int ret;

        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
            // Unit test: toggle many times between receive all and stateless/stateful modes,
            // to test resiliency of add/delete fdir filters
            printf("Starting receive all/normal mode toggle unit test\n");
            for (int i = 0; i < 100; i++) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    pif->set_port_rcv_all(true);
                }
                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
                if (ret != 0) {
                    printf("Iteration %d: Receive all mode failed\n", i);
                    exit(ret);
                }

                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
                }

                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
                if (ret != 0) {
                    printf("Iteration %d: Normal mode failed\n", i);
                    exit(ret);
                }

                printf("Iteration %d OK\n", i);
            }
            exit(0);
        } else {
            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
                    pif->set_port_rcv_all(true);
                }
            }
            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
            exit(ret);
        }
    }

    // in case of client config, we already run pretest
    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        g_trex.pre_test();
    }

    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
    g_trex.ixgbe_rx_queue_flush();
    for (int i = 0; i < g_trex.m_max_ports; i++) {
        CPhyEthIF *_if = &g_trex.m_ports[i];
        _if->stop_rx_drop_queue();
    }

    /* optional latency warm-up phase before the real run */
    if ( CGlobalInfo::m_options.is_latency_enabled()
         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
            CGlobalInfo::m_options.m_latency_rate;
        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.m_latency_prev);
        g_trex.m_mg.start(pkts, NULL);
        delay(CGlobalInfo::m_options.m_latency_prev* 1000);
        printf("Finished \n");
        g_trex.m_mg.reset();
    }

    /* latency-only run: launch the latency worker on every core */
    if ( CGlobalInfo::m_options.preview.getOnlyLatency() ){
        rte_eal_mp_remote_launch(latency_one_lcore, NULL, CALL_MASTER);
        RTE_LCORE_FOREACH_SLAVE(lcore_id) {
            if (rte_eal_wait_lcore(lcore_id) < 0)
                return -1;
        }
        g_trex.stop_master();

        return (0);
    }

    /* debug single-core run on the calling thread */
    if ( CGlobalInfo::m_options.preview.getSingleCore() ) {
        g_trex.run_in_core(1);
        g_trex.stop_master();
        return (0);
    }

    /* normal run: launch the worker on every core and wait for them */
    rte_eal_mp_remote_launch(slave_one_lcore, NULL, CALL_MASTER);
    RTE_LCORE_FOREACH_SLAVE(lcore_id) {
        if (rte_eal_wait_lcore(lcore_id) < 0)
            return -1;
    }

    g_trex.stop_master();
    g_trex.Delete();
    utl_termio_reset();

    return (0);
}
5910
5911void wait_x_sec(int sec) {
5912    int i;
5913    printf(" wait %d sec ", sec);
5914    fflush(stdout);
5915    for (i=0; i<sec; i++) {
5916        delay(1000);
5917        printf(".");
5918        fflush(stdout);
5919    }
5920    printf("\n");
5921    fflush(stdout);
5922}
5923
5924/* should be called after rte_eal_init() */
5925void set_driver() {
5926    uint8_t m_max_ports = rte_eth_dev_count();
5927    if ( !m_max_ports ) {
5928        printf("Could not find interfaces.\n");
5929        exit(1);
5930    }
5931    struct rte_eth_dev_info dev_info;
5932    rte_eth_dev_info_get(0, &dev_info);
5933
5934    if ( !CTRexExtendedDriverDb::Ins()->is_driver_exists(dev_info.driver_name) ){
5935        printf("\nError: driver %s is not supported. Please consult the documentation for a list of supported drivers\n"
5936               ,dev_info.driver_name);
5937        exit(1);
5938    }
5939
5940    CTRexExtendedDriverDb::Ins()->set_driver_name(dev_info.driver_name);
5941}
5942
5943/*