1/*
2  Hanoh Haim
3  Cisco Systems, Inc.
4*/
5
6/*
7  Copyright (c) 2015-2017 Cisco Systems, Inc.
8
9  Licensed under the Apache License, Version 2.0 (the "License");
10  you may not use this file except in compliance with the License.
11  You may obtain a copy of the License at
12
13  http://www.apache.org/licenses/LICENSE-2.0
14
15  Unless required by applicable law or agreed to in writing, software
16  distributed under the License is distributed on an "AS IS" BASIS,
17  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18  See the License for the specific language governing permissions and
19  limitations under the License.
20*/
21#include <assert.h>
22#include <pthread.h>
23#include <signal.h>
24#include <pwd.h>
25#include <stdio.h>
26#include <string.h>
27#include <unistd.h>
28#include <zmq.h>
29#include <rte_common.h>
30#include <rte_log.h>
31#include <rte_memory.h>
32#include <rte_memcpy.h>
33#include <rte_memzone.h>
34#include <rte_tailq.h>
35#include <rte_eal.h>
36#include <rte_per_lcore.h>
37#include <rte_launch.h>
38#include <rte_atomic.h>
39#include <rte_cycles.h>
40#include <rte_prefetch.h>
41#include <rte_lcore.h>
42#include <rte_per_lcore.h>
43#include <rte_branch_prediction.h>
44#include <rte_interrupts.h>
45#include <rte_pci.h>
46#include <rte_debug.h>
47#include <rte_ether.h>
48#include <rte_ethdev.h>
49#include <rte_ring.h>
50#include <rte_mempool.h>
51#include <rte_mbuf.h>
52#include <rte_random.h>
53#include <rte_version.h>
54#include <rte_ip.h>
55
56#include "bp_sim.h"
57#include "os_time.h"
58#include "common/arg/SimpleGlob.h"
59#include "common/arg/SimpleOpt.h"
60#include "common/basic_utils.h"
61#include "stateless/cp/trex_stateless.h"
62#include "stateless/dp/trex_stream_node.h"
63#include "stateless/messaging/trex_stateless_messaging.h"
64#include "stateless/rx/trex_stateless_rx_core.h"
65#include "publisher/trex_publisher.h"
66#include "../linux_dpdk/version.h"
67extern "C" {
68#include "dpdk/drivers/net/ixgbe/base/ixgbe_type.h"
69#include "dpdk_funcs.h"
70}
71#include "dpdk/drivers/net/e1000/base/e1000_regs.h"
72#include "global_io_mode.h"
73#include "utl_term_io.h"
74#include "msg_manager.h"
75#include "platform_cfg.h"
76#include "pre_test.h"
77#include "stateful_rx_core.h"
78#include "debug.h"
79#include "pkt_gen.h"
80#include "trex_port_attr.h"
81#include "internal_api/trex_platform_api.h"
82#include "main_dpdk.h"
83#include "trex_watchdog.h"
84
85#define RX_CHECK_MIX_SAMPLE_RATE 8
86#define RX_CHECK_MIX_SAMPLE_RATE_1G 2
87
88#define MAX_PKT_BURST   32
89
90#define BP_MAX_CORES 32
91#define BP_MAX_TX_QUEUE 16
92#define BP_MASTER_AND_LATENCY 2
93
94#define RX_DESC_NUM_DROP_Q 64
95#define RX_DESC_NUM_DATA_Q 1024
96#define RX_DESC_NUM_DROP_Q_MLX 8
97#define RX_DESC_NUM_DATA_Q_VM 512
98#define TX_DESC_NUM 512
99
100typedef struct rte_mbuf * (*rte_mbuf_convert_to_one_seg_t)(struct rte_mbuf *m);
101struct rte_mbuf *  rte_mbuf_convert_to_one_seg(struct rte_mbuf *m);
102extern "C" int rte_eth_dev_get_port_by_addr(const struct rte_pci_addr *addr, uint8_t *port_id);
103void set_driver();
104void reorder_dpdk_ports();
105
// Highest hardware flow-stat rule id seen so far (IP-ID based rules).
static int max_stat_hw_id_seen = 0;
// Highest hardware flow-stat rule id seen so far (payload based rules).
static int max_stat_hw_id_seen_payload = 0;
108
109static inline int get_is_rx_thread_enabled() {
110    return ((CGlobalInfo::m_options.is_rx_enabled() || CGlobalInfo::m_options.is_stateless()) ?1:0);
111}
112
// Forward declaration; the full definition lives with the DPDK port setup code.
struct port_cfg_t;

// Upper bound on the argv entries we synthesize for rte_eal_init().
#define MAX_DPDK_ARGS 50
// Parsed platform YAML configuration (interfaces, cores, memory layout).
static CPlatformYamlInfo global_platform_cfg_info;
// Synthesized DPDK EAL argument vector and its current length.
static int global_dpdk_args_num ;
static char * global_dpdk_args[MAX_DPDK_ARGS];
// Backing storage for EAL string arguments; must stay alive for EAL init.
static char global_cores_str[100];
static char global_prefix_str[100];
static char global_loglevel_str[20];
static char global_master_id_str[10];
static char global_mlx5_so_id_str[50];
static char global_mlx4_so_id_str[50];
static char global_image_postfix[10];
#define TREX_NAME "_t-rex-64"
127
/* Abstract per-NIC driver layer. Each supported DPDK poll-mode driver gets a
   concrete subclass that encodes its capabilities (m_cap flags), queue layout,
   stats collection and flow-stat/filter programming. */
class CTRexExtendedDriverBase {
protected:
    enum {
        // Is there HW support for dropping packets arriving to certain queue?
        TREX_DRV_CAP_DROP_Q = 0x1,
        /* Does this NIC type support automatic packet dropping in case of a link down?
           in case it is supported the packets will be dropped, else there would be a back pressure to tx queues
           this interface is used as a workaround to let TRex work without link in stateless mode, driver that
           does not support that will be failed at init time because it will cause watchdog due to watchdog hang */
        TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN = 0x2,
        // Does the driver support changing MAC address?
        TREX_DRV_CAP_MAC_ADDR_CHG = 0x4,
        /* Mellanox driver does not work well with the DPDK port reorder we do */
        TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE = 0x8,
    } trex_drv_cap;

public:
    // Minimal supported sampling rate for rx-check.
    virtual int get_min_sample_rate(void)=0;
    virtual void update_configuration(port_cfg_t * cfg)=0;
    virtual void update_global_config_fdir(port_cfg_t * cfg)=0;
    virtual int configure_rx_filter_rules(CPhyEthIF * _if)=0;
    // Optional: program a HW flow-stat rule. Default is a no-op returning 0.
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3, uint8_t l4
                                          , uint8_t ipv6_next_h, uint16_t id) {return 0;}
    bool is_hardware_support_drop_queue() {
        return ((m_cap & TREX_DRV_CAP_DROP_Q) != 0);
    }
    bool hardware_support_mac_change() {
        return ((m_cap & TREX_DRV_CAP_MAC_ADDR_CHG) != 0);
    }
    bool drop_packets_incase_of_linkdown() {
        return ((m_cap & TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN) != 0);
    }
    bool supports_port_reorder() {
        // Since only Mellanox does not support, logic here is reversed compared to other flags.
        // Put this only if not supported.
        return ((m_cap & TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE) == 0);
    }
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    // Common stats collection helper; fix_i/fix_o are per-packet byte
    // adjustments applied to in/out counters (used with 4/4 by VF drivers,
    // presumably Ethernet CRC accounting -- confirm against the PMDs).
    void get_extended_stats_fixed(CPhyEthIF * _if, CPhyEthIFStats *stats, int fix_i, int fix_o);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if)=0;
    virtual int  wait_for_stable_link();
    virtual void wait_after_link_up();
    // True when the NIC maintains per-flow RX stats in hardware.
    virtual bool hw_rx_stat_supported(){return false;}
    // Default -1 means "not supported"; HW-stat drivers override.
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes
                             , int min, int max) {return -1;}
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {}
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) { return -1;}
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) = 0;
    virtual int verify_fw_ver(int i) {return 0;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
    virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;

    // Default RX mempool: 9k buffers. VM drivers override with smaller pools.
    virtual rte_mempool_t * get_rx_mem_pool(int socket_id) {
        return CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k;
    }
    // Default queue/descriptor layout: one data queue and one drop queue
    // (no drop queue when running in single-queue mode).
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
            p.rx_drop_q_num = 0;
        } else {
            p.rx_drop_q_num = 1;
        }
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q;
        p.tx_desc_num = TX_DESC_NUM;
    }

protected:
    // flags describing interface capabilities
    uint32_t m_cap;
};
201
202
/* Driver wrapper for Intel 1G NICs (igb). Supports the HW drop queue and
   MAC address change. */
class CTRexExtendedDriverBase1G : public CTRexExtendedDriverBase {

public:
    CTRexExtendedDriverBase1G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase1G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual void clear_rx_filter_rules(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd) {return 0;}
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;

        if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
            || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
            num_counters = MAX_FLOW_STATS;
            base_ip_id = IP_ID_RESERVE_BASE;
        } else {
            num_counters = UINT8_MAX;
            // Must be 0xff00, since we configure HW filter for the 0xff byte
            // The filter must catch all flow stat packets, and latency packets (having 0xffff in IP ID)
            base_ip_id = 0xff00;
        }
    }
    virtual int wait_for_stable_link();
    virtual void wait_after_link_up();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
251
252// Base for all virtual drivers. No constructor. Should not create object from this type.
// Base for all virtual drivers. No constructor. Should not create object from this type.
class CTRexExtendedDriverVirtBase : public CTRexExtendedDriverBase {
public:
    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, true, true);
    }
    // Virtual NICs have no flow-director global config; no-op.
    virtual void update_global_config_fdir(port_cfg_t * cfg) {}

    virtual int get_min_sample_rate(void){
        return ( RX_CHECK_MIX_SAMPLE_RATE_1G);
    }
    // One data queue, no drop queue, smaller RX ring than physical NICs.
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        p.rx_drop_q_num = 0;
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q_VM;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q;
        p.tx_desc_num = TX_DESC_NUM;
    }
    virtual rte_mempool_t * get_rx_mem_pool(int socket_id) {
        // In VMs there is usually less memory available
        return CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048;
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int stop_queue(CPhyEthIF * _if, uint16_t q_num);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats)=0;
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
        num_counters = MAX_FLOW_STATS;
        base_ip_id = IP_ID_RESERVE_BASE;
    }

    // No-op for virtual drivers.
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on) {return 0;}
    CFlowStatParser *get_flow_stat_parser();
};
290
291class CTRexExtendedDriverVirtio : public CTRexExtendedDriverVirtBase {
292public:
293    CTRexExtendedDriverVirtio() {
294        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
295        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */ 0;
296    }
297    static CTRexExtendedDriverBase * create(){
298        return ( new CTRexExtendedDriverVirtio() );
299    }
300    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
301};
302
303class CTRexExtendedDriverVmxnet3 : public CTRexExtendedDriverVirtBase {
304public:
305    CTRexExtendedDriverVmxnet3(){
306        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
307        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG*/0;
308    }
309
310    static CTRexExtendedDriverBase * create() {
311        return ( new CTRexExtendedDriverVmxnet3() );
312    }
313    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
314    virtual void update_configuration(port_cfg_t * cfg);
315};
316
/* Driver wrapper for the i40e VF (SR-IOV virtual function). */
class CTRexExtendedDriverI40evf : public CTRexExtendedDriverVirtBase {
public:
    CTRexExtendedDriverI40evf(){
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
    virtual void get_extended_stats(CPhyEthIF * _if, CPhyEthIFStats *stats) {
        // 4 bytes per packet compensated in each direction (presumably
        // Ethernet CRC accounting -- confirm against the PMD).
        get_extended_stats_fixed(_if, stats, 4, 4);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverI40evf() );
    }
};
331
/* Driver wrapper for the ixgbe VF (SR-IOV virtual function).
   NOTE(review): deliberately(?) inherits from the i40e VF wrapper to reuse
   its update_configuration(); verify this is still intended. */
class CTRexExtendedDriverIxgbevf : public CTRexExtendedDriverI40evf {

public:
    CTRexExtendedDriverIxgbevf(){
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
    virtual void get_extended_stats(CPhyEthIF * _if, CPhyEthIFStats *stats) {
        // 4 bytes per packet compensated in each direction (presumably
        // Ethernet CRC accounting -- confirm against the PMD).
        get_extended_stats_fixed(_if, stats, 4, 4);
    }

    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverIxgbevf() );
    }
};
347
/* Driver wrapper for the emulated e1000 NIC (only used inside VMs here). */
class CTRexExtendedDriverBaseE1000 : public CTRexExtendedDriverVirtBase {
    CTRexExtendedDriverBaseE1000() {
        // E1000 driver is only relevant in VM in our case
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
        m_cap = /*TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG */0;
    }
public:
    static CTRexExtendedDriverBase * create() {
        return ( new CTRexExtendedDriverBaseE1000() );
    }
    // e1000 driver hands us packets including the Ethernet CRC, so we need to chop it
    virtual void update_configuration(port_cfg_t * cfg);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);

};
363
/* Driver wrapper for Intel 10G NICs (ixgbe). */
class CTRexExtendedDriverBase10G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase10G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, true);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase10G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_stateless(CPhyEthIF * _if);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual int wait_for_stable_link();
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
        if ((CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS)
            || (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE)) {
            num_counters = MAX_FLOW_STATS;
        } else {
            // NOTE(review): 127 looks like a HW filter-table limit -- confirm.
            num_counters = 127;
        }
        base_ip_id = IP_ID_RESERVE_BASE;
    }
    virtual CFlowStatParser *get_flow_stat_parser();
    // Add (is_add=true) or remove an ethertype-based HW filter on the port.
    int add_del_eth_filter(CPhyEthIF * _if, bool is_add, uint16_t ethertype);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
};
405
/* Driver wrapper for Intel 40G NICs (i40e: X710/XL710). Supports HW
   per-flow RX stats unless explicitly disabled or in software-queue modes. */
class CTRexExtendedDriverBase40G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBase40G(){
        // 4 will make us support 127 flow stat counters
        // If we want to support more counters in case of card having less interfaces, we
        // Will have to identify the number of interfaces dynamically.
        m_if_per_card = 4;

        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG | TREX_DRV_CAP_DROP_PKTS_IF_LNK_DOWN;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control on 40G using DPDK API causes the interface to malfunction
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBase40G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
                                          , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
        // HW counters on x710 does not support counting bytes.
        if ( CGlobalInfo::m_options.preview.get_disable_hw_flow_stat()
             || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
             || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
            flags |= TrexPlatformApi::IF_STAT_RX_BYTES_COUNT;
            num_counters = MAX_FLOW_STATS;
        } else {
            if (m_if_per_card == 4) {
                num_counters = MAX_FLOW_STATS_X710;
            } else {
                num_counters = MAX_FLOW_STATS_XL710;
            }
        }
        base_ip_id = IP_ID_RESERVE_BASE;
        m_max_flow_stats = num_counters;
    }
    virtual int wait_for_stable_link();
    // HW flow stats are available unless disabled or in software-queue modes.
    virtual bool hw_rx_stat_supported(){
        if (CGlobalInfo::m_options.preview.get_disable_hw_flow_stat()
            || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
            || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
            return false;
        } else {
            return true;
        }
    }
    virtual int verify_fw_ver(int i);
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
                               , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

private:
    // Interfaces per physical card; determines the per-port counter budget.
    uint8_t m_if_per_card;
    // Cached result of get_rx_stat_capabilities().
    uint16_t m_max_flow_stats;
};
482
/* Driver wrapper for Cisco VIC NICs (enic). */
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseVIC(){
        m_cap = TREX_DRV_CAP_DROP_Q  | TREX_DRV_CAP_MAC_ADDR_CHG;
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseVIC() );
    }
    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }
    void clear_extended_stats(CPhyEthIF * _if);
    void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }

    virtual int verify_fw_ver(int i);

    virtual void update_configuration(port_cfg_t * cfg);

    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
        num_counters = MAX_FLOW_STATS;
        base_ip_id = IP_ID_RESERVE_BASE;
    }

    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:

    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t id
                               , uint8_t l4_proto, uint8_t tos, int queue);
    virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
    virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);

};
532
533
/* Driver wrapper for Mellanox ConnectX-4/5 NICs (mlx5). Uses multiple RSS
   drop queues to work around a per-queue drop-rate limit, and opts out of
   the DPDK port reorder TRex normally performs. */
class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase {
public:
    CTRexExtendedDriverBaseMlnx5G(){
        m_cap = TREX_DRV_CAP_DROP_Q | TREX_DRV_CAP_MAC_ADDR_CHG | TREX_DRV_CAP_NO_PORT_REORDER_POSSIBLE;
        CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_MANY_DROP_Q);
    }

    TRexPortAttr * create_port_attr(uint8_t port_id) {
        // disabling flow control via the DPDK API causes the interface to
        // malfunction (same workaround as the 40G driver)
        return new DpdkTRexPortAttr(port_id, false, false);
    }

    static CTRexExtendedDriverBase * create(){
        return ( new CTRexExtendedDriverBaseMlnx5G() );
    }

    virtual void update_global_config_fdir(port_cfg_t * cfg){
    }

    virtual int get_min_sample_rate(void){
        return (RX_CHECK_MIX_SAMPLE_RATE);
    }
    virtual void get_dpdk_drv_params(CTrexDpdkParams &p) {
        p.rx_data_q_num = 1;
        /* Mellanox ConnectX-4 can drop only 35MPPS per Rx queue.
         * to workaround this issue we will create multi rx queue and enable RSS. for Queue1 we will disable RSS
         * return zero for disable patch and rx queues number for enable.
        */
        p.rx_drop_q_num = 4;
        p.rx_desc_num_data_q = RX_DESC_NUM_DATA_Q;
        p.rx_desc_num_drop_q = RX_DESC_NUM_DROP_Q_MLX;
        p.tx_desc_num = TX_DESC_NUM;
    }
    virtual void update_configuration(port_cfg_t * cfg);
    virtual int configure_rx_filter_rules(CPhyEthIF * _if);
    virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
    virtual void clear_extended_stats(CPhyEthIF * _if);
    virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
    virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
    virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
    virtual void get_rx_stat_capabilities(uint16_t &flags, uint16_t &num_counters, uint16_t &base_ip_id) {
        flags = TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_RX_BYTES_COUNT
            | TrexPlatformApi::IF_STAT_PAYLOAD;
        num_counters = 127; //With MAX_FLOW_STATS we saw packet failures in rx_test. Need to check.
        base_ip_id = IP_ID_RESERVE_BASE;
    }
    virtual int wait_for_stable_link();
    // disabling flow control via the DPDK API causes the interface to malfunction
    virtual bool flow_control_disable_supported(){return false;}
    virtual CFlowStatParser *get_flow_stat_parser();
    virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);

private:
    virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint16_t ip_id, uint8_t l4_proto
                               , int queue);
    virtual int add_del_rx_filter_rules(CPhyEthIF * _if, bool set_on);
};
591
// Factory function type: creates a concrete driver wrapper instance.
typedef CTRexExtendedDriverBase * (*create_object_t) (void);


// Registry entry binding a DPDK driver name to its factory function.
class CTRexExtendedDriverRec {
public:
    std::string         m_driver_name;
    create_object_t     m_constructor;
};
600
/* Singleton registry mapping DPDK driver names to TRex driver wrappers.
   The active driver is selected once at startup via set_driver_name(). */
class CTRexExtendedDriverDb {
public:

    const std::string & get_driver_name() {
        return m_driver_name;
    }

    bool is_driver_exists(std::string name);



    // Select and instantiate the active driver; asserts on unknown names.
    void set_driver_name(std::string name){
        m_driver_was_set=true;
        m_driver_name=name;
        printf(" set driver name %s \n",name.c_str());
        m_drv=create_driver(m_driver_name);
        assert(m_drv);
    }

    // Return the active driver; aborts if set_driver_name() was never called.
    CTRexExtendedDriverBase * get_drv(){
        if (!m_driver_was_set) {
            printf(" ERROR too early to use this object !\n");
            printf(" need to set the right driver \n");
            assert(0);
        }
        assert(m_drv);
        return (m_drv);
    }

public:

    // Lazy singleton accessor.
    static CTRexExtendedDriverDb * Ins();

private:
    CTRexExtendedDriverBase * create_driver(std::string name);

    // Register every known physical and virtual driver by its DPDK name.
    CTRexExtendedDriverDb(){
        register_driver(std::string("net_ixgbe"),CTRexExtendedDriverBase10G::create);
        register_driver(std::string("net_e1000_igb"),CTRexExtendedDriverBase1G::create);
        register_driver(std::string("net_i40e"),CTRexExtendedDriverBase40G::create);
        register_driver(std::string("net_enic"),CTRexExtendedDriverBaseVIC::create);
        register_driver(std::string("net_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);

        /* virtual devices */
        register_driver(std::string("net_e1000_em"), CTRexExtendedDriverBaseE1000::create);
        register_driver(std::string("net_vmxnet3"), CTRexExtendedDriverVmxnet3::create);
        register_driver(std::string("net_virtio"), CTRexExtendedDriverVirtio::create);
        register_driver(std::string("net_i40e_vf"), CTRexExtendedDriverI40evf::create);
        register_driver(std::string("net_ixgbe_vf"), CTRexExtendedDriverIxgbevf::create);

        m_driver_was_set=false;
        m_drv=0;
        m_driver_name="";
    }
    void register_driver(std::string name,create_object_t func);
    static CTRexExtendedDriverDb * m_ins;
    bool        m_driver_was_set;
    std::string m_driver_name;
    CTRexExtendedDriverBase * m_drv;
    std::vector <CTRexExtendedDriverRec*>     m_list;

};
663
664CTRexExtendedDriverDb * CTRexExtendedDriverDb::m_ins;
665
666
667void CTRexExtendedDriverDb::register_driver(std::string name,
668                                            create_object_t func){
669    CTRexExtendedDriverRec * rec;
670    rec = new CTRexExtendedDriverRec();
671    rec->m_driver_name=name;
672    rec->m_constructor=func;
673    m_list.push_back(rec);
674}
675
676
677bool CTRexExtendedDriverDb::is_driver_exists(std::string name){
678    int i;
679    for (i=0; i<(int)m_list.size(); i++) {
680        if (m_list[i]->m_driver_name == name) {
681            return (true);
682        }
683    }
684    return (false);
685}
686
687
688CTRexExtendedDriverBase * CTRexExtendedDriverDb::create_driver(std::string name){
689    int i;
690    for (i=0; i<(int)m_list.size(); i++) {
691        if (m_list[i]->m_driver_name == name) {
692            return ( m_list[i]->m_constructor() );
693        }
694    }
695    return( (CTRexExtendedDriverBase *)0);
696}
697
698
699
700CTRexExtendedDriverDb * CTRexExtendedDriverDb::Ins(){
701    if (!m_ins) {
702        m_ins = new CTRexExtendedDriverDb();
703    }
704    return (m_ins);
705}
706
707static CTRexExtendedDriverBase *  get_ex_drv(){
708
709    return ( CTRexExtendedDriverDb::Ins()->get_drv());
710}
711
712static inline int get_min_sample_rate(void){
713    return ( get_ex_drv()->get_min_sample_rate());
714}
715
716// cores =0==1,1*2,2,3,4,5,6
717// An enum for all the option types
/* Option identifiers; each maps to an entry in parser_options[] below. */
enum { OPT_HELP,
       OPT_MODE_BATCH,          // -f: stateful traffic profile file
       OPT_MODE_INTERACTIVE,    // -i: interactive (stateless) mode
       OPT_NODE_DUMP,           // -v: verbosity level
       OPT_DUMP_INTERFACES,
       OPT_UT,                  // --ut: unit tests
       OPT_CORES,               // -c: number of cores
       OPT_SINGLE_CORE,         // -s
       OPT_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER,
       OPT_FLOW_FLIP_CLIENT_SERVER_SIDE,
       OPT_RATE_MULT,           // -m: traffic rate multiplier
       OPT_DURATION,            // -d: test duration
       OPT_PLATFORM_FACTOR,     // -pm
       OPT_PUB_DISABLE,         // -pubd: disable the publisher
       OPT_LIMT_NUM_OF_PORTS,   // --limit-ports
       OPT_PLAT_CFG_FILE,       // --cfg: platform YAML file
       OPT_MBUF_FACTOR,
       OPT_LATENCY,             // -l: latency packet rate
       OPT_NO_CLEAN_FLOW_CLOSE, // --nc
       OPT_LATENCY_MASK,        // --lm
       OPT_ONLY_LATENCY,        // --lo
       OPT_LATENCY_PREVIEW ,
       OPT_WAIT_BEFORE_TRAFFIC, // -w
       OPT_PCAP,                // --pcap
       OPT_RX_CHECK,            // --rx-check
       OPT_IO_MODE,             // --iom
       OPT_IPV6,                // --ipv6
       OPT_LEARN,               // --learn (NAT learning)
       OPT_LEARN_MODE,          // --learn-mode
       OPT_LEARN_VERIFY,        // --learn-verify
       OPT_L_PKT_MODE,          // --l-pkt-mode
       OPT_NO_FLOW_CONTROL,     // --no-flow-control-change
       OPT_NO_HW_FLOW_STAT,     // --no-hw-flow-stat
       OPT_X710_RESET_THRESHOLD,
       OPT_VLAN,                // --vlan
       OPT_RX_CHECK_HOPS,       // --hops
       OPT_CLIENT_CFG_FILE,     // --client_cfg
       OPT_NO_KEYBOARD_INPUT,
       OPT_VIRT_ONE_TX_RX_QUEUE,
       OPT_PREFIX,
       OPT_SEND_DEBUG_PKT,
       OPT_NO_WATCHDOG,
       OPT_ALLOW_COREDUMP,
       OPT_CHECKSUM_OFFLOAD,
       OPT_CLOSE,
       OPT_ARP_REF_PER,
       OPT_NO_OFED_CHECK,
       OPT_NO_SCAPY_SERVER,
       OPT_ACTIVE_FLOW,
       OPT_RT,
       OPT_MLX4_SO,
       OPT_MLX5_SO
};
772
773/* these are the argument types:
774   SO_NONE --    no argument needed
775   SO_REQ_SEP -- single required argument
776   SO_MULTI --   multiple arguments needed
777*/
/* Command-line option table consumed by CSimpleOpt in parse_options().
   Each row maps an option id (from the enum above) to its flag string and
   argument type. Note that some ids appear twice to support flag aliases
   (e.g. --client_cfg / --client-cfg). The table must be terminated with
   SO_END_OF_OPTIONS. */
static CSimpleOpt::SOption parser_options[] =
    {
        { OPT_HELP,                   "-?",                SO_NONE    },
        { OPT_HELP,                   "-h",                SO_NONE    },
        { OPT_HELP,                   "--help",            SO_NONE    },
        { OPT_UT,                     "--ut",              SO_NONE    },
        { OPT_MODE_BATCH,             "-f",                SO_REQ_SEP },
        { OPT_MODE_INTERACTIVE,       "-i",                SO_NONE    },
        { OPT_PLAT_CFG_FILE,          "--cfg",             SO_REQ_SEP },
        { OPT_SINGLE_CORE,            "-s",                SO_NONE    },
        { OPT_FLIP_CLIENT_SERVER,     "--flip",            SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER,"-p",                SO_NONE    },
        { OPT_FLOW_FLIP_CLIENT_SERVER_SIDE, "-e",          SO_NONE    },
        { OPT_NO_CLEAN_FLOW_CLOSE,    "--nc",              SO_NONE    },
        { OPT_LIMT_NUM_OF_PORTS,      "--limit-ports",     SO_REQ_SEP },
        { OPT_CORES,                  "-c",                SO_REQ_SEP },
        { OPT_NODE_DUMP,              "-v",                SO_REQ_SEP },
        { OPT_DUMP_INTERFACES,        "--dump-interfaces", SO_MULTI   },
        { OPT_LATENCY,                "-l",                SO_REQ_SEP },
        { OPT_DURATION,               "-d",                SO_REQ_SEP },
        { OPT_PLATFORM_FACTOR,        "-pm",               SO_REQ_SEP },
        { OPT_PUB_DISABLE,            "-pubd",             SO_NONE    },
        { OPT_RATE_MULT,              "-m",                SO_REQ_SEP },
        { OPT_LATENCY_MASK,           "--lm",              SO_REQ_SEP },
        { OPT_ONLY_LATENCY,           "--lo",              SO_NONE    },
        { OPT_LATENCY_PREVIEW,        "-k",                SO_REQ_SEP },
        { OPT_WAIT_BEFORE_TRAFFIC,    "-w",                SO_REQ_SEP },
        { OPT_PCAP,                   "--pcap",            SO_NONE    },
        { OPT_RX_CHECK,               "--rx-check",        SO_REQ_SEP },
        { OPT_IO_MODE,                "--iom",             SO_REQ_SEP },
        { OPT_RX_CHECK_HOPS,          "--hops",            SO_REQ_SEP },
        { OPT_IPV6,                   "--ipv6",            SO_NONE    },
        { OPT_LEARN,                  "--learn",           SO_NONE    },
        { OPT_LEARN_MODE,             "--learn-mode",      SO_REQ_SEP },
        { OPT_LEARN_VERIFY,           "--learn-verify",    SO_NONE    },
        { OPT_L_PKT_MODE,             "--l-pkt-mode",      SO_REQ_SEP },
        { OPT_NO_FLOW_CONTROL,        "--no-flow-control-change", SO_NONE },
        { OPT_NO_HW_FLOW_STAT,        "--no-hw-flow-stat", SO_NONE },
        { OPT_X710_RESET_THRESHOLD,   "--x710-reset-threshold", SO_REQ_SEP },
        { OPT_VLAN,                   "--vlan",            SO_NONE    },
        { OPT_CLIENT_CFG_FILE,        "--client_cfg",      SO_REQ_SEP },
        { OPT_CLIENT_CFG_FILE,        "--client-cfg",      SO_REQ_SEP },
        { OPT_NO_KEYBOARD_INPUT,      "--no-key",          SO_NONE    },
        { OPT_VIRT_ONE_TX_RX_QUEUE,   "--software",        SO_NONE    },
        { OPT_PREFIX,                 "--prefix",          SO_REQ_SEP },
        { OPT_SEND_DEBUG_PKT,         "--send-debug-pkt",  SO_REQ_SEP },
        { OPT_MBUF_FACTOR,            "--mbuf-factor",     SO_REQ_SEP },
        { OPT_NO_WATCHDOG,            "--no-watchdog",     SO_NONE    },
        { OPT_ALLOW_COREDUMP,         "--allow-coredump",  SO_NONE    },
        { OPT_CHECKSUM_OFFLOAD,       "--checksum-offload", SO_NONE   },
        { OPT_ACTIVE_FLOW,            "--active-flows",   SO_REQ_SEP  },
        { OPT_MLX5_SO,                "--mlx5-so", SO_NONE    },
        { OPT_MLX4_SO,                "--mlx4-so", SO_NONE    },
        { OPT_CLOSE,                  "--close-at-end",    SO_NONE    },
        { OPT_ARP_REF_PER,            "--arp-refresh-period", SO_REQ_SEP },
        { OPT_NO_OFED_CHECK,          "--no-ofed-check",   SO_NONE    },
        { OPT_NO_SCAPY_SERVER,        "--no-scapy-server", SO_NONE    },
        { OPT_RT,                     "--rt",              SO_NONE    },
        SO_END_OF_OPTIONS
    };
838
839static int usage(){
840
841    printf(" Usage: t-rex-64 [mode] <options>\n\n");
842    printf(" mode is one of:\n");
843    printf("   -f <file> : YAML file with traffic template configuration (Will run TRex in 'stateful' mode)\n");
844    printf("   -i        : Run TRex in 'stateless' mode\n");
845    printf("\n");
846
847    printf(" Available options are:\n");
848    printf(" --active-flows             : An experimental switch to scale up or down the number of active flows.  \n");
849    printf("                              It is not accurate due to the quantization of flow scheduler and in some case does not work. \n");
850    printf("                              Example --active-flows 500000 wil set the ballpark of the active flow to be ~0.5M \n");
851    printf(" --allow-coredump           : Allow creation of core dump \n");
852    printf(" --arp-refresh-period       : Period in seconds between sending of gratuitous ARP for our addresses. Value of 0 means 'never send' \n");
853    printf(" -c <num>>                  : Number of hardware threads to allocate for each port pair. Overrides the 'c' argument from config file \n");
854    printf(" --cfg <file>               : Use file as TRex config file instead of the default /etc/trex_cfg.yaml \n");
855    printf(" --checksum-offload         : Enable IP, TCP and UDP tx checksum offloading, using DPDK. This requires all used interfaces to support this \n");
856    printf(" --client_cfg <file>        : YAML file describing clients configuration \n");
857    printf(" --close-at-end             : Call rte_eth_dev_stop and close at exit. Calling these functions caused link down issues in older versions, \n");
858    printf("                               so we do not call them by default for now. Leaving this as option in case someone thinks it is helpful for him \n");
859    printf("                               This it temporary option. Will be removed in the future \n");
860    printf(" -d                         : Duration of the test in sec (default is 3600). Look also at --nc \n");
861    printf(" -e                         : Like -p but src/dst IP will be chosen according to the port (i.e. on client port send all packets with client src and server dest, and vice versa on server port \n");
862    printf(" --flip                     : Each flow will be sent both from client to server and server to client. This can acheive better port utilization when flow traffic is asymmetric \n");
863    printf(" --hops <hops>              : If rx check is enabled, the hop number can be assigned. See manual for details \n");
864    printf(" --iom  <mode>              : IO mode  for server output [0- silent, 1- normal , 2- short] \n");
865    printf(" --ipv6                     : Work in ipv6 mode \n");
866    printf(" -k  <num>                  : Run 'warm up' traffic for num seconds before starting the test. \n");
867    printf(" -l <rate>                  : In parallel to the test, run latency check, sending packets at rate/sec from each interface \n");
868    printf(" --l-pkt-mode <0-3>         : Set mode for sending latency packets \n");
869    printf("      0 (default)    send SCTP packets  \n");
870    printf("      1              Send ICMP request packets  \n");
871    printf("      2              Send ICMP requests from client side, and response from server side (for working with firewall) \n");
872    printf("      3              Send ICMP requests with sequence ID 0 from both sides \n");
873    printf("    Rate of zero means no latency check \n");
874    printf(" --learn (deprecated). Replaced by --learn-mode. To get older behaviour, use --learn-mode 2 \n");
875    printf(" --learn-mode [1-3]         : Used for working in NAT environments. Dynamically learn the NAT translation done by the DUT \n");
876    printf("      1    In case of TCP flow, use TCP ACK in first SYN to pass NAT translation information. Initial SYN packet must be first packet in the TCP flow \n");
877    printf("           In case of UDP stream, NAT translation information will pass in IP ID field of first packet in flow. This means that this field is changed by TRex\n");
878    printf("      2    Add special IP option to pass NAT translation information to first packet of each flow. Will not work on certain firewalls if they drop packets with IP options \n");
879    printf("      3    Like 1, but without support for sequence number randomization in server->client direction. Performance (flow/second) better than 1 \n");
880    printf(" --learn-verify             : Test the NAT translation mechanism. Should be used when there is no NAT in the setup \n");
881    printf(" --limit-ports              : Limit number of ports used. Must be even number (TRex always uses port pairs) \n");
882    printf(" --lm                       : Hex mask of cores that should send traffic \n");
883    printf("    For example: Value of 0x5 will cause only ports 0 and 2 to send traffic \n");
884    printf(" --lo                       : Only run latency test \n");
885    printf(" -m <num>                   : Rate multiplier.  Multiply basic rate of templates by this number \n");
886    printf(" --mbuf-factor              : Factor for packet memory \n");
887    printf(" --nc                       : If set, will not wait for all flows to be closed, before terminating - see manual for more information \n");
888    printf(" --no-flow-control-change   : By default TRex disables flow-control. If this option is given, it does not touch it \n");
889    printf(" --no-hw-flow-stat          : Relevant only for Intel x710 stateless mode. Do not use HW counters for flow stats\n");
890    printf("                            : Enabling this will support lower traffic rate, but will also report RX byte count statistics. See manual for more details\n");
891    printf(" --no-key                   : Daemon mode, don't get input from keyboard \n");
892    printf(" --no-ofed-check            : Disable the check of OFED version \n");
893    printf(" --no-scapy-server          : Disable Scapy server implicit start at stateless \n");
894    printf(" --no-watchdog              : Disable watchdog \n");
895    printf(" --rt                       : Run TRex DP/RX cores in realtime priority \n");
896    printf(" -p                         : Send all flow packets from the same interface (choosed randomly between client ad server ports) without changing their src/dst IP \n");
897    printf(" -pm                        : Platform factor. If you have splitter in the setup, you can multiply the total results by this factor \n");
898    printf("    e.g --pm 2.0 will multiply all the results bps in this factor \n");
899    printf(" --prefix <nam>             : For running multi TRex instances on the same machine. Each instance should have different name \n");
900    printf(" -pubd                      : Disable monitors publishers \n");
901    printf(" --rx-check  <rate>         : Enable rx check. TRex will sample flows at 1/rate and check order, latency and more \n");
902    printf(" -s                         : Single core. Run only one data path core. For debug \n");
903    printf(" --send-debug-pkt <proto>   : Do not run traffic generator. Just send debug packet and dump receive queues \n");
904    printf("    Supported protocols are 1 for icmp, 2 for UDP, 3 for TCP, 4 for ARP, 5 for 9K UDP \n");
905    printf(" --software                 : Do not configure any hardware rules. In this mode we use 1 core, and one RX queue and one TX queue per port\n");
906    printf(" -v <verbosity level>       : The higher the value, print more debug information \n");
907    printf(" --vlan                     : Relevant only for stateless mode with Intel 82599 10G NIC \n");
908    printf("                              When configuring flow stat and latency per stream rules, assume all streams uses VLAN \n");
909    printf(" -w  <num>                  : Wait num seconds between init of interfaces and sending traffic, default is 1 \n");
910
911    printf("\n");
912    printf(" Examples: ");
913    printf(" basic trex run for 20 sec and multiplier of 10 \n");
914    printf("  t-rex-64 -f cap2/dns.yaml -m 10 -d 20 \n");
915    printf("\n\n");
916    printf(" Copyright (c) 2015-2017 Cisco Systems, Inc.    \n");
917    printf("                                                                  \n");
918    printf(" Licensed under the Apache License, Version 2.0 (the 'License') \n");
919    printf(" you may not use this file except in compliance with the License. \n");
920    printf(" You may obtain a copy of the License at                          \n");
921    printf("                                                                  \n");
922    printf("    http://www.apache.org/licenses/LICENSE-2.0                    \n");
923    printf("                                                                  \n");
924    printf(" Unless required by applicable law or agreed to in writing, software \n");
925    printf(" distributed under the License is distributed on an \"AS IS\" BASIS,   \n");
926    printf(" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. \n");
927    printf(" See the License for the specific language governing permissions and      \n");
928    printf(" limitations under the License.                                           \n");
929    printf(" \n");
930    printf(" Open Source Components / Libraries \n");
931    printf(" DPDK       (BSD)       \n");
932    printf(" YAML-CPP   (BSD)       \n");
933    printf(" JSONCPP    (MIT)       \n");
934    printf(" \n");
935    printf(" Open Source Binaries \n");
936    printf(" ZMQ        (LGPL v3plus) \n");
937    printf(" \n");
938    printf(" Version : %s   \n",VERSION_BUILD_NUM);
939    printf(" DPDK version : %s   \n",rte_version());
940    printf(" User    : %s   \n",VERSION_USER);
941    printf(" Date    : %s , %s \n",get_build_date(),get_build_time());
942    printf(" Uuid    : %s    \n",VERSION_UIID);
943    printf(" Git SHA : %s    \n",VERSION_GIT_SHA);
944    return (0);
945}
946
947
948int gtest_main(int argc, char **argv) ;
949
/* Report a command-line parsing error to stdout and abort the process.
   Never returns. */
static void parse_err(const std::string &msg) {
    std::cout << "\nArgument Parsing Error: \n\n*** " << msg << "\n\n";
    exit(-1);
}
954
955static int parse_options(int argc, char *argv[], CParserOption* po, bool first_time ) {
956    CSimpleOpt args(argc, argv, parser_options);
957
958    bool latency_was_set=false;
959    (void)latency_was_set;
960    char ** rgpszArg = NULL;
961    bool opt_vlan_was_set = false;
962
963    int a=0;
964    int node_dump=0;
965
966    po->preview.setFileWrite(true);
967    po->preview.setRealTime(true);
968    uint32_t tmp_data;
969    float tmp_double;
970
971    po->m_run_mode = CParserOption::RUN_MODE_INVALID;
972
973    while ( args.Next() ){
974        if (args.LastError() == SO_SUCCESS) {
975            switch (args.OptionId()) {
976
977            case OPT_UT :
978                parse_err("Supported only in simulation");
979                break;
980
981            case OPT_HELP:
982                usage();
983                return -1;
984
985            case OPT_MODE_BATCH:
986                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
987                    parse_err("Please specify single run mode");
988                }
989                po->m_run_mode = CParserOption::RUN_MODE_BATCH;
990                po->cfg_file = args.OptionArg();
991                break;
992
993            case OPT_MODE_INTERACTIVE:
994                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
995                    parse_err("Please specify single run mode");
996                }
997                po->m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
998                break;
999
1000            case OPT_NO_KEYBOARD_INPUT  :
1001                po->preview.set_no_keyboard(true);
1002                break;
1003
1004            case OPT_CLIENT_CFG_FILE :
1005                po->client_cfg_file = args.OptionArg();
1006                break;
1007
1008            case OPT_PLAT_CFG_FILE :
1009                po->platform_cfg_file = args.OptionArg();
1010                break;
1011
1012            case OPT_SINGLE_CORE :
1013                po->preview.setSingleCore(true);
1014                break;
1015
1016            case OPT_IPV6:
1017                po->preview.set_ipv6_mode_enable(true);
1018                break;
1019
1020            case OPT_RT:
1021                po->preview.set_rt_prio_mode(true);
1022                break;
1023
1024            case OPT_MLX5_SO:
1025                po->preview.set_mlx5_so_mode(true);
1026                break;
1027
1028            case OPT_MLX4_SO:
1029                po->preview.set_mlx4_so_mode(true);
1030                break;
1031
1032            case OPT_LEARN :
1033                po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
1034                break;
1035
1036            case OPT_LEARN_MODE :
1037                sscanf(args.OptionArg(),"%d", &tmp_data);
1038                if (! po->is_valid_opt_val(tmp_data, CParserOption::LEARN_MODE_DISABLED, CParserOption::LEARN_MODE_MAX, "--learn-mode")) {
1039                    exit(-1);
1040                }
1041                po->m_learn_mode = (uint8_t)tmp_data;
1042                break;
1043
1044            case OPT_LEARN_VERIFY :
1045                // must configure learn_mode for learn verify to work. If different learn mode will be given later, it will be set instead.
1046                if (po->m_learn_mode == 0) {
1047                    po->m_learn_mode = CParserOption::LEARN_MODE_IP_OPTION;
1048                }
1049                po->preview.set_learn_and_verify_mode_enable(true);
1050                break;
1051
1052            case OPT_L_PKT_MODE :
1053                sscanf(args.OptionArg(),"%d", &tmp_data);
1054                if (! po->is_valid_opt_val(tmp_data, 0, L_PKT_SUBMODE_0_SEQ, "--l-pkt-mode")) {
1055                    exit(-1);
1056                }
1057                po->m_l_pkt_mode=(uint8_t)tmp_data;
1058                break;
1059
1060            case OPT_NO_HW_FLOW_STAT:
1061                po->preview.set_disable_hw_flow_stat(true);
1062                break;
1063            case OPT_NO_FLOW_CONTROL:
1064                po->preview.set_disable_flow_control_setting(true);
1065                break;
1066            case OPT_X710_RESET_THRESHOLD:
1067                po->set_x710_fdir_reset_threshold(atoi(args.OptionArg()));
1068                break;
1069            case OPT_VLAN:
1070                opt_vlan_was_set = true;
1071                break;
1072            case OPT_LIMT_NUM_OF_PORTS :
1073                po->m_expected_portd =atoi(args.OptionArg());
1074                break;
1075            case  OPT_CORES  :
1076                po->preview.setCores(atoi(args.OptionArg()));
1077                break;
1078            case OPT_FLIP_CLIENT_SERVER :
1079                po->preview.setClientServerFlip(true);
1080                break;
1081            case OPT_NO_CLEAN_FLOW_CLOSE :
1082                po->preview.setNoCleanFlowClose(true);
1083                break;
1084            case OPT_FLOW_FLIP_CLIENT_SERVER :
1085                po->preview.setClientServerFlowFlip(true);
1086                break;
1087            case OPT_FLOW_FLIP_CLIENT_SERVER_SIDE:
1088                po->preview.setClientServerFlowFlipAddr(true);
1089                break;
1090            case OPT_NODE_DUMP:
1091                a=atoi(args.OptionArg());
1092                node_dump=1;
1093                po->preview.setFileWrite(false);
1094                break;
1095            case OPT_DUMP_INTERFACES:
1096                if (first_time) {
1097                    rgpszArg = args.MultiArg(1);
1098                    while (rgpszArg != NULL) {
1099                        po->dump_interfaces.push_back(rgpszArg[0]);
1100                        rgpszArg = args.MultiArg(1);
1101                    }
1102                }
1103                if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
1104                    parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
1105                }
1106                po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
1107                break;
1108            case OPT_MBUF_FACTOR:
1109                sscanf(args.OptionArg(),"%f", &po->m_mbuf_factor);
1110                break;
1111            case OPT_RATE_MULT :
1112                sscanf(args.OptionArg(),"%f", &po->m_factor);
1113                break;
1114            case OPT_DURATION :
1115                sscanf(args.OptionArg(),"%f", &po->m_duration);
1116                break;
1117            case OPT_PUB_DISABLE:
1118                po->preview.set_zmq_publish_enable(false);
1119                break;
1120            case OPT_PLATFORM_FACTOR:
1121                sscanf(args.OptionArg(),"%f", &po->m_platform_factor);
1122                break;
1123            case OPT_LATENCY :
1124                latency_was_set=true;
1125                sscanf(args.OptionArg(),"%d", &po->m_latency_rate);
1126                break;
1127            case OPT_LATENCY_MASK :
1128                sscanf(args.OptionArg(),"%x", &po->m_latency_mask);
1129                break;
1130            case OPT_ONLY_LATENCY :
1131                po->preview.setOnlyLatency(true);
1132                break;
1133            case OPT_NO_WATCHDOG :
1134                po->preview.setWDDisable(true);
1135                break;
1136            case OPT_ALLOW_COREDUMP :
1137                po->preview.setCoreDumpEnable(true);
1138                break;
1139            case  OPT_LATENCY_PREVIEW :
1140                sscanf(args.OptionArg(),"%d", &po->m_latency_prev);
1141                break;
1142            case  OPT_WAIT_BEFORE_TRAFFIC :
1143                sscanf(args.OptionArg(),"%d", &po->m_wait_before_traffic);
1144                break;
1145            case OPT_PCAP:
1146                po->preview.set_pcap_mode_enable(true);
1147                break;
1148            case OPT_ACTIVE_FLOW:
1149                sscanf(args.OptionArg(),"%f", &tmp_double);
1150                po->m_active_flows=(uint32_t)tmp_double;
1151                break;
1152            case OPT_RX_CHECK :
1153                sscanf(args.OptionArg(),"%d", &tmp_data);
1154                po->m_rx_check_sample=(uint16_t)tmp_data;
1155                po->preview.set_rx_check_enable(true);
1156                break;
1157            case OPT_RX_CHECK_HOPS :
1158                sscanf(args.OptionArg(),"%d", &tmp_data);
1159                po->m_rx_check_hops = (uint16_t)tmp_data;
1160                break;
1161            case OPT_IO_MODE :
1162                sscanf(args.OptionArg(),"%d", &tmp_data);
1163                po->m_io_mode=(uint16_t)tmp_data;
1164                break;
1165
1166            case OPT_VIRT_ONE_TX_RX_QUEUE:
1167                CGlobalInfo::set_queues_mode(CGlobalInfo::Q_MODE_ONE_QUEUE);
1168                po->preview.setCores(1); // Only one TX core supported in software mode currently
1169                break;
1170
1171            case OPT_PREFIX:
1172                po->prefix = args.OptionArg();
1173                break;
1174
1175            case OPT_SEND_DEBUG_PKT:
1176                sscanf(args.OptionArg(),"%d", &tmp_data);
1177                po->m_debug_pkt_proto = (uint8_t)tmp_data;
1178                break;
1179
1180            case OPT_CHECKSUM_OFFLOAD:
1181                po->preview.setChecksumOffloadEnable(true);
1182                break;
1183
1184            case OPT_CLOSE:
1185                po->preview.setCloseEnable(true);
1186                break;
1187            case  OPT_ARP_REF_PER:
1188                sscanf(args.OptionArg(),"%d", &tmp_data);
1189                po->m_arp_ref_per=(uint16_t)tmp_data;
1190                break;
1191            case OPT_NO_OFED_CHECK:
1192                break;
1193            case OPT_NO_SCAPY_SERVER:
1194                break;
1195
1196            default:
1197                printf("Error: option %s is not handled.\n\n", args.OptionText());
1198                return -1;
1199                break;
1200            } // End of switch
1201        }// End of IF
1202        else {
1203            if (args.LastError() == SO_OPT_INVALID) {
1204                printf("Error: option %s is not recognized.\n\n", args.OptionText());
1205            } else if (args.LastError() == SO_ARG_MISSING) {
1206                printf("Error: option %s is expected to have argument.\n\n", args.OptionText());
1207            }
1208            usage();
1209            return -1;
1210        }
1211    } // End of while
1212
1213
1214    if ((po->m_run_mode ==  CParserOption::RUN_MODE_INVALID) ) {
1215        parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
1216    }
1217
1218    if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
1219        parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
1220                  "If you think it is important, please open a defect or write to TRex mailing list\n");
1221    }
1222
1223    if (po->preview.get_is_rx_check_enable() ||  po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
1224        || (CGlobalInfo::m_options.m_arp_ref_per != 0)
1225        || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE
1226        || CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_RSS) {
1227        po->set_rx_enabled();
1228    }
1229
1230    if ( node_dump ){
1231        po->preview.setVMode(a);
1232    }
1233
1234    /* if we have a platform factor we need to devided by it so we can still work with normalized yaml profile  */
1235    po->m_factor = po->m_factor/po->m_platform_factor;
1236
1237    uint32_t cores=po->preview.getCores();
1238    if ( cores > ((BP_MAX_CORES)/2-1) ) {
1239        fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
1240        return -1;
1241    }
1242
1243
1244    if ( first_time ){
1245        /* only first time read the configuration file */
1246        if ( po->platform_cfg_file.length() >0  ) {
1247            if ( node_dump ){
1248                printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
1249            }
1250            global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
1251            if ( node_dump ){
1252                global_platform_cfg_info.Dump(stdout);
1253            }
1254        }else{
1255            if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
1256                if ( node_dump ){
1257                    printf("Using configuration file /etc/trex_cfg.yaml \n");
1258                }
1259                global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
1260                if ( node_dump ){
1261                    global_platform_cfg_info.Dump(stdout);
1262                }
1263            }
1264        }
1265    }
1266
1267    if ( get_is_stateless() ) {
1268        if ( opt_vlan_was_set ) {
1269            // Only purpose of this in stateless is for configuring the 82599 rules correctly
1270            po->preview.set_vlan_mode(CPreviewMode::VLAN_MODE_NORMAL);
1271        }
1272        if (CGlobalInfo::m_options.client_cfg_file != "") {
1273            parse_err("Client config file is not supported with interactive (stateless) mode ");
1274        }
1275        if ( po->m_duration ) {
1276            parse_err("Duration is not supported with interactive (stateless) mode ");
1277        }
1278
1279        if ( po->preview.get_is_rx_check_enable() ) {
1280            parse_err("Rx check is not supported with interactive (stateless) mode ");
1281        }
1282
1283        if  ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
1284            parse_err("Latency check is not supported with interactive (stateless) mode ");
1285        }
1286
1287        if ( po->preview.getSingleCore() ){
1288            parse_err("Single core is not supported with interactive (stateless) mode ");
1289        }
1290
1291    } else {
1292        if ( !po->m_duration ) {
1293            po->m_duration = 3600.0;
1294        }
1295        if ( global_platform_cfg_info.m_tw.m_info_exist ){
1296
1297            CTimerWheelYamlInfo *lp=&global_platform_cfg_info.m_tw;
1298            std::string  err;
1299            if (!lp->Verify(err)){
1300                parse_err(err);
1301            }
1302
1303            po->set_tw_bucket_time_in_usec(lp->m_bucket_time_usec);
1304            po->set_tw_buckets(lp->m_buckets);
1305            po->set_tw_levels(lp->m_levels);
1306        }
1307    }
1308
1309    return 0;
1310}
1311
1312static int parse_options_wrapper(int argc, char *argv[], CParserOption* po, bool first_time ) {
1313    // copy, as arg parser sometimes changes the argv
1314    char ** argv_copy = (char **) malloc(sizeof(char *) * argc);
1315    for(int i=0; i<argc; i++) {
1316        argv_copy[i] = strdup(argv[i]);
1317    }
1318    int ret = parse_options(argc, argv_copy, po, first_time);
1319
1320    // free
1321    for(int i=0; i<argc; i++) {
1322        free(argv_copy[i]);
1323    }
1324    free(argv_copy);
1325    return ret;
1326}
1327
1328int main_test(int argc , char * argv[]);
1329
1330
1331#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
1332#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
1333#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
1334
1335/*
1336 * These default values are optimized for use with the Intel(R) 82599 10 GbE
1337 * Controller and the DPDK ixgbe PMD. Consider using other values for other
1338 * network controllers and/or network drivers.
1339 */
1340#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
1341#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
1342#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
1343
1344#define TX_WTHRESH_1G 1  /**< Default values of TX write-back threshold reg. */
1345#define TX_PTHRESH_1G 1 /**< Default values of TX prefetch threshold reg. */
1346
1347
1348struct port_cfg_t {
1349public:
1350    port_cfg_t(){
1351        memset(&m_port_conf,0,sizeof(m_port_conf));
1352        memset(&m_rx_conf,0,sizeof(m_rx_conf));
1353        memset(&m_tx_conf,0,sizeof(m_tx_conf));
1354        memset(&m_rx_drop_conf,0,sizeof(m_rx_drop_conf));
1355
1356        m_rx_conf.rx_thresh.pthresh = RX_PTHRESH;
1357        m_rx_conf.rx_thresh.hthresh = RX_HTHRESH;
1358        m_rx_conf.rx_thresh.wthresh = RX_WTHRESH;
1359        m_rx_conf.rx_free_thresh =32;
1360
1361        m_rx_drop_conf.rx_thresh.pthresh = 0;
1362        m_rx_drop_conf.rx_thresh.hthresh = 0;
1363        m_rx_drop_conf.rx_thresh.wthresh = 0;
1364        m_rx_drop_conf.rx_free_thresh =32;
1365        m_rx_drop_conf.rx_drop_en=1;
1366
1367        m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
1368        m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
1369        m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
1370
1371        m_port_conf.rxmode.jumbo_frame=1;
1372        m_port_conf.rxmode.max_rx_pkt_len =9*1024+22;
1373        m_port_conf.rxmode.hw_strip_crc=1;
1374    }
1375
1376    inline void update_var(void){
1377        get_ex_drv()->update_configuration(this);
1378    }
1379
1380    inline void update_global_config_fdir(void){
1381        get_ex_drv()->update_global_config_fdir(this);
1382    }
1383
1384    struct rte_eth_conf     m_port_conf;
1385    struct rte_eth_rxconf   m_rx_conf;
1386    struct rte_eth_rxconf   m_rx_drop_conf;
1387    struct rte_eth_txconf   m_tx_conf;
1388};
1389
1390
1391/* this object is per core / per port / per queue
1392   each core will have 2 ports to send to
1393
1394
1395   port0                                port1
1396
1397   0,1,2,3,..15 out queue ( per core )       0,1,2,3,..15 out queue ( per core )
1398
1399*/
1400
1401
/* Pair of (hardware register offset, human-readable name), used for
   dumping NIC counter registers (see dump_stats_extended below). */
typedef struct cnt_name_ {
    uint32_t offset;
    char * name;
}cnt_name_t ;

/* Build a cnt_name_t initializer from a register macro, reusing the
   macro's own spelling as the display name. */
#define MY_REG(a) {a,(char *)#a}
1408
1409void CPhyEthIFStats::Clear() {
1410    ipackets = 0;
1411    ibytes = 0;
1412    f_ipackets = 0;
1413    f_ibytes = 0;
1414    opackets = 0;
1415    obytes = 0;
1416    ierrors = 0;
1417    oerrors = 0;
1418    imcasts = 0;
1419    rx_nombuf = 0;
1420    memset(&m_prev_stats, 0, sizeof(m_prev_stats));
1421    memset(m_rx_per_flow_pkts, 0, sizeof(m_rx_per_flow_pkts));
1422    memset(m_rx_per_flow_bytes, 0, sizeof(m_rx_per_flow_bytes));
1423}
1424
1425// dump all counters (even ones that equal 0)
1426void CPhyEthIFStats::DumpAll(FILE *fd) {
1427#define DP_A4(f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1428#define DP_A(f) if (f) printf(" %-40s : %llu \n",#f, (unsigned long long)f)
1429    DP_A4(opackets);
1430    DP_A4(obytes);
1431    DP_A4(ipackets);
1432    DP_A4(ibytes);
1433    DP_A(ierrors);
1434    DP_A(oerrors);
1435}
1436
// dump all non zero counters
// NOTE(review): DP_A historically wrote via printf, not fprintf(fd,...),
// so 'fd' may be unused here - verify the macro honors fd.
void CPhyEthIFStats::Dump(FILE *fd) {
    DP_A(opackets);
    DP_A(obytes);
    DP_A(f_ipackets);
    DP_A(f_ibytes);
    DP_A(ipackets);
    DP_A(ibytes);
    DP_A(ierrors);
    DP_A(oerrors);
    DP_A(imcasts);
    DP_A(rx_nombuf);
}
1450
/* Dump the pre-test ("ignored") baseline counters, including ARP
   packets exchanged before the test started.
   NOTE(review): DP_A4 historically wrote via printf, not fprintf(fd,...),
   so 'fd' may be unused here - verify the macro honors fd. */
void CPhyEthIgnoreStats::dump(FILE *fd) {
    DP_A4(opackets);
    DP_A4(obytes);
    DP_A4(ipackets);
    DP_A4(ibytes);
    DP_A4(m_tx_arp);
    DP_A4(m_rx_arp);
}
1459
1460// Clear the RX queue of an interface, dropping all packets
1461void CPhyEthIF::flush_rx_queue(void){
1462
1463    rte_mbuf_t * rx_pkts[32];
1464    int j=0;
1465    uint16_t cnt=0;
1466
1467    while (true) {
1468        j++;
1469        cnt = rx_burst(m_rx_queue,rx_pkts,32);
1470        if ( cnt ) {
1471            int i;
1472            for (i=0; i<(int)cnt;i++) {
1473                rte_mbuf_t * m=rx_pkts[i];
1474                /*printf("rx--\n");
1475                  rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
1476                rte_pktmbuf_free(m);
1477            }
1478        }
1479        if ( ((cnt==0) && (j>10)) || (j>15) ) {
1480            break;
1481        }
1482    }
1483    if (cnt>0) {
1484        printf(" Warning can't flush rx-queue for port %d \n",(int)get_port_id());
1485    }
1486}
1487
1488
1489void CPhyEthIF::dump_stats_extended(FILE *fd){
1490
1491    cnt_name_t reg[]={
1492        MY_REG(IXGBE_GPTC), /* total packet */
1493        MY_REG(IXGBE_GOTCL), /* total bytes */
1494        MY_REG(IXGBE_GOTCH),
1495
1496        MY_REG(IXGBE_GPRC),
1497        MY_REG(IXGBE_GORCL),
1498        MY_REG(IXGBE_GORCH),
1499
1500
1501
1502        MY_REG(IXGBE_RXNFGPC),
1503        MY_REG(IXGBE_RXNFGBCL),
1504        MY_REG(IXGBE_RXNFGBCH),
1505        MY_REG(IXGBE_RXDGPC  ),
1506        MY_REG(IXGBE_RXDGBCL ),
1507        MY_REG(IXGBE_RXDGBCH  ),
1508        MY_REG(IXGBE_RXDDGPC ),
1509        MY_REG(IXGBE_RXDDGBCL ),
1510        MY_REG(IXGBE_RXDDGBCH  ),
1511        MY_REG(IXGBE_RXLPBKGPC ),
1512        MY_REG(IXGBE_RXLPBKGBCL),
1513        MY_REG(IXGBE_RXLPBKGBCH ),
1514        MY_REG(IXGBE_RXDLPBKGPC ),
1515        MY_REG(IXGBE_RXDLPBKGBCL),
1516        MY_REG(IXGBE_RXDLPBKGBCH ),
1517        MY_REG(IXGBE_TXDGPC      ),
1518        MY_REG(IXGBE_TXDGBCL     ),
1519        MY_REG(IXGBE_TXDGBCH     ),
1520        MY_REG(IXGBE_FDIRUSTAT ),
1521        MY_REG(IXGBE_FDIRFSTAT ),
1522        MY_REG(IXGBE_FDIRMATCH ),
1523        MY_REG(IXGBE_FDIRMISS )
1524
1525    };
1526    fprintf (fd," extended counters \n");
1527    int i;
1528    for (i=0; i<sizeof(reg)/sizeof(reg[0]); i++) {
1529        cnt_name_t *lp=&reg[i];
1530        uint32_t c=pci_reg_read(lp->offset);
1531        // xl710 bug. Counter values are -559038737 when they should be 0
1532        if (c && c != -559038737 ) {
1533            fprintf (fd," %s  : %d \n",lp->name,c);
1534        }
1535    }
1536}
1537
1538void CPhyEthIF::configure(uint16_t nb_rx_queue,
1539                          uint16_t nb_tx_queue,
1540                          const struct rte_eth_conf *eth_conf){
1541    int ret;
1542    ret = rte_eth_dev_configure(m_port_id,
1543                                nb_rx_queue,
1544                                nb_tx_queue,
1545                                eth_conf);
1546
1547    if (ret < 0)
1548        rte_exit(EXIT_FAILURE, "Cannot configure device: "
1549                 "err=%d, port=%u\n",
1550                 ret, m_port_id);
1551
1552    /* get device info */
1553    rte_eth_dev_info_get(m_port_id, &m_dev_info);
1554
1555    if (CGlobalInfo::m_options.preview.getChecksumOffloadEnable()) {
1556        /* check if the device supports TCP and UDP checksum offloading */
1557        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_UDP_CKSUM) == 0) {
1558            rte_exit(EXIT_FAILURE, "Device does not support UDP checksum offload: "
1559                     "port=%u\n",
1560                     m_port_id);
1561        }
1562        if ((m_dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_CKSUM) == 0) {
1563            rte_exit(EXIT_FAILURE, "Device does not support TCP checksum offload: "
1564                     "port=%u\n",
1565                     m_port_id);
1566        }
1567    }
1568}
1569
1570/*
1571  rx-queue 0 is the default queue. All traffic not going to queue 1
1572  will be dropped as queue 0 is disabled
1573  rx-queue 1 - Latency measurement packets and other features that need software processing will go here.
1574*/
1575void CPhyEthIF::configure_rx_duplicate_rules(){
1576    if ( get_is_rx_filter_enable() ){
1577        get_ex_drv()->configure_rx_filter_rules(this);
1578    }
1579}
1580
1581int CPhyEthIF::set_port_rcv_all(bool is_rcv) {
1582    // In these modes we are always receiving all packets anyway.
1583    switch (CGlobalInfo::get_queues_mode()) {
1584    case CGlobalInfo::Q_MODE_ONE_QUEUE:
1585        // In this mode we are always receiving all packets anyway.
1586        break;
1587    case CGlobalInfo::Q_MODE_RSS:
1588        //todo: need to send announcment to all tx cores
1589        //todo: need new function set_all_ports rcv all, to be able to send less tx messages
1590        break;
1591    default:
1592        get_ex_drv()->set_rcv_all(this, is_rcv);
1593        break;
1594    }
1595
1596    return 0;
1597}
1598
1599void CPhyEthIF::stop_rx_drop_queue() {
1600    // In debug mode, we want to see all packets. Don't want to disable any queue.
1601    if ( (CGlobalInfo::get_queues_mode() != CGlobalInfo::Q_MODE_NORMAL)
1602         || (CGlobalInfo::m_options.m_debug_pkt_proto != 0)) {
1603        return;
1604    }
1605    if ( CGlobalInfo::m_options.is_rx_enabled() ) {
1606        if ( (!get_ex_drv()->is_hardware_support_drop_queue())  ) {
1607            printf(" ERROR latency feature is not supported with current hardware  \n");
1608            exit(1);
1609        }
1610    }
1611    // OK to only stop MAIN_DPDK_DROP_Q here. The only driver in which there are
1612    // more than 1 drop q is Mellanox. stop_queue does not work in this case anyway.
1613    get_ex_drv()->stop_queue(this, MAIN_DPDK_DROP_Q);
1614}
1615
1616
1617void CPhyEthIF::rx_queue_setup(uint16_t rx_queue_id,
1618                               uint16_t nb_rx_desc,
1619                               unsigned int socket_id,
1620                               const struct rte_eth_rxconf *rx_conf,
1621                               struct rte_mempool *mb_pool){
1622
1623    int ret = rte_eth_rx_queue_setup(m_port_id , rx_queue_id,
1624                                     nb_rx_desc,
1625                                     socket_id,
1626                                     rx_conf,
1627                                     mb_pool);
1628    if (ret < 0)
1629        rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
1630                 "err=%d, port=%u\n",
1631                 ret, m_port_id);
1632}
1633
1634
1635
1636void CPhyEthIF::tx_queue_setup(uint16_t tx_queue_id,
1637                               uint16_t nb_tx_desc,
1638                               unsigned int socket_id,
1639                               const struct rte_eth_txconf *tx_conf){
1640
1641    int ret = rte_eth_tx_queue_setup( m_port_id,
1642                                      tx_queue_id,
1643                                      nb_tx_desc,
1644                                      socket_id,
1645                                      tx_conf);
1646    if (ret < 0)
1647        rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
1648                 "err=%d, port=%u queue=%u\n",
1649                 ret, m_port_id, tx_queue_id);
1650
1651}
1652
1653void CPhyEthIF::stop(){
1654    if (CGlobalInfo::m_options.preview.getCloseEnable()) {
1655        rte_eth_dev_stop(m_port_id);
1656        rte_eth_dev_close(m_port_id);
1657    }
1658}
1659
1660void CPhyEthIF::start(){
1661
1662    get_ex_drv()->clear_extended_stats(this);
1663
1664    int ret;
1665
1666    m_bw_tx.reset();
1667    m_bw_rx.reset();
1668
1669    m_stats.Clear();
1670    int i;
1671    for (i=0;i<10; i++ ) {
1672        ret = rte_eth_dev_start(m_port_id);
1673        if (ret==0) {
1674            return;
1675        }
1676        delay(1000);
1677    }
1678    if (ret < 0)
1679        rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
1680                 "err=%d, port=%u\n",
1681                 ret, m_port_id);
1682
1683}
1684
1685// Disabling flow control on interface
1686void CPhyEthIF::disable_flow_control(){
1687    int ret;
1688    // see trex-64 issue with loopback on the same NIC
1689    struct rte_eth_fc_conf fc_conf;
1690    memset(&fc_conf,0,sizeof(fc_conf));
1691    fc_conf.mode=RTE_FC_NONE;
1692    fc_conf.autoneg=1;
1693    fc_conf.pause_time=100;
1694    int i;
1695    for (i=0; i<5; i++) {
1696        ret=rte_eth_dev_flow_ctrl_set(m_port_id,&fc_conf);
1697        if (ret==0) {
1698            break;
1699        }
1700        delay(1000);
1701    }
1702    if (ret < 0)
1703        rte_exit(EXIT_FAILURE, "rte_eth_dev_flow_ctrl_set: "
1704                 "err=%d, port=%u\n probably link is down. Please check your link activity, or skip flow-control disabling, using: --no-flow-control-change option\n",
1705                 ret, m_port_id);
1706}
1707
1708/*
1709Get user friendly devices description from saved env. var
1710Changes certain attributes based on description
1711*/
1712void DpdkTRexPortAttr::update_description(){
1713    struct rte_pci_addr pci_addr;
1714    char pci[16];
1715    char * envvar;
1716    std::string pci_envvar_name;
1717    pci_addr = rte_eth_devices[m_port_id].device->devargs->pci.addr;
1718    snprintf(pci, sizeof(pci), "%04x:%02x:%02x.%d", pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function);
1719    intf_info_st.pci_addr = pci;
1720    pci_envvar_name = "pci" + intf_info_st.pci_addr;
1721    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), ':', '_');
1722    std::replace(pci_envvar_name.begin(), pci_envvar_name.end(), '.', '_');
1723    envvar = std::getenv(pci_envvar_name.c_str());
1724    if (envvar) {
1725        intf_info_st.description = envvar;
1726    } else {
1727        intf_info_st.description = "Unknown";
1728    }
1729    if (intf_info_st.description.find("82599ES") != std::string::npos) { // works for 82599EB etc. DPDK does not distinguish them
1730        flag_is_link_change_supported = false;
1731    }
1732    if (intf_info_st.description.find("82545EM") != std::string::npos) { // in virtual E1000, DPDK claims fc is supported, but it's not
1733        flag_is_fc_change_supported = false;
1734        flag_is_led_change_supported = false;
1735    }
1736    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
1737        printf("port %d desc: %s\n", m_port_id, intf_info_st.description.c_str());
1738    }
1739}
1740
1741int DpdkTRexPortAttr::set_led(bool on){
1742    if (on) {
1743        return rte_eth_led_on(m_port_id);
1744    }else{
1745        return rte_eth_led_off(m_port_id);
1746    }
1747}
1748
1749int DpdkTRexPortAttr::get_flow_ctrl(int &mode) {
1750    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1751    if (ret) {
1752        mode = -1;
1753        return ret;
1754    }
1755    mode = (int) fc_conf_tmp.mode;
1756    return 0;
1757}
1758
1759int DpdkTRexPortAttr::set_flow_ctrl(int mode) {
1760    if (!flag_is_fc_change_supported) {
1761        return -ENOTSUP;
1762    }
1763    int ret = rte_eth_dev_flow_ctrl_get(m_port_id, &fc_conf_tmp);
1764    if (ret) {
1765        return ret;
1766    }
1767    fc_conf_tmp.mode = (enum rte_eth_fc_mode) mode;
1768    return rte_eth_dev_flow_ctrl_set(m_port_id, &fc_conf_tmp);
1769}
1770
/* Reset all extended (per-driver) statistics of this port to zero. */
void DpdkTRexPortAttr::reset_xstats() {
    rte_eth_xstats_reset(m_port_id);
}
1774
1775int DpdkTRexPortAttr::get_xstats_values(xstats_values_t &xstats_values) {
1776    int size = rte_eth_xstats_get(m_port_id, NULL, 0);
1777    if (size < 0) {
1778        return size;
1779    }
1780    xstats_values_tmp.resize(size);
1781    xstats_values.resize(size);
1782    size = rte_eth_xstats_get(m_port_id, xstats_values_tmp.data(), size);
1783    if (size < 0) {
1784        return size;
1785    }
1786    for (int i=0; i<size; i++) {
1787        xstats_values[xstats_values_tmp[i].id] = xstats_values_tmp[i].value;
1788    }
1789    return 0;
1790}
1791
1792int DpdkTRexPortAttr::get_xstats_names(xstats_names_t &xstats_names){
1793    int size = rte_eth_xstats_get_names(m_port_id, NULL, 0);
1794    if (size < 0) {
1795        return size;
1796    }
1797    xstats_names_tmp.resize(size);
1798    xstats_names.resize(size);
1799    size = rte_eth_xstats_get_names(m_port_id, xstats_names_tmp.data(), size);
1800    if (size < 0) {
1801        return size;
1802    }
1803    for (int i=0; i<size; i++) {
1804        xstats_names[i] = xstats_names_tmp[i].name;
1805    }
1806    return 0;
1807}
1808
1809void DpdkTRexPortAttr::dump_link(FILE *fd){
1810    fprintf(fd,"port : %d \n",(int)m_port_id);
1811    fprintf(fd,"------------\n");
1812
1813    fprintf(fd,"link         : ");
1814    if (m_link.link_status) {
1815        fprintf(fd," link : Link Up - speed %u Mbps - %s\n",
1816                (unsigned) m_link.link_speed,
1817                (m_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1818                ("full-duplex") : ("half-duplex\n"));
1819    } else {
1820        fprintf(fd," Link Down\n");
1821    }
1822    fprintf(fd,"promiscuous  : %d \n",get_promiscuous());
1823}
1824
/* Refresh the cached DPDK device info (capabilities, limits). */
void DpdkTRexPortAttr::update_device_info(){
    rte_eth_dev_info_get(m_port_id, &dev_info);
}
1828
1829void DpdkTRexPortAttr::get_supported_speeds(supp_speeds_t &supp_speeds){
1830    uint32_t speed_capa = dev_info.speed_capa;
1831    if (speed_capa & ETH_LINK_SPEED_1G)
1832        supp_speeds.push_back(ETH_SPEED_NUM_1G);
1833    if (speed_capa & ETH_LINK_SPEED_10G)
1834        supp_speeds.push_back(ETH_SPEED_NUM_10G);
1835    if (speed_capa & ETH_LINK_SPEED_40G)
1836        supp_speeds.push_back(ETH_SPEED_NUM_40G);
1837    if (speed_capa & ETH_LINK_SPEED_100G)
1838        supp_speeds.push_back(ETH_SPEED_NUM_100G);
1839}
1840
/* Refresh the cached link state; may block until link negotiation
   completes (use update_link_status_nowait() for a non-blocking read). */
void DpdkTRexPortAttr::update_link_status(){
    rte_eth_link_get(m_port_id, &m_link);
}
1844
1845bool DpdkTRexPortAttr::update_link_status_nowait(){
1846    rte_eth_link new_link;
1847    bool changed = false;
1848    rte_eth_link_get_nowait(m_port_id, &new_link);
1849
1850    if (new_link.link_speed != m_link.link_speed ||
1851                new_link.link_duplex != m_link.link_duplex ||
1852                    new_link.link_autoneg != m_link.link_autoneg ||
1853                        new_link.link_status != m_link.link_status) {
1854        changed = true;
1855
1856        /* in case of link status change - notify the dest object */
1857        if (new_link.link_status != m_link.link_status) {
1858            on_link_down();
1859        }
1860    }
1861
1862    m_link = new_link;
1863    return changed;
1864}
1865
1866int DpdkTRexPortAttr::add_mac(char * mac){
1867    struct ether_addr mac_addr;
1868    for (int i=0; i<6;i++) {
1869        mac_addr.addr_bytes[i] =mac[i];
1870    }
1871
1872    if ( get_ex_drv()->hardware_support_mac_change() ) {
1873        if ( rte_eth_dev_mac_addr_add(m_port_id, &mac_addr,0) != 0) {
1874            printf("Failed setting MAC for port %d \n", m_port_id);
1875            exit(-1);
1876        }
1877    }
1878
1879    return 0;
1880}
1881
1882int DpdkTRexPortAttr::set_promiscuous(bool enable){
1883    if (enable) {
1884        rte_eth_promiscuous_enable(m_port_id);
1885    }else{
1886        rte_eth_promiscuous_disable(m_port_id);
1887    }
1888    return 0;
1889}
1890
1891int DpdkTRexPortAttr::set_multicast(bool enable){
1892    if (enable) {
1893        rte_eth_allmulticast_enable(m_port_id);
1894    }else{
1895        rte_eth_allmulticast_disable(m_port_id);
1896    }
1897    return 0;
1898}
1899
1900int DpdkTRexPortAttr::set_link_up(bool up){
1901    if (up) {
1902        return rte_eth_dev_set_link_up(m_port_id);
1903    }else{
1904        return rte_eth_dev_set_link_down(m_port_id);
1905    }
1906}
1907
1908bool DpdkTRexPortAttr::get_promiscuous(){
1909    int ret=rte_eth_promiscuous_get(m_port_id);
1910    if (ret<0) {
1911        rte_exit(EXIT_FAILURE, "rte_eth_promiscuous_get: "
1912                 "err=%d, port=%u\n",
1913                 ret, m_port_id);
1914
1915    }
1916    return ( ret?true:false);
1917}
1918
1919bool DpdkTRexPortAttr::get_multicast(){
1920    int ret=rte_eth_allmulticast_get(m_port_id);
1921    if (ret<0) {
1922        rte_exit(EXIT_FAILURE, "rte_eth_allmulticast_get: "
1923                 "err=%d, port=%u\n",
1924                 ret, m_port_id);
1925
1926    }
1927    return ( ret?true:false);
1928}
1929
1930
/* Copy the port's burned-in/default MAC address into *mac_addr. */
void DpdkTRexPortAttr::get_hw_src_mac(struct ether_addr *mac_addr){
    rte_eth_macaddr_get(m_port_id , mac_addr);
}
1934
/* Delegate flow-director statistics dumping to the driver layer. */
int CPhyEthIF::dump_fdir_global_stats(FILE *fd) {
    return get_ex_drv()->dump_fdir_global_stats(this, fd);
}
1938
/* Dump all non-zero ixgbe hardware statistics counters to 'fd'.
   DP_A1 prints a scalar counter; DP_A2 prints each non-zero entry of a
   counter array of length m. */
void dump_hw_state(FILE *fd,struct ixgbe_hw_stats *hs ){

#define DP_A1(f) if (hs->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)hs->f)
#define DP_A2(f,m) for (i=0;i<m; i++) { if (hs->f[i]) fprintf(fd," %-40s[%d] : %llu \n",#f,i, (unsigned long long)hs->f[i]); }
    int i;

    /* error / drop counters */
    //for (i=0;i<8; i++) { if (hs->mpc[i]) fprintf(fd," %-40s[%d] : %llu \n","mpc",i,hs->mpc[i]); }
    DP_A2(mpc,8);
    DP_A1(crcerrs);
    DP_A1(illerrc);
    //DP_A1(errbc);
    DP_A1(mspdc);
    DP_A1(mpctotal);
    DP_A1(mlfc);
    DP_A1(mrfc);
    DP_A1(rlec);
    //DP_A1(lxontxc);
    //DP_A1(lxonrxc);
    //DP_A1(lxofftxc);
    //DP_A1(lxoffrxc);
    //DP_A2(pxontxc,8);
    //DP_A2(pxonrxc,8);
    //DP_A2(pxofftxc,8);
    //DP_A2(pxoffrxc,8);

    //DP_A1(prc64);
    //DP_A1(prc127);
    //DP_A1(prc255);
    // DP_A1(prc511);
    //DP_A1(prc1023);
    //DP_A1(prc1522);

    /* good packet / byte counters */
    DP_A1(gprc);
    DP_A1(bprc);
    DP_A1(mprc);
    DP_A1(gptc);
    DP_A1(gorc);
    DP_A1(gotc);
    DP_A2(rnbc,8);
    DP_A1(ruc);
    DP_A1(rfc);
    DP_A1(roc);
    DP_A1(rjc);
    DP_A1(mngprc);
    DP_A1(mngpdc);
    DP_A1(mngptc);
    DP_A1(tor);
    DP_A1(tpr);
    DP_A1(tpt);
    DP_A1(ptc64);
    DP_A1(ptc127);
    DP_A1(ptc255);
    DP_A1(ptc511);
    DP_A1(ptc1023);
    DP_A1(ptc1522);
    DP_A1(mptc);
    DP_A1(bptc);
    DP_A1(xec);
    /* per-queue counters */
    DP_A2(qprc,16);
    DP_A2(qptc,16);
    DP_A2(qbrc,16);
    DP_A2(qbtc,16);
    DP_A2(qprdc,16);
    DP_A2(pxon2offc,8);
    /* flow director counters */
    DP_A1(fdirustat_add);
    DP_A1(fdirustat_remove);
    DP_A1(fdirfstat_fadd);
    DP_A1(fdirfstat_fremove);
    DP_A1(fdirmatch);
    DP_A1(fdirmiss);
    /* FCoE and misc counters */
    DP_A1(fccrc);
    DP_A1(fclast);
    DP_A1(fcoerpdc);
    DP_A1(fcoeprc);
    DP_A1(fcoeptc);
    DP_A1(fcoedwrc);
    DP_A1(fcoedwtc);
    DP_A1(fcoe_noddp);
    DP_A1(fcoe_noddp_ext_buff);
    DP_A1(ldpcec);
    DP_A1(pcrc8ec);
    DP_A1(b2ospc);
    DP_A1(b2ogprc);
    DP_A1(o2bgptc);
    DP_A1(o2bspc);
}
2025
2026void CPhyEthIF::set_ignore_stats_base(CPreTestStats &pre_stats) {
2027    // reading m_stats, so drivers saving prev in m_stats will be updated.
2028    // Actually, we want m_stats to be cleared
2029    get_ex_drv()->get_extended_stats(this, &m_stats);
2030
2031    m_ignore_stats.ipackets = m_stats.ipackets;
2032    m_ignore_stats.ibytes = m_stats.ibytes;
2033    m_ignore_stats.opackets = m_stats.opackets;
2034    m_ignore_stats.obytes = m_stats.obytes;
2035    m_stats.ipackets = 0;
2036    m_stats.opackets = 0;
2037    m_stats.ibytes = 0;
2038    m_stats.obytes = 0;
2039
2040    m_ignore_stats.m_tx_arp = pre_stats.m_tx_arp;
2041    m_ignore_stats.m_rx_arp = pre_stats.m_rx_arp;
2042
2043    if (CGlobalInfo::m_options.preview.getVMode() >= 3) {
2044        fprintf(stdout, "Pre test statistics for port %d\n", get_port_id());
2045        m_ignore_stats.dump(stdout);
2046    }
2047}
2048
2049void CPhyEthIF::dump_stats(FILE *fd){
2050
2051    update_counters();
2052
2053    fprintf(fd,"port : %d \n",(int)m_port_id);
2054    fprintf(fd,"------------\n");
2055    m_stats.DumpAll(fd);
2056    //m_stats.Dump(fd);
2057    printf (" Tx : %.1fMb/sec  \n",m_last_tx_rate);
2058    //printf (" Rx : %.1fMb/sec  \n",m_last_rx_rate);
2059}
2060
/* Reset both the hardware (DPDK) counters and the software mirror. */
void CPhyEthIF::stats_clear(){
    rte_eth_stats_reset(m_port_id);
    m_stats.Clear();
}
2065
2066class CCorePerPort  {
2067public:
2068    CCorePerPort (){
2069        m_tx_queue_id=0;
2070        m_len=0;
2071        int i;
2072        for (i=0; i<MAX_PKT_BURST; i++) {
2073            m_table[i]=0;
2074        }
2075        m_port=0;
2076    }
2077    uint8_t                 m_tx_queue_id;
2078    uint8_t                 m_tx_queue_id_lat; // q id for tx of latency pkts
2079    uint16_t                m_len;
2080    rte_mbuf_t *            m_table[MAX_PKT_BURST];
2081    CPhyEthIF  *            m_port;
2082};
2083
2084
2085#define MAX_MBUF_CACHE 100
2086
2087
2088/* per core/gbe queue port for trasmitt */
2089class CCoreEthIF : public CVirtualIF {
2090public:
2091    enum {
2092     INVALID_Q_ID = 255
2093    };
2094
2095public:
2096
2097    CCoreEthIF(){
2098        m_mbuf_cache=0;
2099    }
2100
2101    bool Create(uint8_t             core_id,
2102                uint8_t            tx_client_queue_id,
2103                CPhyEthIF  *        tx_client_port,
2104                uint8_t            tx_server_queue_id,
2105                CPhyEthIF  *        tx_server_port,
2106                uint8_t             tx_q_id_lat);
2107    void Delete();
2108
2109    virtual int open_file(std::string file_name){
2110        return (0);
2111    }
2112
2113    virtual int close_file(void){
2114        return (flush_tx_queue());
2115    }
2116    __attribute__ ((noinline)) int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl
2117                                                       , CCorePerPort *  lp_port
2118                                                       , CVirtualIFPerSideStats  * lp_stats, bool is_const);
2119    virtual int send_node(CGenNode * node);
2120    virtual void send_one_pkt(pkt_dir_t dir, rte_mbuf_t *m);
2121    virtual int flush_tx_queue(void);
2122    __attribute__ ((noinline)) void handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir);
2123
2124    void apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p);
2125
2126    bool process_rx_pkt(pkt_dir_t   dir,rte_mbuf_t * m);
2127
2128    virtual int update_mac_addr_from_global_cfg(pkt_dir_t       dir, uint8_t * p);
2129
2130    virtual pkt_dir_t port_id_to_dir(uint8_t port_id);
2131    void GetCoreCounters(CVirtualIFPerSideStats *stats);
2132    void DumpCoreStats(FILE *fd);
2133    void DumpIfStats(FILE *fd);
2134    static void DumpIfCfgHeader(FILE *fd);
2135    void DumpIfCfg(FILE *fd);
2136
2137    socket_id_t get_socket_id(){
2138        return ( CGlobalInfo::m_socket.port_to_socket( m_ports[0].m_port->get_port_id() ) );
2139    }
2140
2141    const CCorePerPort * get_ports() {
2142        return m_ports;
2143    }
2144
2145protected:
2146
2147    int send_burst(CCorePerPort * lp_port,
2148                   uint16_t len,
2149                   CVirtualIFPerSideStats  * lp_stats);
2150    int send_pkt(CCorePerPort * lp_port,
2151                 rte_mbuf_t *m,
2152                 CVirtualIFPerSideStats  * lp_stats);
2153    int send_pkt_lat(CCorePerPort * lp_port,
2154                 rte_mbuf_t *m,
2155                 CVirtualIFPerSideStats  * lp_stats);
2156
2157protected:
2158    uint8_t      m_core_id;
2159    uint16_t     m_mbuf_cache;
2160    CCorePerPort m_ports[CS_NUM]; /* each core has 2 tx queues 1. client side and server side */
2161    CNodeRing *  m_ring_to_rx;
2162
2163} __rte_cache_aligned; ;
2164
/* Stateless-mode specialization of CCoreEthIF: adds flow-stat aware
   sending and a slow-path ("service mode") send variant. */
class CCoreEthIFStateless : public CCoreEthIF {
public:
    virtual int send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                    , CVirtualIFPerSideStats  * lp_stats, bool is_const);

    /**
     * fast path version
     */
    virtual int send_node(CGenNode *node);

    /**
     * slow path version
     */
    virtual int send_node_service_mode(CGenNode *node);

protected:
    /* shared implementation; SERVICE_MODE selects the slow path */
    template <bool SERVICE_MODE> inline int send_node_common(CGenNode *no);

    inline rte_mbuf_t * generate_node_pkt(CGenNodeStateless *node_sl)   __attribute__ ((always_inline));
    inline int send_node_packet(CGenNodeStateless      *node_sl,
                                rte_mbuf_t             *m,
                                CCorePerPort           *lp_port,
                                CVirtualIFPerSideStats *lp_stats)   __attribute__ ((always_inline));

    rte_mbuf_t * generate_slow_path_node_pkt(CGenNodeStateless *node_sl);
};
2191
2192bool CCoreEthIF::Create(uint8_t             core_id,
2193                        uint8_t             tx_client_queue_id,
2194                        CPhyEthIF  *        tx_client_port,
2195                        uint8_t             tx_server_queue_id,
2196                        CPhyEthIF  *        tx_server_port,
2197                        uint8_t tx_q_id_lat ) {
2198    m_ports[CLIENT_SIDE].m_tx_queue_id = tx_client_queue_id;
2199    m_ports[CLIENT_SIDE].m_port        = tx_client_port;
2200    m_ports[CLIENT_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2201    m_ports[SERVER_SIDE].m_tx_queue_id = tx_server_queue_id;
2202    m_ports[SERVER_SIDE].m_port        = tx_server_port;
2203    m_ports[SERVER_SIDE].m_tx_queue_id_lat = tx_q_id_lat;
2204    m_core_id = core_id;
2205
2206    CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
2207    m_ring_to_rx = rx_dp->getRingDpToCp(core_id-1);
2208    assert( m_ring_to_rx);
2209    return (true);
2210}
2211
2212int CCoreEthIF::flush_tx_queue(void){
2213    /* flush both sides */
2214    pkt_dir_t dir;
2215    for (dir = CLIENT_SIDE; dir < CS_NUM; dir++) {
2216        CCorePerPort * lp_port = &m_ports[dir];
2217        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2218        if ( likely(lp_port->m_len > 0) ) {
2219            send_burst(lp_port, lp_port->m_len, lp_stats);
2220            lp_port->m_len = 0;
2221        }
2222    }
2223
2224    return 0;
2225}
2226
2227void CCoreEthIF::GetCoreCounters(CVirtualIFPerSideStats *stats){
2228    stats->Clear();
2229    pkt_dir_t   dir ;
2230    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2231        stats->Add(&m_stats[dir]);
2232    }
2233}
2234
2235void CCoreEthIF::DumpCoreStats(FILE *fd){
2236    fprintf (fd,"------------------------ \n");
2237    fprintf (fd," per core stats core id : %d  \n",m_core_id);
2238    fprintf (fd,"------------------------ \n");
2239
2240    CVirtualIFPerSideStats stats;
2241    GetCoreCounters(&stats);
2242    stats.Dump(stdout);
2243}
2244
2245void CCoreEthIF::DumpIfCfgHeader(FILE *fd){
2246    fprintf (fd," core, c-port, c-queue, s-port, s-queue, lat-queue\n");
2247    fprintf (fd," ------------------------------------------\n");
2248}
2249
/* One row per core: core id, client port/queue, server port/queue and
   the latency queue id. Pairs with DumpIfCfgHeader(). */
void CCoreEthIF::DumpIfCfg(FILE *fd){
    fprintf (fd," %d   %6u %6u  %6u  %6u %6u  \n",m_core_id,
             m_ports[CLIENT_SIDE].m_port->get_port_id(),
             m_ports[CLIENT_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_port->get_port_id(),
             m_ports[SERVER_SIDE].m_tx_queue_id,
             m_ports[SERVER_SIDE].m_tx_queue_id_lat
             );
}
2259
2260
2261void CCoreEthIF::DumpIfStats(FILE *fd){
2262
2263    fprintf (fd,"------------------------ \n");
2264    fprintf (fd," per core per if stats id : %d  \n",m_core_id);
2265    fprintf (fd,"------------------------ \n");
2266
2267    const char * t[]={"client","server"};
2268    pkt_dir_t   dir ;
2269    for (dir=CLIENT_SIDE; dir<CS_NUM; dir++) {
2270        CCorePerPort * lp=&m_ports[dir];
2271        CVirtualIFPerSideStats * lpstats = &m_stats[dir];
2272        fprintf (fd," port %d, queue id :%d  - %s \n",lp->m_port->get_port_id(),lp->m_tx_queue_id,t[dir] );
2273        fprintf (fd," ---------------------------- \n");
2274        lpstats->Dump(fd);
2275    }
2276}
2277
2278#define DELAY_IF_NEEDED
2279
/* Transmit 'len' mbufs from lp_port->m_table on the port's regular TX
   queue. With DELAY_IF_NEEDED (the default) it busy-retries until all
   packets are accepted, counting full-queue events; otherwise leftover
   packets are dropped and freed. Always returns 0. */
int CCoreEthIF::send_burst(CCorePerPort * lp_port,
                           uint16_t len,
                           CVirtualIFPerSideStats  * lp_stats){

#ifdef DEBUG_SEND_BURST
    if (CGlobalInfo::m_options.preview.getVMode() > 10) {
        fprintf(stdout, "send_burst port:%d queue:%d len:%d\n", lp_port->m_port->get_rte_port_id()
                , lp_port->m_tx_queue_id, len);
        for (int i = 0; i < lp_port->m_len; i++) {
            fprintf(stdout, "packet %d:\n", i);
            rte_mbuf_t *m = lp_port->m_table[i];
            utl_DumpBuffer(stdout, rte_pktmbuf_mtod(m, uint8_t*), rte_pktmbuf_pkt_len(m), 0);
        }
    }
#endif

    uint16_t ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,lp_port->m_table,len);
#ifdef DELAY_IF_NEEDED
    /* TX queue full: wait 1us and retry the remainder until it drains */
    while ( unlikely( ret<len ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        uint16_t ret1=lp_port->m_port->tx_burst(lp_port->m_tx_queue_id,
                                                &lp_port->m_table[ret],
                                                len-ret);
        ret+=ret1;
    }
#else
    /* CPU has burst of packets larger than TX can send. Need to drop packets */
    if ( unlikely(ret < len) ) {
        lp_stats->m_tx_drop += (len-ret);
        uint16_t i;
        for (i=ret; i<len;i++) {
            rte_mbuf_t * m=lp_port->m_table[i];
            rte_pktmbuf_free(m);
        }
    }
#endif

    return (0);
}
2320
2321
2322int CCoreEthIF::send_pkt(CCorePerPort * lp_port,
2323                         rte_mbuf_t      *m,
2324                         CVirtualIFPerSideStats  * lp_stats
2325                         ){
2326
2327    uint16_t len = lp_port->m_len;
2328    lp_port->m_table[len]=m;
2329    len++;
2330
2331    /* enough pkts to be sent */
2332    if (unlikely(len == MAX_PKT_BURST)) {
2333        send_burst(lp_port, MAX_PKT_BURST,lp_stats);
2334        len = 0;
2335    }
2336    lp_port->m_len = len;
2337
2338    return (0);
2339}
2340
/* Send a single latency packet on the dedicated latency TX queue,
   bypassing the burst table. With DELAY_IF_NEEDED it busy-retries until
   the packet is accepted; otherwise it is dropped and freed.
   Returns the number of packets actually sent (1, or 0 on drop). */
int CCoreEthIF::send_pkt_lat(CCorePerPort *lp_port, rte_mbuf_t *m, CVirtualIFPerSideStats *lp_stats) {
    // We allow sending only from first core of each port. This is serious internal bug otherwise.
    assert(lp_port->m_tx_queue_id_lat != INVALID_Q_ID);

    int ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);

#ifdef DELAY_IF_NEEDED
    /* TX queue full: wait 1us and retry until the packet is accepted */
    while ( unlikely( ret != 1 ) ){
        rte_delay_us(1);
        lp_stats->m_tx_queue_full += 1;
        ret = lp_port->m_port->tx_burst(lp_port->m_tx_queue_id_lat, &m, 1);
    }

#else
    if ( unlikely( ret != 1 ) ) {
        lp_stats->m_tx_drop ++;
        rte_pktmbuf_free(m);
        return 0;
    }

#endif

    return ret;
}
2365
2366void CCoreEthIF::send_one_pkt(pkt_dir_t       dir,
2367                              rte_mbuf_t      *m){
2368    CCorePerPort *  lp_port=&m_ports[dir];
2369    CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2370    send_pkt(lp_port,m,lp_stats);
2371    /* flush */
2372    send_burst(lp_port,lp_port->m_len,lp_stats);
2373    lp_port->m_len = 0;
2374}
2375
/**
 * Send a packet belonging to a flow-stat enabled stream and update the
 * per-flow TX counters.
 *
 * hw_id <  MAX_FLOW_STATS : ip-id based rule; the packet goes out through
 *                           the regular burst queue unchanged.
 * hw_id >= MAX_FLOW_STATS : payload (latency) rule; a flow_stat_payload_header
 *                           (seq, hw_id, flow_seq, magic, timestamp) is filled
 *                           in and the packet is sent immediately through the
 *                           latency queue via send_pkt_lat().
 *
 * @param is_const  true when m is a cached/shared mbuf (passed through to
 *                  alloc_flow_stat_mbuf)
 */
int CCoreEthIFStateless::send_node_flow_stat(rte_mbuf *m, CGenNodeStateless * node_sl, CCorePerPort *  lp_port
                                             , CVirtualIFPerSideStats  * lp_stats, bool is_const) {
    // Defining this makes 10% percent packet loss. 1% packet reorder.
# ifdef ERR_CNTRS_TEST
    static int temp=1;
    temp++;
#endif

    uint16_t hw_id = node_sl->get_stat_hw_id();
    rte_mbuf *mi;
    struct flow_stat_payload_header *fsp_head = NULL;

    if (hw_id >= MAX_FLOW_STATS) {
        // payload rule hw_ids are in the range right above ip id rules
        uint16_t hw_id_payload = hw_id - MAX_FLOW_STATS;
        if (hw_id_payload > max_stat_hw_id_seen_payload) {
            max_stat_hw_id_seen_payload = hw_id_payload;
        }

        // alloc_flow_stat_mbuf is expected to point fsp_head at the payload
        // header area of the returned mbuf (dereferenced unchecked below)
        mi = node_sl->alloc_flow_stat_mbuf(m, fsp_head, is_const);
        fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num();
        fsp_head->hw_id = hw_id_payload;
        fsp_head->flow_seq = lp_stats->m_lat_data[hw_id_payload].get_flow_seq();
        fsp_head->magic = FLOW_STAT_PAYLOAD_MAGIC;

        lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
#ifdef ERR_CNTRS_TEST
        // test hooks: deliberately skew the sequence number to exercise the
        // RX-side error counters (see comment at top of function)
        if (temp % 10 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].inc_seq_num();
        }
        if ((temp - 1) % 100 == 0) {
            fsp_head->seq = lp_stats->m_lat_data[hw_id_payload].get_seq_num() - 4;
        }
#endif
    } else {
        // ip id rule
        if (hw_id > max_stat_hw_id_seen) {
            max_stat_hw_id_seen = hw_id;
        }
        mi = m;
    }
    tx_per_flow_t *lp_s = &lp_stats->m_tx_per_flow[hw_id];
    lp_s->add_pkts(1);
    lp_s->add_bytes(mi->pkt_len + 4); // We add 4 because of ethernet CRC

    if (hw_id >= MAX_FLOW_STATS) {
        // take the timestamp as late as possible, right before enqueue
        fsp_head->time_stamp = os_get_hr_tick_64();
        send_pkt_lat(lp_port, mi, lp_stats);
    } else {
        send_pkt(lp_port, mi, lp_stats);
    }
    return 0;
}
2429
2430inline rte_mbuf_t *
2431CCoreEthIFStateless::generate_node_pkt(CGenNodeStateless *node_sl) {
2432    if (unlikely(node_sl->get_is_slow_path())) {
2433        return generate_slow_path_node_pkt(node_sl);
2434    }
2435
2436    /* check that we have mbuf  */
2437    rte_mbuf_t *m;
2438
2439    if ( likely(node_sl->is_cache_mbuf_array()) ) {
2440        m = node_sl->cache_mbuf_array_get_cur();
2441        rte_pktmbuf_refcnt_update(m,1);
2442    }else{
2443        m = node_sl->get_cache_mbuf();
2444
2445        if (m) {
2446            /* cache case */
2447            rte_pktmbuf_refcnt_update(m,1);
2448        }else{
2449            m=node_sl->alloc_node_with_vm();
2450            assert(m);
2451        }
2452    }
2453
2454    return m;
2455}
2456
2457inline int
2458CCoreEthIFStateless::send_node_packet(CGenNodeStateless      *node_sl,
2459                                      rte_mbuf_t             *m,
2460                                      CCorePerPort           *lp_port,
2461                                      CVirtualIFPerSideStats *lp_stats) {
2462
2463    if (unlikely(node_sl->is_stat_needed())) {
2464        if ( unlikely(node_sl->is_cache_mbuf_array()) ) {
2465            // No support for latency + cache. If user asks for cache on latency stream, we change cache to 0.
2466            // assert here just to make sure.
2467            assert(1);
2468        }
2469        return send_node_flow_stat(m, node_sl, lp_port, lp_stats, (node_sl->get_cache_mbuf()) ? true : false);
2470    } else {
2471        return send_pkt(lp_port, m, lp_stats);
2472    }
2473}
2474
2475int CCoreEthIFStateless::send_node(CGenNode *node) {
2476    return send_node_common<false>(node);
2477}
2478
2479int CCoreEthIFStateless::send_node_service_mode(CGenNode *node) {
2480    return send_node_common<true>(node);
2481}
2482
2483/**
2484 * this is the common function and it is templated
2485 * for two compiler evaluation for performance
2486 *
2487 */
2488template <bool SERVICE_MODE>
2489int CCoreEthIFStateless::send_node_common(CGenNode *node) {
2490    CGenNodeStateless * node_sl = (CGenNodeStateless *) node;
2491
2492    pkt_dir_t dir                     = (pkt_dir_t)node_sl->get_mbuf_cache_dir();
2493    CCorePerPort *lp_port             = &m_ports[dir];
2494    CVirtualIFPerSideStats *lp_stats  = &m_stats[dir];
2495
2496    /* generate packet (can never fail) */
2497    rte_mbuf_t *m = generate_node_pkt(node_sl);
2498
2499    /* template boolean - this will be removed at compile time */
2500    if (SERVICE_MODE) {
2501        TrexStatelessCaptureMngr::getInstance().handle_pkt_tx(m, lp_port->m_port->get_port_id());
2502    }
2503
2504    /* send */
2505    return send_node_packet(node_sl, m, lp_port, lp_stats);
2506}
2507
2508/**
2509 * slow path code goes here
2510 *
2511 */
2512rte_mbuf_t *
2513CCoreEthIFStateless::generate_slow_path_node_pkt(CGenNodeStateless *node_sl) {
2514
2515    if (node_sl->m_type == CGenNode::PCAP_PKT) {
2516        CGenNodePCAP *pcap_node = (CGenNodePCAP *)node_sl;
2517        return pcap_node->get_pkt();
2518    }
2519
2520    /* unhandled case of slow path node */
2521    assert(0);
2522    return (NULL);
2523}
2524
2525void CCoreEthIF::apply_client_cfg(const ClientCfgBase *cfg, rte_mbuf_t *m, pkt_dir_t dir, uint8_t *p) {
2526
2527    assert(cfg);
2528
2529    /* take the right direction config */
2530    const ClientCfgDirBase &cfg_dir = ( (dir == CLIENT_SIDE) ? cfg->m_initiator : cfg->m_responder);
2531
2532    /* dst mac */
2533    if (cfg_dir.has_dst_mac_addr()) {
2534        memcpy(p, cfg_dir.get_dst_mac_addr(), 6);
2535    }
2536
2537    /* src mac */
2538    if (cfg_dir.has_src_mac_addr()) {
2539        memcpy(p + 6, cfg_dir.get_src_mac_addr(), 6);
2540    }
2541
2542    /* VLAN */
2543    if (cfg_dir.has_vlan()) {
2544        add_vlan(m, cfg_dir.get_vlan());
2545    }
2546}
2547
2548/**
2549 * slow path features goes here (avoid multiple IFs)
2550 *
2551 */
2552void CCoreEthIF::handle_slowpath_features(CGenNode *node, rte_mbuf_t *m, uint8_t *p, pkt_dir_t dir) {
2553
2554
2555    /* MAC ovverride */
2556    if ( unlikely( CGlobalInfo::m_options.preview.get_mac_ip_overide_enable() ) ) {
2557        /* client side */
2558        if ( node->is_initiator_pkt() ) {
2559            *((uint32_t*)(p+6)) = PKT_NTOHL(node->m_src_ip);
2560        }
2561    }
2562
2563    /* flag is faster than checking the node pointer (another cacheline) */
2564    if ( unlikely(CGlobalInfo::m_options.preview.get_is_client_cfg_enable() ) ) {
2565        apply_client_cfg(node->m_client_cfg, m, dir, p);
2566    }
2567
2568}
2569
2570int CCoreEthIF::send_node(CGenNode * node) {
2571
2572#ifdef OPT_REPEAT_MBUF
2573
2574    if ( unlikely( node->get_cache_mbuf() !=NULL ) ) {
2575        pkt_dir_t       dir;
2576        rte_mbuf_t *    m=node->get_cache_mbuf();
2577        dir=(pkt_dir_t)node->get_mbuf_cache_dir();
2578        CCorePerPort *  lp_port=&m_ports[dir];
2579        CVirtualIFPerSideStats  * lp_stats = &m_stats[dir];
2580        rte_pktmbuf_refcnt_update(m,1);
2581        send_pkt(lp_port,m,lp_stats);
2582        return (0);
2583    }
2584#endif
2585
2586    CFlowPktInfo *  lp=node->m_pkt_info;
2587    rte_mbuf_t *    m=lp->generate_new_mbuf(node);
2588
2589    pkt_dir_t       dir;
2590    bool            single_port;
2591
2592    dir         = node->cur_interface_dir();
2593    single_port = node->get_is_all_flow_from_same_dir() ;
2594
2595
2596    if ( unlikely(CGlobalInfo::m_options.preview.get_vlan_mode()
2597                  != CPreviewMode::VLAN_MODE_NONE) ) {
2598        uint16_t vlan_id=0;
2599
2600        if (CGlobalInfo::m_options.preview.get_vlan_mode()
2601            == CPreviewMode::VLAN_MODE_LOAD_BALANCE) {
2602            /* which vlan to choose 0 or 1*/
2603            uint8_t vlan_port = (node->m_src_ip & 1);
2604            vlan_id = CGlobalInfo::m_options.m_vlan_port[vlan_port];
2605            if (likely( vlan_id > 0 ) ) {
2606                dir = dir ^ vlan_port;
2607            } else {
2608                /* both from the same dir but with VLAN0 */
2609                vlan_id = CGlobalInfo::m_options.m_vlan_port[0];
2610            }
2611        } else if (CGlobalInfo::m_options.preview.get_vlan_mode()
2612            == CPreviewMode::VLAN_MODE_NORMAL) {
2613            CCorePerPort *lp_port = &m_ports[dir];
2614            uint8_t port_id = lp_port->m_port->get_port_id();
2615            vlan_id = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();
2616        }
2617
2618        add_vlan(m, vlan_id);
2619    }
2620
2621    CCorePerPort *lp_port = &m_ports[dir];
2622    CVirtualIFPerSideStats *lp_stats = &m_stats[dir];
2623
2624    if (unlikely(m==0)) {
2625        lp_stats->m_tx_alloc_error++;
2626        return(0);
2627    }
2628
2629    /* update mac addr dest/src 12 bytes */
2630    uint8_t *p   = rte_pktmbuf_mtod(m, uint8_t*);
2631    uint8_t p_id = lp_port->m_port->get_port_id();
2632
2633    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2634
2635     /* when slowpath features are on */
2636    if ( unlikely( CGlobalInfo::m_options.preview.get_is_slowpath_features_on() ) ) {
2637        handle_slowpath_features(node, m, p, dir);
2638    }
2639
2640
2641    if ( unlikely( node->is_rx_check_enabled() ) ) {
2642        lp_stats->m_tx_rx_check_pkt++;
2643        lp->do_generate_new_mbuf_rxcheck(m, node, single_port);
2644        lp_stats->m_template.inc_template( node->get_template_id( ));
2645    }else{
2646
2647#ifdef OPT_REPEAT_MBUF
2648        // cache only if it is not sample as this is more complex mbuf struct
2649        if ( unlikely( node->can_cache_mbuf() ) ) {
2650            if ( !CGlobalInfo::m_options.preview.isMbufCacheDisabled() ){
2651                m_mbuf_cache++;
2652                if (m_mbuf_cache < MAX_MBUF_CACHE) {
2653                    /* limit the number of object to cache */
2654                    node->set_mbuf_cache_dir( dir);
2655                    node->set_cache_mbuf(m);
2656                    rte_pktmbuf_refcnt_update(m,1);
2657                }
2658            }
2659        }
2660#endif
2661
2662    }
2663
2664    /*printf("send packet -- \n");
2665      rte_pktmbuf_dump(stdout,m, rte_pktmbuf_pkt_len(m));*/
2666
2667    /* send the packet */
2668    send_pkt(lp_port,m,lp_stats);
2669    return (0);
2670}
2671
2672
2673int CCoreEthIF::update_mac_addr_from_global_cfg(pkt_dir_t  dir, uint8_t * p){
2674    assert(p);
2675    assert(dir<2);
2676
2677    CCorePerPort *  lp_port=&m_ports[dir];
2678    uint8_t p_id=lp_port->m_port->get_port_id();
2679    memcpy(p,CGlobalInfo::m_options.get_dst_src_mac_addr(p_id),12);
2680    return (0);
2681}
2682
2683pkt_dir_t
2684CCoreEthIF::port_id_to_dir(uint8_t port_id) {
2685
2686    for (pkt_dir_t dir = 0; dir < CS_NUM; dir++) {
2687        if (m_ports[dir].m_port->get_port_id() == port_id) {
2688            return dir;
2689        }
2690    }
2691
2692    return (CS_INVALID);
2693}
2694
2695class CLatencyHWPort : public CPortLatencyHWBase {
2696public:
2697    void Create(CPhyEthIF  * p,
2698                uint8_t tx_queue,
2699                uint8_t rx_queue){
2700        m_port=p;
2701        m_tx_queue_id=tx_queue;
2702        m_rx_queue_id=rx_queue;
2703    }
2704
2705    virtual int tx(rte_mbuf_t *m) {
2706        rte_mbuf_t *tx_pkts[2];
2707
2708        tx_pkts[0] = m;
2709        uint8_t vlan_mode = CGlobalInfo::m_options.preview.get_vlan_mode();
2710        if ( likely( vlan_mode != CPreviewMode::VLAN_MODE_NONE) ) {
2711            if ( vlan_mode == CPreviewMode::VLAN_MODE_LOAD_BALANCE ) {
2712                add_vlan(m, CGlobalInfo::m_options.m_vlan_port[0]);
2713            } else if (vlan_mode == CPreviewMode::VLAN_MODE_NORMAL) {
2714                uint8_t port_id = m_port->get_rte_port_id();
2715                add_vlan(m, CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
2716            }
2717        }
2718        uint16_t res=m_port->tx_burst(m_tx_queue_id,tx_pkts,1);
2719        if ( res == 0 ) {
2720            rte_pktmbuf_free(m);
2721            //printf(" queue is full for latency packet !!\n");
2722            return (-1);
2723
2724        }
2725#if 0
2726        fprintf(stdout," ==> %f.03 send packet ..\n",now_sec());
2727        uint8_t *p1=rte_pktmbuf_mtod(m, uint8_t*);
2728        uint16_t pkt_size1=rte_pktmbuf_pkt_len(m);
2729        utl_DumpBuffer(stdout,p1,pkt_size1,0);
2730#endif
2731
2732        return (0);
2733    }
2734
2735
2736    /* nothing special with HW implementation */
2737    virtual int tx_latency(rte_mbuf_t *m) {
2738        return tx(m);
2739    }
2740
2741    virtual rte_mbuf_t * rx(){
2742        rte_mbuf_t * rx_pkts[1];
2743        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,1);
2744        if (cnt) {
2745            return (rx_pkts[0]);
2746        }else{
2747            return (0);
2748        }
2749    }
2750
2751
2752    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts,
2753                              uint16_t nb_pkts){
2754        uint16_t cnt=m_port->rx_burst(m_rx_queue_id,rx_pkts,nb_pkts);
2755        return (cnt);
2756    }
2757
2758
2759private:
2760    CPhyEthIF  * m_port;
2761    uint8_t      m_tx_queue_id ;
2762    uint8_t      m_rx_queue_id;
2763};
2764
2765
/**
 * Latency port for setups where the latency thread does not own a TX
 * queue: outgoing packets are wrapped in a LATENCY_PKT node and pushed
 * through a ring to a DP core, which performs the actual transmit.
 * RX is still read directly from queue 0 of the physical port.
 */
class CLatencyVmPort : public CPortLatencyHWBase {
public:
    void Create(uint8_t port_index,
                CNodeRing *ring,
                CLatencyManager *mgr,
                CPhyEthIF  *p) {

        m_dir        = (port_index % 2);
        m_ring_to_dp = ring;
        m_mgr        = mgr;
        m_port       = p;
    }


    virtual int tx(rte_mbuf_t *m) {
        return tx_common(m, false);
    }

    // latency packets additionally need their timestamp fixed by the DP core
    virtual int tx_latency(rte_mbuf_t *m) {
        return tx_common(m, true);
    }

    virtual rte_mbuf_t * rx() {
        rte_mbuf_t * rx_pkts[1];
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, 1);
        if (cnt) {
            return (rx_pkts[0]);
        } else {
            return (0);
        }
    }

    virtual uint16_t rx_burst(struct rte_mbuf **rx_pkts, uint16_t nb_pkts) {
        uint16_t cnt = m_port->rx_burst(0, rx_pkts, nb_pkts);
        return (cnt);
    }

private:
      // Wrap m in a LATENCY_PKT node and enqueue it towards the DP core.
      // When fix_timestamp is set, the node carries the latency header
      // offset so the DP core can rewrite the timestamp just before TX.
      // Returns 0 on success, -1 on node-alloc or enqueue failure.
      // NOTE(review): on the failure paths neither m nor the node is freed
      // here - presumably the caller owns m on error; confirm against callers.
      virtual int tx_common(rte_mbuf_t *m, bool fix_timestamp) {


        uint8_t vlan_mode = CGlobalInfo::m_options.preview.get_vlan_mode();
        if ( likely( vlan_mode != CPreviewMode::VLAN_MODE_NONE) ) {
            if ( vlan_mode == CPreviewMode::VLAN_MODE_LOAD_BALANCE ) {
                add_vlan(m, CGlobalInfo::m_options.m_vlan_port[0]);
            } else if (vlan_mode == CPreviewMode::VLAN_MODE_NORMAL) {
                uint8_t port_id = m_port->get_rte_port_id();
                add_vlan(m, CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }

        /* allocate node */
        CGenNodeLatencyPktInfo *node=(CGenNodeLatencyPktInfo * )CGlobalInfo::create_node();
        if (!node) {
            return (-1);
        }

        node->m_msg_type = CGenNodeMsgBase::LATENCY_PKT;
        node->m_dir      = m_dir;
        node->m_pkt      = m;

        if (fix_timestamp) {
            node->m_latency_offset = m_mgr->get_latency_header_offset();
            node->m_update_ts = 1;
        } else {
            node->m_update_ts = 0;
        }

        if ( m_ring_to_dp->Enqueue((CGenNode*)node) != 0 ){
            return (-1);
        }

        return (0);
    }

    CPhyEthIF  * m_port;                // physical port, used for RX only
    uint8_t                          m_dir;          // direction derived from port index parity
    CNodeRing *                      m_ring_to_dp;   /* ring dp -> latency thread */
    CLatencyManager *                m_mgr;          // used to resolve the latency header offset
};
2846
2847
2848
/* Aggregated counters and derived rates for a single physical port,
   consumed by the stats publisher and the console dump. */
class CPerPortStats {
public:
    uint64_t opackets;   // total packets transmitted
    uint64_t obytes;     // total bytes transmitted
    uint64_t ipackets;   // total packets received
    uint64_t ibytes;     // total bytes received
    uint64_t ierrors;    // RX errors
    uint64_t oerrors;    // TX errors
    // per flow-stat rule TX counters (ip-id rules followed by payload rules)
    tx_per_flow_t m_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];
    // previous values of m_tx_per_flow (delta base - confirm with users)
    tx_per_flow_t m_prev_tx_per_flow[MAX_FLOW_STATS + MAX_FLOW_STATS_PAYLOAD];

    float     m_total_tx_bps;   // current TX rate, bits/sec
    float     m_total_tx_pps;   // current TX rate, packets/sec

    float     m_total_rx_bps;   // current RX rate, bits/sec
    float     m_total_rx_pps;   // current RX rate, packets/sec

    float     m_cpu_util;       // CPU utilization attributed to this port
    bool      m_link_up = true;         // last observed link state
    bool      m_link_was_down = false;  // presumably latched on any link drop - verify against writer
};
2870
/* Snapshot of TRex-wide counters and rates, aggregated over all ports and
   cores; serialized to JSON for the publisher (dump_json) and rendered to
   the console (Dump / DumpAllPorts). */
class CGlobalStats {
public:
    enum DumpFormat {
        dmpSTANDARD,   // one block of fields per port
        dmpTABLE       // table: one column per port
    };

    /* aggregate packet/byte totals over all ports */
    uint64_t  m_total_tx_pkts;
    uint64_t  m_total_rx_pkts;
    uint64_t  m_total_tx_bytes;
    uint64_t  m_total_rx_bytes;

    /* aggregate error counters */
    uint64_t  m_total_alloc_error;
    uint64_t  m_total_queue_full;
    uint64_t  m_total_queue_drop;

    /* flow generator population */
    uint64_t  m_total_clients;
    uint64_t  m_total_servers;
    uint64_t  m_active_sockets;

    /* NAT learn-mode counters */
    uint64_t  m_total_nat_time_out;
    uint64_t  m_total_nat_time_out_wait_ack;
    uint64_t  m_total_nat_no_fid  ;
    uint64_t  m_total_nat_active  ;
    uint64_t  m_total_nat_syn_wait;
    uint64_t  m_total_nat_open    ;
    uint64_t  m_total_nat_learn_error    ;

    CPerTxthreadTemplateInfo m_template;   // per-template TX counters

    float     m_socket_util;   // socket utilization percentage

    /* derived rates and utilization figures */
    float m_platform_factor;
    float m_tx_bps;
    float m_rx_bps;
    float m_tx_pps;
    float m_rx_pps;
    float m_tx_cps;
    float m_tx_expected_cps;
    float m_tx_expected_pps;
    float m_tx_expected_bps;
    float m_rx_drop_bps;
    float m_active_flows;
    float m_open_flows;
    float m_cpu_util;
    float m_cpu_util_raw;
    float m_rx_cpu_util;
    float m_bw_per_core;
    uint8_t m_threads;

    uint32_t      m_num_of_ports;          // number of valid entries in m_port
    CPerPortStats m_port[TREX_MAX_PORTS];  // per-port breakdown
public:
    void Dump(FILE *fd,DumpFormat mode);
    void DumpAllPorts(FILE *fd);
    void dump_json(std::string & json, bool baseline);
private:
    /* JSON field formatters; each emits "name":value with a trailing comma */
    std::string get_field(const char *name, float &f);
    std::string get_field(const char *name, uint64_t &f);
    std::string get_field_port(int port, const char *name, float &f);
    std::string get_field_port(int port, const char *name, uint64_t &f);

};
2934
2935std::string CGlobalStats::get_field(const char *name, float &f){
2936    char buff[200];
2937    if(f <= -10.0 or f >= 10.0)
2938        snprintf(buff, sizeof(buff), "\"%s\":%.1f,",name,f);
2939    else
2940        snprintf(buff, sizeof(buff), "\"%s\":%.3e,",name,f);
2941    return (std::string(buff));
2942}
2943
2944std::string CGlobalStats::get_field(const char *name, uint64_t &f){
2945    char buff[200];
2946    snprintf(buff,  sizeof(buff), "\"%s\":%llu,", name, (unsigned long long)f);
2947    return (std::string(buff));
2948}
2949
2950std::string CGlobalStats::get_field_port(int port, const char *name, float &f){
2951    char buff[200];
2952    if(f <= -10.0 or f >= 10.0)
2953        snprintf(buff,  sizeof(buff), "\"%s-%d\":%.1f,", name, port, f);
2954    else
2955        snprintf(buff, sizeof(buff), "\"%s-%d\":%.3e,", name, port, f);
2956    return (std::string(buff));
2957}
2958
2959std::string CGlobalStats::get_field_port(int port, const char *name, uint64_t &f){
2960    char buff[200];
2961    snprintf(buff, sizeof(buff), "\"%s-%d\":%llu,",name, port, (unsigned long long)f);
2962    return (std::string(buff));
2963}
2964
2965
/**
 * Serialize the global stats into a single "trex-global" JSON event.
 * Each get_field helper emits a trailing comma, so a dummy "unknown":0
 * member terminates the object to keep the JSON valid.
 *
 * @param json      output string, overwritten
 * @param baseline  when true, tag the event as a baseline sample
 */
void CGlobalStats::dump_json(std::string & json, bool baseline){
    /* refactor this to JSON */

    json="{\"name\":\"trex-global\",\"type\":0,";
    if (baseline) {
        json += "\"baseline\": true,";
    }

    json +="\"data\":{";

    /* high-resolution timestamp plus tick frequency, so consumers can
       convert ticks to seconds */
    char ts_buff[200];
    snprintf(ts_buff , sizeof(ts_buff), "\"ts\":{\"value\":%lu, \"freq\":%lu},", os_get_hr_tick_64(), os_get_hr_freq());
    json+= std::string(ts_buff);

/* the member name itself (stringified) becomes the JSON key */
#define GET_FIELD(f) get_field(#f, f)
#define GET_FIELD_PORT(p,f) get_field_port(p, #f, lp->f)

    json+=GET_FIELD(m_cpu_util);
    json+=GET_FIELD(m_cpu_util_raw);
    json+=GET_FIELD(m_bw_per_core);
    json+=GET_FIELD(m_rx_cpu_util);
    json+=GET_FIELD(m_platform_factor);
    json+=GET_FIELD(m_tx_bps);
    json+=GET_FIELD(m_rx_bps);
    json+=GET_FIELD(m_tx_pps);
    json+=GET_FIELD(m_rx_pps);
    json+=GET_FIELD(m_tx_cps);
    json+=GET_FIELD(m_tx_expected_cps);
    json+=GET_FIELD(m_tx_expected_pps);
    json+=GET_FIELD(m_tx_expected_bps);
    json+=GET_FIELD(m_total_alloc_error);
    json+=GET_FIELD(m_total_queue_full);
    json+=GET_FIELD(m_total_queue_drop);
    json+=GET_FIELD(m_rx_drop_bps);
    json+=GET_FIELD(m_active_flows);
    json+=GET_FIELD(m_open_flows);

    json+=GET_FIELD(m_total_tx_pkts);
    json+=GET_FIELD(m_total_rx_pkts);
    json+=GET_FIELD(m_total_tx_bytes);
    json+=GET_FIELD(m_total_rx_bytes);

    json+=GET_FIELD(m_total_clients);
    json+=GET_FIELD(m_total_servers);
    json+=GET_FIELD(m_active_sockets);
    json+=GET_FIELD(m_socket_util);

    json+=GET_FIELD(m_total_nat_time_out);
    json+=GET_FIELD(m_total_nat_time_out_wait_ack);
    json+=GET_FIELD(m_total_nat_no_fid );
    json+=GET_FIELD(m_total_nat_active );
    json+=GET_FIELD(m_total_nat_syn_wait);
    json+=GET_FIELD(m_total_nat_open   );
    json+=GET_FIELD(m_total_nat_learn_error);

    /* per-port breakdown: fields are suffixed with the port index */
    int i;
    for (i=0; i<(int)m_num_of_ports; i++) {
        CPerPortStats * lp=&m_port[i];
        json+=GET_FIELD_PORT(i,opackets) ;
        json+=GET_FIELD_PORT(i,obytes)   ;
        json+=GET_FIELD_PORT(i,ipackets) ;
        json+=GET_FIELD_PORT(i,ibytes)   ;
        json+=GET_FIELD_PORT(i,ierrors)  ;
        json+=GET_FIELD_PORT(i,oerrors)  ;
        json+=GET_FIELD_PORT(i,m_total_tx_bps);
        json+=GET_FIELD_PORT(i,m_total_tx_pps);
        json+=GET_FIELD_PORT(i,m_total_rx_bps);
        json+=GET_FIELD_PORT(i,m_total_rx_pps);
        json+=GET_FIELD_PORT(i,m_cpu_util);
    }
    json+=m_template.dump_as_json("template");
    /* dummy terminator absorbing the trailing comma of the last field */
    json+="\"unknown\":0}}"  ;
}
3039
/**
 * Print the global summary block to fd: CPU/bandwidth figures, TX/RX
 * totals, expected rates, flow/socket counters and drop rate. When a NAT
 * learn mode is active, NAT counters are appended on the same lines.
 */
void CGlobalStats::DumpAllPorts(FILE *fd){

    //fprintf (fd," Total-Tx-Pkts   : %s  \n",double_to_human_str((double)m_total_tx_pkts,"pkts",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Pkts   : %s  \n",double_to_human_str((double)m_total_rx_pkts,"pkts",KBYE_1000).c_str());

    //fprintf (fd," Total-Tx-Bytes  : %s  \n",double_to_human_str((double)m_total_tx_bytes,"bytes",KBYE_1000).c_str());
    //fprintf (fd," Total-Rx-Bytes  : %s  \n",double_to_human_str((double)m_total_rx_bytes,"bytes",KBYE_1000).c_str());



    fprintf (fd," Cpu Utilization : %2.1f  %%  %2.1f Gb/core \n",m_cpu_util,m_bw_per_core);
    fprintf (fd," Platform_factor : %2.1f  \n",m_platform_factor);
    fprintf (fd," Total-Tx        : %s  ",double_to_human_str(m_tx_bps,"bps",KBYE_1000).c_str());
    // NAT column on the Total-Tx line when learn mode is active
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT time out    : %8llu", (unsigned long long)m_total_nat_time_out);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu in wait for syn+ack)\n", (unsigned long long)m_total_nat_time_out_wait_ack);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }


    fprintf (fd," Total-Rx        : %s  ",double_to_human_str(m_rx_bps,"bps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," NAT aged flow id: %8llu \n", (unsigned long long)m_total_nat_no_fid);
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-PPS       : %s  ",double_to_human_str(m_tx_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT active: %8llu", (unsigned long long)m_total_nat_active);
        if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
            fprintf (fd," (%llu waiting for syn)\n", (unsigned long long)m_total_nat_syn_wait);
        } else {
            fprintf (fd, "\n");
        }
    }else{
        fprintf (fd,"\n");
    }

    fprintf (fd," Total-CPS       : %s  ",double_to_human_str(m_tx_cps,"cps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_mode() ) {
        fprintf (fd," Total NAT opened: %8llu \n", (unsigned long long)m_total_nat_open);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd,"\n");
    fprintf (fd," Expected-PPS    : %s  ",double_to_human_str(m_tx_expected_pps,"pps",KBYE_1000).c_str());
    if ( CGlobalInfo::is_learn_verify_mode() ) {
        fprintf (fd," NAT learn errors: %8llu \n", (unsigned long long)m_total_nat_learn_error);
    }else{
        fprintf (fd,"\n");
    }
    fprintf (fd," Expected-CPS    : %s  \n",double_to_human_str(m_tx_expected_cps,"cps",KBYE_1000).c_str());
    fprintf (fd," Expected-BPS    : %s  \n",double_to_human_str(m_tx_expected_bps,"bps",KBYE_1000).c_str());
    fprintf (fd,"\n");
    fprintf (fd," Active-flows    : %8llu  Clients : %8llu   Socket-util : %3.4f %%    \n",
             (unsigned long long)m_active_flows,
             (unsigned long long)m_total_clients,
             m_socket_util);
    fprintf (fd," Open-flows      : %8llu  Servers : %8llu   Socket : %8llu Socket/Clients :  %.1f \n",
             (unsigned long long)m_open_flows,
             (unsigned long long)m_total_servers,
             (unsigned long long)m_active_sockets,
             (float)m_active_sockets/(float)m_total_clients);

    // error counters are only printed when non-zero
    if (m_total_alloc_error) {
        fprintf (fd," Total_alloc_err  : %llu         \n", (unsigned long long)m_total_alloc_error);
    }
    if ( m_total_queue_full ){
        fprintf (fd," Total_queue_full : %llu         \n", (unsigned long long)m_total_queue_full);
    }
    if (m_total_queue_drop) {
        fprintf (fd," Total_queue_drop : %llu         \n", (unsigned long long)m_total_queue_drop);
    }

    //m_template.Dump(fd);

    fprintf (fd," drop-rate       : %s   \n",double_to_human_str(m_rx_drop_bps,"bps",KBYE_1000).c_str() );
}
3124
3125
3126void CGlobalStats::Dump(FILE *fd,DumpFormat mode){
3127    int i;
3128    int port_to_show=m_num_of_ports;
3129    if (port_to_show>4) {
3130        port_to_show=4;
3131        fprintf (fd," per port - limited to 4   \n");
3132    }
3133
3134
3135    if ( mode== dmpSTANDARD ){
3136        fprintf (fd," --------------- \n");
3137        for (i=0; i<(int)port_to_show; i++) {
3138            CPerPortStats * lp=&m_port[i];
3139            fprintf(fd,"port : %d ",(int)i);
3140            if ( ! lp->m_link_up ) {
3141                fprintf(fd," (link DOWN)");
3142            }
3143            fprintf(fd,"\n------------\n");
3144#define GS_DP_A4(f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
3145#define GS_DP_A(f) if (lp->f) fprintf(fd," %-40s : %llu \n",#f, (unsigned long long)lp->f)
3146            GS_DP_A4(opackets);
3147            GS_DP_A4(obytes);
3148            GS_DP_A4(ipackets);
3149            GS_DP_A4(ibytes);
3150            GS_DP_A(ierrors);
3151            GS_DP_A(oerrors);
3152            fprintf (fd," Tx : %s  \n",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
3153        }
3154    }else{
3155        fprintf(fd," %10s ","ports");
3156        for (i=0; i<(int)port_to_show; i++) {
3157            CPerPortStats * lp=&m_port[i];
3158            if ( lp->m_link_up ) {
3159                fprintf(fd,"| %15d ",i);
3160            } else {
3161                std::string port_with_state = "(link DOWN) " + std::to_string(i);
3162                fprintf(fd,"| %15s ",port_with_state.c_str());
3163            }
3164        }
3165        fprintf(fd,"\n");
3166        fprintf(fd," -----------------------------------------------------------------------------------------\n");
3167        std::string names[]={"opackets","obytes","ipackets","ibytes","ierrors","oerrors","Tx Bw"
3168        };
3169        for (i=0; i<7; i++) {
3170            fprintf(fd," %10s ",names[i].c_str());
3171            int j=0;
3172            for (j=0; j<port_to_show;j++) {
3173                CPerPortStats * lp=&m_port[j];
3174                uint64_t cnt;
3175                switch (i) {
3176                case 0:
3177                    cnt=lp->opackets;
3178                    fprintf(fd,"| %15lu ",cnt);
3179
3180                    break;
3181                case 1:
3182                    cnt=lp->obytes;
3183                    fprintf(fd,"| %15lu ",cnt);
3184
3185                    break;
3186                case 2:
3187                    cnt=lp->ipackets;
3188                    fprintf(fd,"| %15lu ",cnt);
3189
3190                    break;
3191                case 3:
3192                    cnt=lp->ibytes;
3193                    fprintf(fd,"| %15lu ",cnt);
3194
3195                    break;
3196                case 4:
3197                    cnt=lp->ierrors;
3198                    fprintf(fd,"| %15lu ",cnt);
3199
3200                    break;
3201                case 5:
3202                    cnt=lp->oerrors;
3203                    fprintf(fd,"| %15lu ",cnt);
3204
3205                    break;
3206                case 6:
3207                    fprintf(fd,"| %15s ",double_to_human_str((double)lp->m_total_tx_bps,"bps",KBYE_1000).c_str());
3208                    break;
3209                default:
3210                    cnt=0xffffff;
3211                }
3212            } /* ports */
3213            fprintf(fd, "\n");
3214        }/* fields*/
3215    }
3216
3217
3218}
3219
/**
 * The main TRex server object. Owns the physical ports, the per-core
 * virtual interfaces, the stateful/stateless RX cores, the ZMQ publisher
 * and the global statistics. Drives probe/init (Create), the run loop
 * helpers and the shutdown sequence.
 */
class CGlobalTRex  {

public:

    /**
     * different types of shutdown causes
     */
    typedef enum {
        SHUTDOWN_NONE,
        SHUTDOWN_TEST_ENDED,
        SHUTDOWN_CTRL_C,
        SHUTDOWN_SIGINT,
        SHUTDOWN_SIGTERM,
        SHUTDOWN_RPC_REQ
    } shutdown_rc_e;

    // sets safe defaults only; real initialization happens in Create()
    CGlobalTRex (){
        m_max_ports=4;
        m_max_cores=1;
        m_cores_to_dual_ports=0;
        m_max_queues_per_port=0;
        m_fl_was_init=false;
        m_expected_pps=0.0;
        m_expected_cps=0.0;
        m_expected_bps=0.0;
        m_trex_stateless = NULL;
        m_mark_for_shutdown = SHUTDOWN_NONE;
    }

    bool Create();
    void Delete();
    int  ixgbe_prob_init();     // probe DPDK ports, validate counts and driver uniformity
    int  cores_prob_init();     // read lcore count from the EAL
    int  queues_prob_init();    // derive per-port TX queue count from core count
    int  ixgbe_start();         // configure and start all ports and core VIFs
    int  ixgbe_rx_queue_flush();
    void rx_stf_conf();         // configure the stateful (latency) RX core
    void rx_sl_configure();     // configure the stateless RX core
    bool is_all_links_are_up(bool dump=false);
    void pre_test();            // gratuitous ARP + dst MAC resolution before traffic

    /**
     * mark for shutdown
     * on the next check - the control plane will
     * call shutdown()
     */
    void mark_for_shutdown(shutdown_rc_e rc) {

        // first cause wins; later marks are ignored
        if (is_marked_for_shutdown()) {
            return;
        }

        m_mark_for_shutdown = rc;
    }

private:
    void register_signals();

    /* try to stop all datapath cores and RX core */
    void try_stop_all_cores();
    /* send message to all dp cores */
    int  send_message_all_dp(TrexStatelessCpToDpMsgBase *msg);
    int  send_message_to_rx(TrexStatelessCpToRxMsgBase *msg);
    void check_for_dp_message_from_core(int thread_id);

    bool is_marked_for_shutdown() const {
        return (m_mark_for_shutdown != SHUTDOWN_NONE);
    }

    /**
     * shutdown sequence
     *
     */
    void shutdown();

public:
    void check_for_dp_messages();
    int start_master_statefull();
    int start_master_stateless();
    int run_in_core(virtual_thread_id_t virt_core_id);
    // RX core is the last lcore, or -1 when no RX thread is configured
    int core_for_rx(){
        if ( (! get_is_rx_thread_enabled()) ) {
            return -1;
        }else{
            return m_max_cores - 1;
        }
    }
    int run_in_rx_core();
    int run_in_master();

    void handle_fast_path();
    void handle_slow_path();

    int stop_master();
    /* return the minimum number of dp cores needed to support the active ports
       this is for c==1 or  m_cores_mul==1
    */
    int get_base_num_cores(){
        return (m_max_ports>>1);
    }

    // number of DP (TX) cores: total lcores minus master, and minus the
    // latency/RX core when it is enabled
    int get_cores_tx(){
        /* 0 - master
           num_of_cores -
           last for latency */
        if ( (! get_is_rx_thread_enabled()) ) {
            return (m_max_cores - 1 );
        } else {
            return (m_max_cores - BP_MASTER_AND_LATENCY );
        }
    }

private:
    bool is_all_cores_finished();

public:

    void publish_async_data(bool sync_now, bool baseline = false);
    void publish_async_barrier(uint32_t key);
    void publish_async_port_attr_changed(uint8_t port_id);

    void dump_stats(FILE *fd,
                    CGlobalStats::DumpFormat format);
    void dump_template_info(std::string & json);
    bool sanity_check();
    void update_stats(void);
    tx_per_flow_t get_flow_tx_stats(uint8_t port, uint16_t hw_id);
    tx_per_flow_t clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat);
    void get_stats(CGlobalStats & stats);
    float get_cpu_util_per_interface(uint8_t port_id);
    void dump_post_test_stats(FILE *fd);
    void dump_config(FILE *fd);
    void dump_links_status(FILE *fd);

    // find the port whose configured source MAC equals 'mac'; writes the
    // port index into port_id and returns true on a match
    bool lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id);

public:
    port_cfg_t  m_port_cfg;
    uint32_t    m_max_ports;    /* active number of ports supported options are  2,4,8,10,12  */
    uint32_t    m_max_cores;    /* current number of cores , include master and latency  ==> ( master)1+c*(m_max_ports>>1)+1( latency )  */
    uint32_t    m_cores_mul;    /* how cores multipler given  c=4 ==> m_cores_mul */
    uint32_t    m_max_queues_per_port; // Number of TX queues per port
    uint32_t    m_cores_to_dual_ports; /* number of TX cores allocated for each port pair */
    uint16_t    m_rx_core_tx_q_id; /* TX q used by rx core */
    // statistic
    CPPSMeasure  m_cps;
    float        m_expected_pps;
    float        m_expected_cps;
    float        m_expected_bps;//bps
    float        m_last_total_cps;

    CPhyEthIF   m_ports[TREX_MAX_PORTS];
    CCoreEthIF          m_cores_vif_sf[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateful */
    CCoreEthIFStateless m_cores_vif_sl[BP_MAX_CORES]; /* counted from 1 , 2,3 core zero is reserved - stateless*/
    CCoreEthIF *        m_cores_vif[BP_MAX_CORES];
    CParserOption m_po ;
    CFlowGenList  m_fl;
    bool          m_fl_was_init;
    volatile uint8_t       m_signal[BP_MAX_CORES] __rte_cache_aligned ; // Signal to main core when DP thread finished
    volatile bool m_sl_rx_running; // Signal main core when RX thread finished
    CLatencyManager     m_mg; // statefull RX core
    CRxCoreStateless    m_rx_sl; // stateless RX core
    CTrexGlobalIoMode   m_io_modes;
    CTRexExtendedDriverBase * m_drv;

private:
    CLatencyHWPort      m_latency_vports[TREX_MAX_PORTS];    /* read hardware driver */
    CLatencyVmPort      m_latency_vm_vports[TREX_MAX_PORTS]; /* vm driver */
    CLatencyPktInfo     m_latency_pkt;
    TrexPublisher       m_zmq_publisher;
    CGlobalStats        m_stats;
    uint32_t            m_stats_cnt;
    std::mutex          m_cp_lock;

    TrexMonitor         m_monitor;
    shutdown_rc_e       m_mark_for_shutdown;

public:
    TrexStateless       *m_trex_stateless;

};
3401
3402// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
// Before starting, send gratuitous ARP on our addresses, and try to resolve dst MAC addresses.
void CGlobalTRex::pre_test() {
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);
    CPretest pretest(m_max_ports, dpdk_p.rx_data_q_num + dpdk_p.rx_drop_q_num);
    bool resolve_needed = false;
    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
    // NOTE(review): need_grat_arp is only filled in the non-client-cfg branch
    // below, and only read in the matching non-client-cfg branch later, so it
    // is never read uninitialized - but the coupling is implicit.
    bool need_grat_arp[TREX_MAX_PORTS];

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        /* client-config mode: seed the pretest with every next-hop from the
           client config file, plus one source IP per entry */
        std::vector<ClientCfgCompactEntry *> conf;
        m_fl.get_client_cfg_ip_list(conf);

        // If we got src MAC for port in global config, take it, otherwise use src MAC from DPDK
        uint8_t port_macs[m_max_ports][ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            memcpy(port_macs[port_id], CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src, ETHER_ADDR_LEN);
        }

        for (std::vector<ClientCfgCompactEntry *>::iterator it = conf.begin(); it != conf.end(); it++) {
            uint8_t port = (*it)->get_port();
            uint16_t vlan = (*it)->get_vlan();
            uint32_t count = (*it)->get_count();
            uint32_t dst_ip = (*it)->get_dst_ip();
            uint32_t src_ip = (*it)->get_src_ip();

            // one next-hop per consecutive destination IP in the range
            for (int i = 0; i < count; i++) {
                //??? handle ipv6;
                if ((*it)->is_ipv4()) {
                    pretest.add_next_hop(port, dst_ip + i, vlan);
                }
            }
            if (!src_ip) {
                // fall back to the per-port IP from the TRex config file
                src_ip = CGlobalInfo::m_options.m_ip_cfg[port].get_ip();
                if (!src_ip) {
                    fprintf(stderr, "No matching src ip for port: %d ip:%s vlan: %d\n"
                            , port, ip_to_str(dst_ip).c_str(), vlan);
                    fprintf(stderr, "You must specify src_ip in client config file or in TRex config file\n");
                    exit(1);
                }
            }
            pretest.add_ip(port, src_ip, vlan, port_macs[port]);
            COneIPv4Info ipv4(src_ip, vlan, port_macs[port], port);
            m_mg.add_grat_arp_src(ipv4);

            delete *it;
        }
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            fprintf(stdout, "*******Pretest for client cfg********\n");
            pretest.dump(stdout);
            }
    } else {
        /* normal mode: per-port source IP from config; resolve the default
           gateway only for ports whose dest MAC was not given explicitly */
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp( CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                resolve_needed = true;
            } else {
                resolve_needed = false;
            }

            // send gratuitous ARP only for ports that have a source IP configured
            need_grat_arp[port_id] = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip() != 0;

            pretest.add_ip(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                           , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                           , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src);

            if (resolve_needed) {
                pretest.add_next_hop(port_id, CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw()
                                     , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan());
            }
        }
    }

    // promiscuous-like mode so the pretest sees ARP replies in software
    for (int port_id = 0; port_id < m_max_ports; port_id++) {
        CPhyEthIF *pif = &m_ports[port_id];
        // Configure port to send all packets to software
        pif->set_port_rcv_all(true);
    }

    pretest.send_grat_arp_all();
    bool ret;
    int count = 0;
    bool resolve_failed = false;
    // retry resolution up to 10 times before declaring failure
    do {
        ret = pretest.resolve_all();
        count++;
    } while ((ret != true) && (count < 10));
    if (ret != true) {
        resolve_failed = true;
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
        fprintf(stdout, "*******Pretest after resolving ********\n");
        pretest.dump(stdout);
    }

    if (CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
        CManyIPInfo pretest_result;
        pretest.get_results(pretest_result);
        if (resolve_failed) {
            // client-cfg mode is strict: any unresolved IP aborts the run
            fprintf(stderr, "Resolution of following IPs failed. Exiting.\n");
            for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL;
                   ip = pretest_result.get_next()) {
                if (ip->resolve_needed()) {
                    ip->dump(stderr, "  ");
                }
            }
            exit(1);
        }
        m_fl.set_client_config_resolved_macs(pretest_result);
        if ( CGlobalInfo::m_options.preview.getVMode() > 1) {
            m_fl.dump_client_config(stdout);
        }

        bool port_found[TREX_MAX_PORTS];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            port_found[port_id] = false;
        }
        // If client config enabled, we don't resolve MACs from trex_cfg.yaml. For latency (-l)
        // We need to able to send packets from RX core, so need to configure MAC/vlan for each port.
        for (const COneIPInfo *ip=pretest_result.get_next(); ip != NULL; ip = pretest_result.get_next()) {
            // Use first MAC/vlan we see on each port
            uint8_t port_id = ip->get_port();
            uint16_t vlan = ip->get_vlan();
            if ( ! port_found[port_id]) {
                port_found[port_id] = true;
                ip->get_mac(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest);
                CGlobalInfo::m_options.m_ip_cfg[port_id].set_vlan(vlan);
            }
        }
    } else {
        uint8_t mac[ETHER_ADDR_LEN];
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            if (! memcmp(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, empty_mac, ETHER_ADDR_LEN)) {
                // we don't have dest MAC. Get it from what we resolved.
                uint32_t ip = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
                uint16_t vlan = CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan();

                if (!pretest.get_mac(port_id, ip, vlan, mac)) {
                    fprintf(stderr, "Failed resolving dest MAC for default gateway:%d.%d.%d.%d on port %d\n"
                            , (ip >> 24) & 0xFF, (ip >> 16) & 0xFF, (ip >> 8) & 0xFF, ip & 0xFF, port_id);

                    // stateless tolerates unresolved ports (L3 can be set later
                    // via RPC); stateful cannot
                    if (get_is_stateless()) {
                        continue;
                    } else {
                        exit(1);
                    }
                }



                memcpy(CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest, mac, ETHER_ADDR_LEN);
                // if port is connected in loopback, no need to send gratuitous ARP. It will only confuse our ingress counters.
                if (need_grat_arp[port_id] && (! pretest.is_loopback(port_id))) {
                    COneIPv4Info ipv4(CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip()
                                      , CGlobalInfo::m_options.m_ip_cfg[port_id].get_vlan()
                                      , CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.src
                                      , port_id);
                    m_mg.add_grat_arp_src(ipv4);
                }
            }

            // update statistics baseline, so we can ignore what happened in pre test phase
            CPhyEthIF *pif = &m_ports[port_id];
            CPreTestStats pre_stats = pretest.get_stats(port_id);
            pif->set_ignore_stats_base(pre_stats);
            // Configure port back to normal mode. Only relevant packets handled by software.
            pif->set_port_rcv_all(false);
        }
    }

    /* for stateless only - set port mode */
    if (get_is_stateless()) {
        for (int port_id = 0; port_id < m_max_ports; port_id++) {
            uint32_t src_ipv4 = CGlobalInfo::m_options.m_ip_cfg[port_id].get_ip();
            uint32_t dg = CGlobalInfo::m_options.m_ip_cfg[port_id].get_def_gw();
            const uint8_t *dst_mac = CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.dest;

            /* L3 mode */
            if (src_ipv4 && dg) {
                if (memcmp(dst_mac, empty_mac, 6) == 0) {
                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg);
                } else {
                    m_trex_stateless->get_port_by_id(port_id)->set_l3_mode(src_ipv4, dg, dst_mac);
                }

            /* L2 mode */
            } else if (CGlobalInfo::m_options.m_mac_addr[port_id].u.m_mac.is_set) {
                m_trex_stateless->get_port_by_id(port_id)->set_l2_mode(dst_mac);
            }
        }
    }


}
3596
3597/**
3598 * check for a single core
3599 *
3600 * @author imarom (19-Nov-15)
3601 *
3602 * @param thread_id
3603 */
3604void
3605CGlobalTRex::check_for_dp_message_from_core(int thread_id) {
3606
3607    CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingDpToCp(thread_id);
3608
3609    /* fast path check */
3610    if ( likely ( ring->isEmpty() ) ) {
3611        return;
3612    }
3613
3614    while ( true ) {
3615        CGenNode * node = NULL;
3616        if (ring->Dequeue(node) != 0) {
3617            break;
3618        }
3619        assert(node);
3620
3621        TrexStatelessDpToCpMsgBase * msg = (TrexStatelessDpToCpMsgBase *)node;
3622        msg->handle();
3623        delete msg;
3624    }
3625
3626}
3627
3628/**
3629 * check for messages that arrived from DP to CP
3630 *
3631 */
3632void
3633CGlobalTRex::check_for_dp_messages() {
3634
3635    /* for all the cores - check for a new message */
3636    for (int i = 0; i < get_cores_tx(); i++) {
3637        check_for_dp_message_from_core(i);
3638    }
3639}
3640
3641bool CGlobalTRex::is_all_links_are_up(bool dump){
3642    bool all_link_are=true;
3643    int i;
3644    for (i=0; i<m_max_ports; i++) {
3645        CPhyEthIF * _if=&m_ports[i];
3646        _if->get_port_attr()->update_link_status();
3647        if ( dump ){
3648            _if->dump_stats(stdout);
3649        }
3650        if ( _if->get_port_attr()->is_link_up() == false){
3651            all_link_are=false;
3652            break;
3653        }
3654    }
3655    return (all_link_are);
3656}
3657
3658void CGlobalTRex::try_stop_all_cores(){
3659
3660    TrexStatelessDpQuit * dp_msg= new TrexStatelessDpQuit();
3661    send_message_all_dp(dp_msg);
3662    delete dp_msg;
3663
3664    if (get_is_stateless()) {
3665        TrexStatelessRxQuit * rx_msg= new TrexStatelessRxQuit();
3666        send_message_to_rx(rx_msg);
3667    }
3668
3669    // no need to delete rx_msg. Deleted by receiver
3670    bool all_core_finished = false;
3671    int i;
3672    for (i=0; i<20; i++) {
3673        if ( is_all_cores_finished() ){
3674            all_core_finished =true;
3675            break;
3676        }
3677        delay(100);
3678    }
3679    if ( all_core_finished ){
3680        m_zmq_publisher.publish_event(TrexPublisher::EVENT_SERVER_STOPPED);
3681        printf(" All cores stopped !! \n");
3682    }else{
3683        printf(" ERROR one of the DP core is stucked !\n");
3684    }
3685}
3686
3687
3688int  CGlobalTRex::send_message_all_dp(TrexStatelessCpToDpMsgBase *msg){
3689
3690    int max_threads=(int)CMsgIns::Ins()->getCpDp()->get_num_threads();
3691    int i;
3692
3693    for (i=0; i<max_threads; i++) {
3694        CNodeRing *ring = CMsgIns::Ins()->getCpDp()->getRingCpToDp((uint8_t)i);
3695        ring->Enqueue((CGenNode*)msg->clone());
3696    }
3697    return (0);
3698}
3699
3700int CGlobalTRex::send_message_to_rx(TrexStatelessCpToRxMsgBase *msg) {
3701    CNodeRing *ring = CMsgIns::Ins()->getCpRx()->getRingCpToDp(0);
3702    ring->Enqueue((CGenNode *) msg);
3703
3704    return (0);
3705}
3706
3707
3708int  CGlobalTRex::ixgbe_rx_queue_flush(){
3709    int i;
3710    for (i=0; i<m_max_ports; i++) {
3711        CPhyEthIF * _if=&m_ports[i];
3712        _if->flush_rx_queue();
3713    }
3714    return (0);
3715}
3716
3717
3718// init stateful rx core
3719void CGlobalTRex::rx_stf_conf(void) {
3720    int i;
3721    CLatencyManagerCfg mg_cfg;
3722    mg_cfg.m_max_ports = m_max_ports;
3723
3724    uint32_t latency_rate=CGlobalInfo::m_options.m_latency_rate;
3725
3726    if ( latency_rate ) {
3727        mg_cfg.m_cps = (double)latency_rate ;
3728    } else {
3729        // If RX core needed, we need something to make the scheduler running.
3730        // If nothing configured, send 1 CPS latency measurement packets.
3731        if (CGlobalInfo::m_options.m_arp_ref_per == 0) {
3732            mg_cfg.m_cps = 1.0;
3733        } else {
3734            mg_cfg.m_cps = 0;
3735        }
3736    }
3737
3738    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
3739        /* vm mode, indirect queues  */
3740        for (i=0; i<m_max_ports; i++) {
3741            CPhyEthIF * _if = &m_ports[i];
3742            CMessagingManager * rx_dp=CMsgIns::Ins()->getRxDp();
3743
3744            uint8_t thread_id = (i>>1);
3745
3746            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3747            m_latency_vm_vports[i].Create((uint8_t)i, r, &m_mg, _if);
3748
3749            mg_cfg.m_ports[i] =&m_latency_vm_vports[i];
3750        }
3751
3752    }else{
3753        for (i=0; i<m_max_ports; i++) {
3754            CPhyEthIF * _if=&m_ports[i];
3755            _if->dump_stats(stdout);
3756            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3757
3758            mg_cfg.m_ports[i] =&m_latency_vports[i];
3759        }
3760    }
3761
3762
3763    m_mg.Create(&mg_cfg);
3764    m_mg.set_mask(CGlobalInfo::m_options.m_latency_mask);
3765}
3766
3767// init m_rx_sl object for stateless rx core
3768void CGlobalTRex::rx_sl_configure(void) {
3769    CRxSlCfg rx_sl_cfg;
3770    int i;
3771
3772    rx_sl_cfg.m_max_ports = m_max_ports;
3773    rx_sl_cfg.m_tx_cores  = get_cores_tx();
3774
3775    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
3776        /* vm mode, indirect queues  */
3777        for (i=0; i < m_max_ports; i++) {
3778            CPhyEthIF * _if = &m_ports[i];
3779            CMessagingManager * rx_dp = CMsgIns::Ins()->getRxDp();
3780            uint8_t thread_id = (i >> 1);
3781            CNodeRing * r = rx_dp->getRingCpToDp(thread_id);
3782            m_latency_vm_vports[i].Create(i, r, &m_mg, _if);
3783            rx_sl_cfg.m_ports[i] = &m_latency_vm_vports[i];
3784        }
3785    } else {
3786        for (i = 0; i < m_max_ports; i++) {
3787            CPhyEthIF * _if = &m_ports[i];
3788            m_latency_vports[i].Create(_if, m_rx_core_tx_q_id, 1);
3789            rx_sl_cfg.m_ports[i] = &m_latency_vports[i];
3790        }
3791    }
3792
3793    m_rx_sl.create(rx_sl_cfg);
3794}
3795
// bring-up of all ports and per-core virtual interfaces:
// 1) create/configure/start each DPDK port, 2) wait for links,
// 3) flush stale RX packets, 4) configure the stateful RX core (STF only),
// 5) wire each TX core to its port pair and TX queue, 6) dump the layout.
int  CGlobalTRex::ixgbe_start(void){
    int i;
    for (i=0; i<m_max_ports; i++) {
        // mbuf pool for the port's NUMA socket must already exist (init_pools)
        socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)i);
        assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        CPhyEthIF * _if=&m_ports[i];
        _if->Create((uint8_t)i);
        _if->conf_queues();
        _if->stats_clear();
        _if->start();
        _if->configure_rx_duplicate_rules();

        if ( ! CGlobalInfo::m_options.preview.get_is_disable_flow_control_setting()
             && _if->get_port_attr()->is_fc_change_supported()) {
            _if->disable_flow_control();
        }

        _if->get_port_attr()->add_mac((char *)CGlobalInfo::m_options.get_src_mac_addr(i));

        fflush(stdout);
    }

    if ( !is_all_links_are_up()  ){
        /* wait for ports to be stable */
        get_ex_drv()->wait_for_stable_link();

        if ( !is_all_links_are_up() /*&& !get_is_stateless()*/ ){ // disable start with link down for now

            /* temporary solution for trex-192 issue, solve the case for X710/XL710, will work for both Statless and Stateful */
            if (  get_ex_drv()->drop_packets_incase_of_linkdown() ){
                printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
            }else{
                dump_links_status(stdout);
                rte_exit(EXIT_FAILURE, " One of the links is down \n");
            }
        }
    } else {
        get_ex_drv()->wait_after_link_up();
    }

    dump_links_status(stdout);

    // discard anything received while links were coming up
    ixgbe_rx_queue_flush();

    if (! get_is_stateless()) {
        rx_stf_conf();
    }


    /* core 0 - control
       core 1 - port 0-0,1-0,
       core 2 - port 2-0,3-0,
       core 3 - port 0-1,1-1,
       core 4 - port 2-1,3-1,

    */
    int port_offset=0;
    uint8_t lat_q_id;

    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
        lat_q_id = 0;
    } else {
        // latency queue sits after all per-core data queues
        lat_q_id = get_cores_tx() / get_base_num_cores() + 1;
    }
    for (i=0; i<get_cores_tx(); i++) {
        int j=(i+1);   /* core VIFs are 1-based; core 0 is the master */
        int queue_id=((j-1)/get_base_num_cores() );   /* for the first min core queue 0 , then queue 1 etc */
        if ( get_is_stateless() ){
            m_cores_vif[j]=&m_cores_vif_sl[j];
        }else{
            m_cores_vif[j]=&m_cores_vif_sf[j];
        }
        m_cores_vif[j]->Create(j,
                               queue_id,
                               &m_ports[port_offset], /* 0,2*/
                               queue_id,
                               &m_ports[port_offset+1], /*1,3*/
                               lat_q_id);
        port_offset+=2;
        if (port_offset == m_max_ports) {
            port_offset = 0;
            // We want to allow sending latency packets only from first core handling a port
            lat_q_id = CCoreEthIF::INVALID_Q_ID;
        }
    }

    fprintf(stdout," -------------------------------\n");
    fprintf(stdout, "RX core uses TX queue number %d on all ports\n", m_rx_core_tx_q_id);
    CCoreEthIF::DumpIfCfgHeader(stdout);
    for (i=0; i<get_cores_tx(); i++) {
        m_cores_vif[i+1]->DumpIfCfg(stdout);
    }
    fprintf(stdout," -------------------------------\n");

    return (0);
}
3892
3893static void trex_termination_handler(int signum);
3894
3895void CGlobalTRex::register_signals() {
3896    struct sigaction action;
3897
3898    /* handler */
3899    action.sa_handler = trex_termination_handler;
3900
3901    /* blocked signals during handling */
3902    sigemptyset(&action.sa_mask);
3903    sigaddset(&action.sa_mask, SIGINT);
3904    sigaddset(&action.sa_mask, SIGTERM);
3905
3906    /* no flags */
3907    action.sa_flags = 0;
3908
3909    /* register */
3910    sigaction(SIGINT,  &action, NULL);
3911    sigaction(SIGTERM, &action, NULL);
3912}
3913
// one-time server initialization: signals, YAML pre-load (STF), ZMQ
// publisher, port/core/queue probing, CP<->DP rings, mbuf pools, port
// start, and finally the stateless object + RX core when in STL mode.
// Returns false only if the ZMQ publisher could not be created; most
// other failures abort via assert/rte_exit inside the helpers.
bool CGlobalTRex::Create(){
    CFlowsYamlInfo     pre_yaml_info;

    register_signals();

    m_stats_cnt =0;
    if (!get_is_stateless()) {
        // stateful: read the traffic YAML early so pre-flags (e.g. VLAN
        // mode) can be applied before port init
        pre_yaml_info.load_from_yaml_file(CGlobalInfo::m_options.cfg_file);
        if ( CGlobalInfo::m_options.preview.getVMode() > 0){
            CGlobalInfo::m_options.dump(stdout);
            CGlobalInfo::m_memory_cfg.Dump(stdout);
        }
    }

    if ( !m_zmq_publisher.Create( CGlobalInfo::m_options.m_zmq_port,
                                  !CGlobalInfo::m_options.preview.get_zmq_publish_enable() ) ){
        return (false);
    }

    if ( pre_yaml_info.m_vlan_info.m_enable ){
        CGlobalInfo::m_options.preview.set_vlan_mode_verify(CPreviewMode::VLAN_MODE_LOAD_BALANCE);
    }
    /* End update pre flags */

    ixgbe_prob_init();
    cores_prob_init();
    queues_prob_init();

    /* allocate rings */
    assert( CMsgIns::Ins()->Create(get_cores_tx()) );

    // message nodes are reused as CGenNode slots, so their sizes must match
    if ( sizeof(CGenNodeNatInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeNatInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeNatInfo),sizeof(CGenNode));
        assert(0);
    }

    if ( sizeof(CGenNodeLatencyPktInfo) != sizeof(CGenNode)  ) {
        printf("ERROR sizeof(CGenNodeLatencyPktInfo) %lu != sizeof(CGenNode) %lu must be the same size \n",sizeof(CGenNodeLatencyPktInfo),sizeof(CGenNode));
        assert(0);
    }

    /* allocate the memory */
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);

    // pool must cover every RX descriptor of every queue on every port
    CGlobalInfo::init_pools(m_max_ports *
                            (dpdk_p.rx_data_q_num * dpdk_p.rx_desc_num_data_q +
                             dpdk_p.rx_drop_q_num * dpdk_p.rx_desc_num_drop_q));
    ixgbe_start();
    dump_config(stdout);

    /* start stateless */
    if (get_is_stateless()) {

        TrexStatelessCfg cfg;

        TrexRpcServerConfig rpc_req_resp_cfg(TrexRpcServerConfig::RPC_PROT_TCP,
                                             global_platform_cfg_info.m_zmq_rpc_port,
                                             &m_cp_lock);

        cfg.m_port_count         = CGlobalInfo::m_options.m_expected_portd;
        cfg.m_rpc_req_resp_cfg   = &rpc_req_resp_cfg;
        cfg.m_rpc_server_verbose = false;
        cfg.m_platform_api       = new TrexDpdkPlatformApi();
        cfg.m_publisher          = &m_zmq_publisher;

        m_trex_stateless = new TrexStateless(cfg);

        rx_sl_configure();
    }

    return (true);

}
3988void CGlobalTRex::Delete(){
3989
3990    m_zmq_publisher.Delete();
3991
3992    if (m_trex_stateless) {
3993        delete m_trex_stateless;
3994        m_trex_stateless = NULL;
3995    }
3996
3997    m_fl.Delete();
3998
3999}
4000
4001
4002
// probe the DPDK ports: count them, validate the count against the config,
// require an even number and a single driver type across all ports, verify
// firmware versions, and apply queue-mode constraints. Aborts the process
// (rte_exit/exit) on any violation.
int  CGlobalTRex::ixgbe_prob_init(void){

    m_max_ports  = rte_eth_dev_count();
    if (m_max_ports == 0)
        rte_exit(EXIT_FAILURE, "Error: Could not find supported ethernet ports. You are probably trying to use unsupported NIC \n");

    printf(" Number of ports found: %d \n",m_max_ports);

    // ports are always used in pairs (port i <-> port i^1)
    if ( m_max_ports %2 !=0 ) {
        rte_exit(EXIT_FAILURE, " Number of ports in config file is %d. It should be even. Please use --limit-ports, or change 'port_limit:' in the config file\n",
                 m_max_ports);
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > TREX_MAX_PORTS ) {
        rte_exit(EXIT_FAILURE, " Maximum number of ports supported is %d. You are trying to use %d. Please use --limit-ports, or change 'port_limit:' in the config file\n"
                 ,TREX_MAX_PORTS, CGlobalInfo::m_options.get_expected_ports());
    }

    if ( CGlobalInfo::m_options.get_expected_ports() > m_max_ports ){
        rte_exit(EXIT_FAILURE, " There are %d ports available. You are trying to use %d. Please use --limit-ports, or change 'port_limit:' in the config file\n",
                 m_max_ports,
                 CGlobalInfo::m_options.get_expected_ports());
    }
    if (CGlobalInfo::m_options.get_expected_ports() < m_max_ports ) {
        /* limit the number of ports */
        m_max_ports=CGlobalInfo::m_options.get_expected_ports();
    }
    assert(m_max_ports <= TREX_MAX_PORTS);

    // port 0's device info is the reference the other ports are checked against
    struct rte_eth_dev_info dev_info;
    rte_eth_dev_info_get((uint8_t) 0,&dev_info);

    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
        printf("\n\n");
        printf("if_index : %d \n",dev_info.if_index);
        printf("driver name : %s \n",dev_info.driver_name);
        printf("min_rx_bufsize : %d \n",dev_info.min_rx_bufsize);
        printf("max_rx_pktlen  : %d \n",dev_info.max_rx_pktlen);
        printf("max_rx_queues  : %d \n",dev_info.max_rx_queues);
        printf("max_tx_queues  : %d \n",dev_info.max_tx_queues);
        printf("max_mac_addrs  : %d \n",dev_info.max_mac_addrs);

        printf("rx_offload_capa : 0x%x \n",dev_info.rx_offload_capa);
        printf("tx_offload_capa : 0x%x \n",dev_info.tx_offload_capa);
        printf("rss reta_size   : %d \n",dev_info.reta_size);
        printf("flow_type_rss   : 0x%lx \n",dev_info.flow_type_rss_offloads);
    }

    int i;
    struct rte_eth_dev_info dev_info1;

    // mixing NIC types is not supported - all ports must share one driver
    for (i=1; i<m_max_ports; i++) {
        rte_eth_dev_info_get((uint8_t) i,&dev_info1);
        if ( strcmp(dev_info1.driver_name,dev_info.driver_name)!=0) {
            printf(" ERROR all device should have the same type  %s != %s \n",dev_info1.driver_name,dev_info.driver_name);
            exit(1);
        }
    }

    m_drv = CTRexExtendedDriverDb::Ins()->get_drv();

    // check if firmware version is new enough
    for (i = 0; i < m_max_ports; i++) {
        if (m_drv->verify_fw_ver(i) < 0) {
            // error message printed by verify_fw_ver
            exit(1);
        }
    }

    m_port_cfg.update_var();

    if ( get_is_rx_filter_enable() ){
        m_port_cfg.update_global_config_fdir();
    }

    if (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) {
        /* verify that we have only one thread/core per dual- interface */
        if ( CGlobalInfo::m_options.preview.getCores()>1 ) {
            printf("Error: the number of cores should be 1 when the driver support only one tx queue and one rx queue. Please use -c 1 \n");
            exit(1);
        }
    }
    return (0);
}
4087
4088int  CGlobalTRex::cores_prob_init(){
4089    m_max_cores = rte_lcore_count();
4090    assert(m_max_cores>0);
4091    return (0);
4092}
4093
4094int  CGlobalTRex::queues_prob_init(){
4095
4096    if (m_max_cores < 2) {
4097        rte_exit(EXIT_FAILURE, "number of cores should be at least 2 \n");
4098    }
4099
4100    assert((m_max_ports>>1) <= get_cores_tx() );
4101
4102    m_cores_mul = CGlobalInfo::m_options.preview.getCores();
4103
4104    m_cores_to_dual_ports  = m_cores_mul;
4105
4106    /* core 0 - control
4107       -core 1 - port 0/1
4108       -core 2 - port 2/3
4109       -core 3 - port 0/1
4110       -core 4 - port 2/3
4111
4112       m_cores_to_dual_ports = 2;
4113    */
4114
4115    // One q for each core allowed to send on this port + 1 for latency q (Used in stateless) + 1 for RX core.
4116    m_max_queues_per_port  = m_cores_to_dual_ports + 2;
4117
4118    if (m_max_queues_per_port > BP_MAX_TX_QUEUE) {
4119        rte_exit(EXIT_FAILURE,
4120                 "Error: Number of TX queues exceeds %d. Try running with lower -c <val> \n",BP_MAX_TX_QUEUE);
4121    }
4122
4123    assert(m_max_queues_per_port>0);
4124    return (0);
4125}
4126
4127
4128void CGlobalTRex::dump_config(FILE *fd){
4129    fprintf(fd," number of ports         : %u \n",m_max_ports);
4130    fprintf(fd," max cores for 2 ports   : %u \n",m_cores_to_dual_ports);
4131    fprintf(fd," max queue per port      : %u \n",m_max_queues_per_port);
4132}
4133
4134
4135void CGlobalTRex::dump_links_status(FILE *fd){
4136    for (int i=0; i<m_max_ports; i++) {
4137        m_ports[i].get_port_attr()->update_link_status_nowait();
4138        m_ports[i].get_port_attr()->dump_link(fd);
4139    }
4140}
4141
4142bool CGlobalTRex::lookup_port_by_mac(const uint8_t *mac, uint8_t &port_id) {
4143    for (int i = 0; i < m_max_ports; i++) {
4144        if (memcmp(m_ports[i].get_port_attr()->get_layer_cfg().get_ether().get_src(), mac, 6) == 0) {
4145            port_id = i;
4146            return true;
4147        }
4148    }
4149
4150    return false;
4151}
4152
void CGlobalTRex::dump_post_test_stats(FILE *fd){
    /* Print an end-of-test summary: hardware port counters vs. software
       (per-core) counters, drop totals, ARP counts and, when latency is
       enabled, the latency results. */
    uint64_t pkt_out=0;
    uint64_t pkt_out_bytes=0;
    uint64_t pkt_in_bytes=0;
    uint64_t pkt_in=0;
    uint64_t sw_pkt_out=0;
    uint64_t sw_pkt_out_err=0;
    uint64_t sw_pkt_out_bytes=0;
    uint64_t tx_arp = 0;
    uint64_t rx_arp = 0;

    int i;
    /* software view: sum the TX counters of every DP core
       (m_cores_vif is 1-based: slot 0 is the control core) */
    for (i=0; i<get_cores_tx(); i++) {
        CCoreEthIF * erf_vif = m_cores_vif[i+1];
        CVirtualIFPerSideStats stats;
        erf_vif->GetCoreCounters(&stats);
        sw_pkt_out     += stats.m_tx_pkt;
        sw_pkt_out_err += stats.m_tx_drop +stats.m_tx_queue_full +stats.m_tx_alloc_error ;
        sw_pkt_out_bytes +=stats.m_tx_bytes;
    }


    /* hardware view: sum the RX/TX counters of every physical port */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        pkt_in  +=_if->get_stats().ipackets;
        pkt_in_bytes +=_if->get_stats().ibytes;
        pkt_out +=_if->get_stats().opackets;
        pkt_out_bytes +=_if->get_stats().obytes;
        tx_arp += _if->get_ignore_stats().get_tx_arp();
        rx_arp += _if->get_ignore_stats().get_rx_arp();
    }
    /* latency probe packets are sent by the latency manager, not the DP
       cores - fold them into the software totals */
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        sw_pkt_out += m_mg.get_total_pkt();
        sw_pkt_out_bytes +=m_mg.get_total_bytes();
    }


    fprintf (fd," summary stats \n");
    fprintf (fd," -------------- \n");

    /* more RX than TX cannot be real drops; report 0 and warn when the
       excess is above 1% (duplication / external traffic suspected) */
    if (pkt_in > pkt_out)
        {
            fprintf (fd, " Total-pkt-drop       : 0 pkts \n");
            if (pkt_in > pkt_out * 1.01)
                fprintf (fd, " Warning : number of rx packets exceeds 101%% of tx packets!\n");
        }
    else
        fprintf (fd, " Total-pkt-drop       : %llu pkts \n", (unsigned long long) (pkt_out - pkt_in));
    for (i=0; i<m_max_ports; i++) {
        if ( m_stats.m_port[i].m_link_was_down ) {
            fprintf (fd, " WARNING: Link was down at port %d during test (at least for some time)!\n", i);
        }
    }
    fprintf (fd," Total-tx-bytes       : %llu bytes \n", (unsigned long long)pkt_out_bytes);
    fprintf (fd," Total-tx-sw-bytes    : %llu bytes \n", (unsigned long long)sw_pkt_out_bytes);
    fprintf (fd," Total-rx-bytes       : %llu byte \n", (unsigned long long)pkt_in_bytes);

    fprintf (fd," \n");

    fprintf (fd," Total-tx-pkt         : %llu pkts \n", (unsigned long long)pkt_out);
    fprintf (fd," Total-rx-pkt         : %llu pkts \n", (unsigned long long)pkt_in);
    fprintf (fd," Total-sw-tx-pkt      : %llu pkts \n", (unsigned long long)sw_pkt_out);
    fprintf (fd," Total-sw-err         : %llu pkts \n", (unsigned long long)sw_pkt_out_err);
    fprintf (fd," Total ARP sent       : %llu pkts \n", (unsigned long long)tx_arp);
    fprintf (fd," Total ARP received   : %llu pkts \n", (unsigned long long)rx_arp);


    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf (fd," maximum-latency   : %.0f usec \n",m_mg.get_max_latency());
        fprintf (fd," average-latency   : %.0f usec \n",m_mg.get_avr_latency());
        fprintf (fd," latency-any-error : %s  \n",m_mg.is_any_error()?"ERROR":"OK");
    }


}
4228
4229
4230void CGlobalTRex::update_stats(){
4231
4232    int i;
4233    for (i=0; i<m_max_ports; i++) {
4234        CPhyEthIF * _if=&m_ports[i];
4235        _if->update_counters();
4236    }
4237    uint64_t total_open_flows=0;
4238
4239
4240    CFlowGenListPerThread   * lpt;
4241    for (i=0; i<get_cores_tx(); i++) {
4242        lpt = m_fl.m_threads_info[i];
4243        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
4244    }
4245    m_last_total_cps = m_cps.add(total_open_flows);
4246
4247}
4248
4249tx_per_flow_t CGlobalTRex::get_flow_tx_stats(uint8_t port, uint16_t index) {
4250    return m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4251}
4252
4253// read stats. Return read value, and clear.
4254tx_per_flow_t CGlobalTRex::clear_flow_tx_stats(uint8_t port, uint16_t index, bool is_lat) {
4255    uint8_t port0;
4256    CFlowGenListPerThread * lpt;
4257    tx_per_flow_t ret;
4258
4259    m_stats.m_port[port].m_tx_per_flow[index].clear();
4260
4261    for (int i=0; i < get_cores_tx(); i++) {
4262        lpt = m_fl.m_threads_info[i];
4263        port0 = lpt->getDualPortId() * 2;
4264        if ((port == port0) || (port == port0 + 1)) {
4265            m_stats.m_port[port].m_tx_per_flow[index] +=
4266                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_tx_per_flow[index];
4267            if (is_lat)
4268                lpt->m_node_gen.m_v_if->m_stats[port - port0].m_lat_data[index - MAX_FLOW_STATS].reset();
4269        }
4270    }
4271
4272    ret = m_stats.m_port[port].m_tx_per_flow[index] - m_stats.m_port[port].m_prev_tx_per_flow[index];
4273
4274    // Since we return diff from prev, following "clears" the stats.
4275    m_stats.m_port[port].m_prev_tx_per_flow[index] = m_stats.m_port[port].m_tx_per_flow[index];
4276
4277    return ret;
4278}
4279
void CGlobalTRex::get_stats(CGlobalStats & stats){
    /* Build a complete statistics snapshot into 'stats': per-port HW
       counters and rates, per-core SW counters, flow/NAT/socket totals and
       the derived global rates (bps/pps/cps, drop rate, bw per core). */

    int i;
    float total_tx=0.0;
    float total_rx=0.0;
    float total_tx_pps=0.0;
    float total_rx_pps=0.0;

    /* reset the accumulated totals before summing ports and cores */
    stats.m_total_tx_pkts  = 0;
    stats.m_total_rx_pkts  = 0;
    stats.m_total_tx_bytes = 0;
    stats.m_total_rx_bytes = 0;
    stats.m_total_alloc_error=0;
    stats.m_total_queue_full=0;
    stats.m_total_queue_drop=0;


    stats.m_num_of_ports = m_max_ports;
    stats.m_cpu_util = m_fl.GetCpuUtil();
    stats.m_cpu_util_raw = m_fl.GetCpuUtilRaw();
    if (get_is_stateless()) {
        stats.m_rx_cpu_util = m_rx_sl.get_cpu_util();
    }
    stats.m_threads      = m_fl.m_threads_info.size();

    /* per-port: copy HW counters/rates and accumulate global totals */
    for (i=0; i<m_max_ports; i++) {
        CPhyEthIF * _if=&m_ports[i];
        CPerPortStats * stp=&stats.m_port[i];

        CPhyEthIFStats & st =_if->get_stats();

        stp->opackets = st.opackets;
        stp->obytes   = st.obytes;
        stp->ipackets = st.ipackets;
        stp->ibytes   = st.ibytes;
        stp->ierrors  = st.ierrors;
        stp->oerrors  = st.oerrors;
        stp->m_total_tx_bps = _if->get_last_tx_rate()*_1Mb_DOUBLE;
        stp->m_total_tx_pps = _if->get_last_tx_pps_rate();
        stp->m_total_rx_bps = _if->get_last_rx_rate()*_1Mb_DOUBLE;
        stp->m_total_rx_pps = _if->get_last_rx_pps_rate();
        stp->m_link_up        = _if->get_port_attr()->is_link_up();
        /* sticky flag: remember if the link was ever observed down */
        stp->m_link_was_down |= ! _if->get_port_attr()->is_link_up();

        stats.m_total_tx_pkts  += st.opackets;
        stats.m_total_rx_pkts  += st.ipackets;
        stats.m_total_tx_bytes += st.obytes;
        stats.m_total_rx_bytes += st.ibytes;

        total_tx +=_if->get_last_tx_rate();
        total_rx +=_if->get_last_rx_rate();
        total_tx_pps +=_if->get_last_tx_pps_rate();
        total_rx_pps +=_if->get_last_rx_pps_rate();
        /* clear per-flow TX counters here; they are re-accumulated from the
           per-core counters in the loop below */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[i].m_tx_per_flow[flow].clear();
        }

        stp->m_cpu_util = get_cpu_util_per_interface(i);

    }

    uint64_t total_open_flows=0;
    uint64_t total_active_flows=0;

    uint64_t total_clients=0;
    uint64_t total_servers=0;
    uint64_t active_sockets=0;
    uint64_t total_sockets=0;


    uint64_t total_nat_time_out =0;
    uint64_t total_nat_time_out_wait_ack =0;
    uint64_t total_nat_no_fid   =0;
    uint64_t total_nat_active   =0;
    uint64_t total_nat_syn_wait = 0;
    uint64_t total_nat_open     =0;
    uint64_t total_nat_learn_error=0;

    /* per-core: accumulate SW counters, flow/socket/NAT totals and the
       per-port per-flow counters */
    CFlowGenListPerThread   * lpt;
    stats.m_template.Clear();
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        total_open_flows +=   lpt->m_stats.m_total_open_flows ;
        total_active_flows += (lpt->m_stats.m_total_open_flows-lpt->m_stats.m_total_close_flows) ;

        /* m_stats[0]/m_stats[1] are the two directions of the dual-port */
        stats.m_total_alloc_error += lpt->m_node_gen.m_v_if->m_stats[0].m_tx_alloc_error+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_alloc_error;
        stats.m_total_queue_full +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_queue_full+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_queue_full;

        stats.m_total_queue_drop +=lpt->m_node_gen.m_v_if->m_stats[0].m_tx_drop+
            lpt->m_node_gen.m_v_if->m_stats[1].m_tx_drop;

        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[0].m_template);
        stats.m_template.Add(&lpt->m_node_gen.m_v_if->m_stats[1].m_template);


        total_clients   += lpt->m_smart_gen.getTotalClients();
        total_servers   += lpt->m_smart_gen.getTotalServers();
        active_sockets  += lpt->m_smart_gen.ActiveSockets();
        total_sockets   += lpt->m_smart_gen.MaxSockets();

        total_nat_time_out +=lpt->m_stats.m_nat_flow_timeout;
        total_nat_time_out_wait_ack += lpt->m_stats.m_nat_flow_timeout_wait_ack;
        total_nat_no_fid   +=lpt->m_stats.m_nat_lookup_no_flow_id ;
        total_nat_active   +=lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_remove_flow_id;
        total_nat_syn_wait += lpt->m_stats.m_nat_lookup_add_flow_id - lpt->m_stats.m_nat_lookup_wait_ack_state;
        total_nat_open     +=lpt->m_stats.m_nat_lookup_add_flow_id;
        total_nat_learn_error   +=lpt->m_stats.m_nat_flow_learn_error;
        uint8_t port0 = lpt->getDualPortId() *2;
        /* fold this core's per-flow TX counters into both ports of its
           dual-interface (cleared above in the per-port loop) */
        // IP ID rules
        for (uint16_t flow = 0; flow <= max_stat_hw_id_seen; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }
        // payload rules
        for (uint16_t flow = MAX_FLOW_STATS; flow <= MAX_FLOW_STATS + max_stat_hw_id_seen_payload; flow++) {
            stats.m_port[port0].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[0].m_tx_per_flow[flow];
            stats.m_port[port0 + 1].m_tx_per_flow[flow] +=
                lpt->m_node_gen.m_v_if->m_stats[1].m_tx_per_flow[flow];
        }

    }

    stats.m_total_nat_time_out = total_nat_time_out;
    stats.m_total_nat_time_out_wait_ack = total_nat_time_out_wait_ack;
    stats.m_total_nat_no_fid   = total_nat_no_fid;
    stats.m_total_nat_active   = total_nat_active;
    stats.m_total_nat_syn_wait = total_nat_syn_wait;
    stats.m_total_nat_open     = total_nat_open;
    stats.m_total_nat_learn_error     = total_nat_learn_error;

    stats.m_total_clients = total_clients;
    stats.m_total_servers = total_servers;
    stats.m_active_sockets = active_sockets;

    /* socket utilization in percent; guard against division by zero */
    if (total_sockets != 0) {
        stats.m_socket_util =100.0*(double)active_sockets/(double)total_sockets;
    } else {
        stats.m_socket_util = 0;
    }



    /* ignore negative drop or drop below 10% of TX (measurement noise) */
    float drop_rate=total_tx-total_rx;
    if ( (drop_rate<0.0)  || (drop_rate < 0.1*total_tx ) )  {
        drop_rate=0.0;
    }
    /* platform factor scales reported rates (e.g. for split setups) */
    float pf =CGlobalInfo::m_options.m_platform_factor;
    stats.m_platform_factor = pf;

    stats.m_active_flows = total_active_flows*pf;
    stats.m_open_flows   = total_open_flows*pf;
    stats.m_rx_drop_bps   = drop_rate*pf *_1Mb_DOUBLE;

    stats.m_tx_bps        = total_tx*pf*_1Mb_DOUBLE;
    stats.m_rx_bps        = total_rx*pf*_1Mb_DOUBLE;
    stats.m_tx_pps        = total_tx_pps*pf;
    stats.m_rx_pps        = total_rx_pps*pf;
    stats.m_tx_cps        = m_last_total_cps*pf;
    /* Gb per core: x2 because each core handles both TX and RX directions;
       guarded against a near-zero CPU reading */
    if(stats.m_cpu_util < 0.0001)
        stats.m_bw_per_core = 0;
    else
        stats.m_bw_per_core   = 2*(stats.m_tx_bps/1e9)*100.0/(stats.m_cpu_util*stats.m_threads);

    stats.m_tx_expected_cps        = m_expected_cps*pf;
    stats.m_tx_expected_pps        = m_expected_pps*pf;
    stats.m_tx_expected_bps        = m_expected_bps*pf;
}
4457
4458float
4459CGlobalTRex::get_cpu_util_per_interface(uint8_t port_id) {
4460    CPhyEthIF * _if = &m_ports[port_id];
4461
4462    float    tmp = 0;
4463    uint8_t  cnt = 0;
4464    for (const auto &p : _if->get_core_list()) {
4465        uint8_t core_id = p.first;
4466        CFlowGenListPerThread *lp = m_fl.m_threads_info[core_id];
4467        if (lp->is_port_active(port_id)) {
4468            tmp += lp->m_cpu_cp_u.GetVal();
4469            cnt++;
4470        }
4471    }
4472
4473    return ( (cnt > 0) ? (tmp / cnt) : 0);
4474
4475}
4476
4477bool CGlobalTRex::sanity_check(){
4478
4479    CFlowGenListPerThread   * lpt;
4480    uint32_t errors=0;
4481    int i;
4482    for (i=0; i<get_cores_tx(); i++) {
4483        lpt = m_fl.m_threads_info[i];
4484        errors   += lpt->m_smart_gen.getErrorAllocationCounter();
4485    }
4486
4487    if ( errors ) {
4488        printf(" ERRORs sockets allocation errors! \n");
4489        printf(" you should allocate more clients in the pool \n");
4490        return(true);
4491    }
4492    return ( false);
4493}
4494
4495
4496/* dump the template info */
4497void CGlobalTRex::dump_template_info(std::string & json){
4498    CFlowGenListPerThread   * lpt = m_fl.m_threads_info[0];
4499    CFlowsYamlInfo * yaml_info=&lpt->m_yaml_info;
4500
4501    json="{\"name\":\"template_info\",\"type\":0,\"data\":[";
4502    int i;
4503    for (i=0; i<yaml_info->m_vec.size()-1; i++) {
4504        CFlowYamlInfo * r=&yaml_info->m_vec[i] ;
4505        json+="\""+ r->m_name+"\"";
4506        json+=",";
4507    }
4508    json+="\""+yaml_info->m_vec[i].m_name+"\"";
4509    json+="]}" ;
4510}
4511
4512void CGlobalTRex::dump_stats(FILE *fd, CGlobalStats::DumpFormat format){
4513
4514    update_stats();
4515    get_stats(m_stats);
4516
4517    if (format==CGlobalStats::dmpTABLE) {
4518        if ( m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ){
4519            switch (m_io_modes.m_pp_mode ){
4520            case CTrexGlobalIoMode::ppDISABLE:
4521                fprintf(fd,"\n+Per port stats disabled \n");
4522                break;
4523            case CTrexGlobalIoMode::ppTABLE:
4524                fprintf(fd,"\n-Per port stats table \n");
4525                m_stats.Dump(fd,CGlobalStats::dmpTABLE);
4526                break;
4527            case CTrexGlobalIoMode::ppSTANDARD:
4528                fprintf(fd,"\n-Per port stats - standard\n");
4529                m_stats.Dump(fd,CGlobalStats::dmpSTANDARD);
4530                break;
4531            };
4532
4533            switch (m_io_modes.m_ap_mode ){
4534            case   CTrexGlobalIoMode::apDISABLE:
4535                fprintf(fd,"\n+Global stats disabled \n");
4536                break;
4537            case   CTrexGlobalIoMode::apENABLE:
4538                fprintf(fd,"\n-Global stats enabled \n");
4539                m_stats.DumpAllPorts(fd);
4540                break;
4541            };
4542        }
4543    }else{
4544        /* at exit , always need to dump it in standartd mode for scripts*/
4545        m_stats.Dump(fd,format);
4546        m_stats.DumpAllPorts(fd);
4547    }
4548
4549}
4550
void
CGlobalTRex::publish_async_data(bool sync_now, bool baseline) {
    /* Publish the full set of JSON stat objects over the async ZMQ channel.
       sync_now - refresh the counters before publishing
       baseline - passed through to the dumpers to mark a baseline snapshot */
    std::string json;

    /* refactor to update, dump, and etc. */
    if (sync_now) {
        update_stats();
        get_stats(m_stats);
    }

    /* global stats object */
    m_stats.dump_json(json, baseline);
    m_zmq_publisher.publish_json(json);

    /* generator json , all cores are the same just sample the first one */
    m_fl.m_threads_info[0]->m_node_gen.dump_json(json);
    m_zmq_publisher.publish_json(json);


    /* template names exist only in stateful mode */
    if ( !get_is_stateless() ){
        dump_template_info(json);
        m_zmq_publisher.publish_json(json);
    }

    if ( get_is_rx_check_mode() ) {
        m_mg.rx_check_dump_json(json );
        m_zmq_publisher.publish_json(json);
    }

    /* backward compatible */
    m_mg.dump_json(json );
    m_zmq_publisher.publish_json(json);

    /* more info */
    m_mg.dump_json_v2(json );
    m_zmq_publisher.publish_json(json);

    if (get_is_stateless()) {
        std::string stat_json;
        std::string latency_json;
        /* flow-stat and latency snapshots are published as two messages */
        if (m_trex_stateless->m_rx_flow_stat.dump_json(stat_json, latency_json, baseline, sync_now)) {
            m_zmq_publisher.publish_json(stat_json);
            m_zmq_publisher.publish_json(latency_json);
        }
    }
}
4596
void
CGlobalTRex::publish_async_barrier(uint32_t key) {
    /* forward a barrier message carrying 'key' to the ZMQ publisher so
       async subscribers can synchronize on it */
    m_zmq_publisher.publish_barrier(key);
}
4601
4602void
4603CGlobalTRex:: publish_async_port_attr_changed(uint8_t port_id) {
4604    Json::Value data;
4605    data["port_id"] = port_id;
4606    TRexPortAttr * _attr = m_ports[port_id].get_port_attr();
4607
4608    _attr->to_json(data["attr"]);
4609
4610    m_zmq_publisher.publish_event(TrexPublisher::EVENT_PORT_ATTR_CHANGED, data);
4611}
4612
void
CGlobalTRex::handle_slow_path() {
    /* Periodic (slow-path) maintenance: refresh link state, process
       keyboard input, run sanity checks, redraw the console stats screen
       and publish the async JSON data. */
    m_stats_cnt+=1;

    // update speed, link up/down etc.
    for (int i=0; i<m_max_ports; i++) {
        bool changed = m_ports[i].get_port_attr()->update_link_status_nowait();
        if (changed) {
            publish_async_port_attr_changed(i);
        }
    }

    /* interactive keyboard handling; a true return requests a quit */
    if ( CGlobalInfo::m_options.preview.get_no_keyboard() ==false ) {
        if ( m_io_modes.handle_io_modes() ) {
            mark_for_shutdown(SHUTDOWN_CTRL_C);
            return;
        }
    }

    /* stop the test on client-pool allocation errors */
    if ( sanity_check() ) {
        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
        return;
    }

    /* ANSI escapes: clear the screen and reposition the cursor before
       redrawing the live display */
    if (m_io_modes.m_g_mode != CTrexGlobalIoMode::gDISABLE ) {
        fprintf(stdout,"\033[2J");
        fprintf(stdout,"\033[2H");

    } else {
        /* display was just disabled - clear the screen one last time */
        if ( m_io_modes.m_g_disable_first  ) {
            m_io_modes.m_g_disable_first=false;
            fprintf(stdout,"\033[2J");
            fprintf(stdout,"\033[2H");
            printf("clean !!!\n");
            fflush(stdout);
        }
    }


    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gHELP ) {
        m_io_modes.DumpHelp(stdout);
    }

    dump_stats(stdout,CGlobalStats::dmpTABLE);

    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gNORMAL ) {
        fprintf (stdout," current time    : %.1f sec  \n",now_sec());
        float d= CGlobalInfo::m_options.m_duration - now_sec();
        if (d<0) {
            d=0;

        }
        fprintf (stdout," test duration   : %.1f sec  \n",d);
    }

    /* memory view: dump pool usage on every 4th slow-path tick */
    if (m_io_modes.m_g_mode == CTrexGlobalIoMode::gMem) {

        if ( m_stats_cnt%4==0) {
            fprintf (stdout," %s \n",CGlobalInfo::dump_pool_as_json_str().c_str());
        }
    }


    /* stateful-only latency / rx-check reporting */
    if ( CGlobalInfo::m_options.is_rx_enabled() && (! get_is_stateless())) {
        m_mg.update();

        if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNORMAL ) {
            if (CGlobalInfo::m_options.m_latency_rate != 0) {
                switch (m_io_modes.m_l_mode) {
                case CTrexGlobalIoMode::lDISABLE:
                    fprintf(stdout, "\n+Latency stats disabled \n");
                    break;
                case CTrexGlobalIoMode::lENABLE:
                    fprintf(stdout, "\n-Latency stats enabled \n");
                    m_mg.DumpShort(stdout);
                    break;
                case CTrexGlobalIoMode::lENABLE_Extended:
                    fprintf(stdout, "\n-Latency stats extended \n");
                    m_mg.Dump(stdout);
                    break;
                }
            }

            if ( get_is_rx_check_mode() ) {

                switch (m_io_modes.m_rc_mode) {
                case CTrexGlobalIoMode::rcDISABLE:
                    fprintf(stdout,"\n+Rx Check stats disabled \n");
                    break;
                case CTrexGlobalIoMode::rcENABLE:
                    fprintf(stdout,"\n-Rx Check stats enabled \n");
                    m_mg.DumpShortRxCheck(stdout);
                    break;
                case CTrexGlobalIoMode::rcENABLE_Extended:
                    fprintf(stdout,"\n-Rx Check stats enhanced \n");
                    m_mg.DumpRxCheck(stdout);
                    break;
                }
            }
        }
    }
    /* NAT flow-table view; only meaningful in TCP-ACK learn mode */
    if ( m_io_modes.m_g_mode ==  CTrexGlobalIoMode::gNAT ) {
        if ( m_io_modes.m_nat_mode == CTrexGlobalIoMode::natENABLE ) {
            if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_TCP_ACK)) {
                fprintf(stdout, "NAT flow table info\n");
                m_mg.dump_nat_flow_table(stdout);
            } else {
                fprintf(stdout, "\nThis is only relevant in --learn-mode %d\n", CParserOption::LEARN_MODE_TCP_ACK);
            }
        }
    }

    /* publish data */
    publish_async_data(false);
}
4728
4729
4730void
4731CGlobalTRex::handle_fast_path() {
4732    /* check from messages from DP */
4733    check_for_dp_messages();
4734
4735    /* measure CPU utilization by sampling (we sample 1000 to get an accurate sampling) */
4736    for (int i = 0; i < 1000; i++) {
4737        m_fl.UpdateFast();
4738
4739        if (get_is_stateless()) {
4740            m_rx_sl.update_cpu_util();
4741        }else{
4742            m_mg.update_fast();
4743        }
4744
4745        rte_pause();
4746    }
4747
4748
4749    if ( is_all_cores_finished() ) {
4750        mark_for_shutdown(SHUTDOWN_TEST_ENDED);
4751    }
4752}
4753
4754
4755/**
4756 * shutdown sequence
4757 *
4758 */
4759void CGlobalTRex::shutdown() {
4760    std::stringstream ss;
4761    ss << " *** TRex is shutting down - cause: '";
4762
4763    switch (m_mark_for_shutdown) {
4764
4765    case SHUTDOWN_TEST_ENDED:
4766        ss << "test has ended'";
4767        break;
4768
4769    case SHUTDOWN_CTRL_C:
4770        ss << "CTRL + C detected'";
4771        break;
4772
4773    case SHUTDOWN_SIGINT:
4774        ss << "received signal SIGINT'";
4775        break;
4776
4777    case SHUTDOWN_SIGTERM:
4778        ss << "received signal SIGTERM'";
4779        break;
4780
4781    case SHUTDOWN_RPC_REQ:
4782        ss << "server received RPC 'shutdown' request'";
4783        break;
4784
4785    default:
4786        assert(0);
4787    }
4788
4789    /* report */
4790    std::cout << ss.str() << "\n";
4791
4792    /* first stop the WD */
4793    TrexWatchDog::getInstance().stop();
4794
4795    /* stateless shutdown */
4796    if (get_is_stateless()) {
4797        m_trex_stateless->shutdown();
4798    }
4799
4800    if (!is_all_cores_finished()) {
4801        try_stop_all_cores();
4802    }
4803
4804    m_mg.stop();
4805
4806    delay(1000);
4807
4808    /* shutdown drivers */
4809    for (int i = 0; i < m_max_ports; i++) {
4810        m_ports[i].stop();
4811    }
4812
4813    if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
4814        /* we should stop latency and exit to stop agents */
4815        Delete();
4816        utl_termio_reset();
4817        exit(-1);
4818    }
4819}
4820
4821
int CGlobalTRex::run_in_master() {
    /* Master (control-plane) loop: runs the fast path every
       FASTPATH_DELAY_MS and the slow path every SLOWPATH_DELAY_MS until a
       shutdown is requested, then tears everything down. */

    //rte_thread_setname(pthread_self(), "TRex Control");

    if ( get_is_stateless() ) {
        m_trex_stateless->launch_control_plane();
    }

    /* exception and scope safe */
    std::unique_lock<std::mutex> cp_lock(m_cp_lock);

    uint32_t slow_path_counter = 0;

    const int FASTPATH_DELAY_MS = 10;
    const int SLOWPATH_DELAY_MS = 500;

    /* register this (master) thread with the watchdog */
    m_monitor.create("master", 2);
    TrexWatchDog::getInstance().register_monitor(&m_monitor);

    TrexWatchDog::getInstance().start();

    while (!is_marked_for_shutdown()) {

        /* fast path */
        handle_fast_path();

        /* slow path */
        if (slow_path_counter >= SLOWPATH_DELAY_MS) {
            handle_slow_path();
            slow_path_counter = 0;
        }

        m_monitor.disable(30); //assume we will wake up

        /* release the CP lock while sleeping so other CP users can run */
        cp_lock.unlock();
        delay(FASTPATH_DELAY_MS);
        slow_path_counter += FASTPATH_DELAY_MS;
        cp_lock.lock();

        m_monitor.enable();
    }

    /* on exit release the lock */
    cp_lock.unlock();

    /* shutdown everything gracefully */
    shutdown();

    return (0);
}
4872
4873
4874
4875int CGlobalTRex::run_in_rx_core(void){
4876
4877    CPreviewMode *lp = &CGlobalInfo::m_options.preview;
4878
4879    rte_thread_setname(pthread_self(), "TRex RX");
4880
4881    /* set RT mode if set */
4882    if (lp->get_rt_prio_mode()) {
4883        struct sched_param param;
4884        param.sched_priority = sched_get_priority_max(SCHED_FIFO);
4885        if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
4886            perror("setting RT priroity mode on RX core failed with error");
4887            exit(EXIT_FAILURE);
4888        }
4889    }
4890
4891    if (get_is_stateless()) {
4892        m_sl_rx_running = true;
4893        m_rx_sl.start();
4894        m_sl_rx_running = false;
4895    } else {
4896        if ( CGlobalInfo::m_options.is_rx_enabled() ){
4897            m_sl_rx_running = false;
4898            m_mg.start(0, true);
4899        }
4900    }
4901
4902    return (0);
4903}
4904
4905int CGlobalTRex::run_in_core(virtual_thread_id_t virt_core_id){
4906    std::stringstream ss;
4907    CPreviewMode *lp = &CGlobalInfo::m_options.preview;
4908
4909    ss << "Trex DP core " << int(virt_core_id);
4910    rte_thread_setname(pthread_self(), ss.str().c_str());
4911
4912    /* set RT mode if set */
4913    if (lp->get_rt_prio_mode()) {
4914        struct sched_param param;
4915        param.sched_priority = sched_get_priority_max(SCHED_FIFO);
4916        if (pthread_setschedparam(pthread_self(), SCHED_FIFO, &param) != 0) {
4917            perror("setting RT priroity mode on DP core failed with error");
4918            exit(EXIT_FAILURE);
4919        }
4920    }
4921
4922
4923    if ( lp->getSingleCore() &&
4924         (virt_core_id==2 ) &&
4925         (lp-> getCores() ==1) ){
4926        printf(" bypass this core \n");
4927        m_signal[virt_core_id]=1;
4928        return (0);
4929    }
4930
4931
4932    assert(m_fl_was_init);
4933    CFlowGenListPerThread   * lpt;
4934
4935    lpt = m_fl.m_threads_info[virt_core_id-1];
4936
4937    /* register a watchdog handle on current core */
4938    lpt->m_monitor.create(ss.str(), 1);
4939    TrexWatchDog::getInstance().register_monitor(&lpt->m_monitor);
4940
4941    if (get_is_stateless()) {
4942        lpt->start_stateless_daemon(*lp);
4943    }else{
4944        lpt->start_generate_stateful(CGlobalInfo::m_options.out_file,*lp);
4945    }
4946
4947    /* done - remove this from the watchdog (we might wait on join for a long time) */
4948    lpt->m_monitor.disable();
4949
4950    m_signal[virt_core_id]=1;
4951    return (0);
4952}
4953
4954
int CGlobalTRex::stop_master(){
    /* End-of-run reporting: wait for in-flight traffic to drain, then dump
       interface, per-core, generator and latency statistics, and publish a
       final async snapshot. */

    delay(1000);
    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");
    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    fprintf(stdout," ==================\n");
    fprintf(stdout," \n\n");

    /* NOTE(review): this header repeats "interface sum" although the
       section below dumps per-core stats - confirm the text is intended */
    fprintf(stdout," ==================\n");
    fprintf(stdout," interface sum \n");
    fprintf(stdout," ==================\n");

    CFlowGenListPerThread   * lpt;
    uint64_t total_tx_rx_check=0;

    /* per-core dump; also accumulate rx-check packets for verification */
    int i;
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        CCoreEthIF * erf_vif = m_cores_vif[i+1];

        erf_vif->DumpCoreStats(stdout);
        erf_vif->DumpIfStats(stdout);
        total_tx_rx_check+=erf_vif->m_stats[CLIENT_SIDE].m_tx_rx_check_pkt+
            erf_vif->m_stats[SERVER_SIDE].m_tx_rx_check_pkt;
    }

    fprintf(stdout," ==================\n");
    fprintf(stdout," generators \n");
    fprintf(stdout," ==================\n");
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        lpt->m_node_gen.DumpHist(stdout);
        lpt->DumpStats(stdout);
    }
    if ( CGlobalInfo::m_options.is_latency_enabled() ){
        fprintf(stdout," ==================\n");
        fprintf(stdout," latency \n");
        fprintf(stdout," ==================\n");
        m_mg.DumpShort(stdout);
        m_mg.Dump(stdout);
        m_mg.DumpShortRxCheck(stdout);
        m_mg.DumpRxCheck(stdout);
        /* cross-check rx-check counters against the DP-core totals */
        m_mg.DumpRxCheckVerification(stdout,total_tx_rx_check);
    }

    dump_stats(stdout,CGlobalStats::dmpSTANDARD);
    dump_post_test_stats(stdout);
    publish_async_data(false);

    return (0);
}
5008
5009bool CGlobalTRex::is_all_cores_finished() {
5010    int i;
5011    for (i=0; i<get_cores_tx(); i++) {
5012        if ( m_signal[i+1]==0){
5013            return false;
5014        }
5015    }
5016    if (m_sl_rx_running)
5017        return false;
5018
5019    return true;
5020}
5021
5022
5023int CGlobalTRex::start_master_stateless(){
5024    int i;
5025    for (i=0; i<BP_MAX_CORES; i++) {
5026        m_signal[i]=0;
5027    }
5028    m_fl.Create();
5029    m_expected_pps = 0;
5030    m_expected_cps = 0;
5031    m_expected_bps = 0;
5032
5033    m_fl.generate_p_thread_info(get_cores_tx());
5034    CFlowGenListPerThread   * lpt;
5035
5036    for (i=0; i<get_cores_tx(); i++) {
5037        lpt = m_fl.m_threads_info[i];
5038        CVirtualIF * erf_vif = m_cores_vif[i+1];
5039        lpt->set_vif(erf_vif);
5040        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();
5041    }
5042    m_fl_was_init=true;
5043
5044    return (0);
5045}
5046
/**
 * Initialize the flow-generator infrastructure for stateful mode:
 * load the traffic profile YAML, apply the optional client-config file,
 * verify the options, compute the expected PPS/CPS/BPS rates, and bind
 * each DP thread to its per-core virtual interface.
 *
 * Exits the process (exit(-1)) on YAML/option verification errors.
 *
 * @return 0 on success.
 */
int CGlobalTRex::start_master_statefull() {
    int i;
    /* clear the per-core "finished" signals */
    for (i=0; i<BP_MAX_CORES; i++) {
        m_signal[i]=0;
    }

    m_fl.Create();
    m_fl.load_from_yaml(CGlobalInfo::m_options.cfg_file,get_cores_tx());

    if ( CGlobalInfo::m_options.m_active_flows>0 ) {
        m_fl.update_active_flows(CGlobalInfo::m_options.m_active_flows);
    }

    /* client config */
    if (CGlobalInfo::m_options.client_cfg_file != "") {
        try {
            m_fl.load_client_config_file(CGlobalInfo::m_options.client_cfg_file);
        } catch (const std::runtime_error &e) {
            std::cout << "\n*** " << e.what() << "\n\n";
            exit(-1);
        }
        CGlobalInfo::m_options.preview.set_client_cfg_enable(true);
        m_fl.set_client_config_tuple_gen_info(&m_fl.m_yaml_info.m_tuple_gen);
        /* client config needs ARP resolution before traffic starts */
        pre_test();
    }

    /* verify options */
    try {
        CGlobalInfo::m_options.verify();
    } catch (const std::runtime_error &e) {
        std::cout << "\n*** " << e.what() << "\n\n";
        exit(-1);
    }

    /* expected rates derived from the profile; published for comparison
       against the measured rates at runtime */
    m_expected_pps = m_fl.get_total_pps();
    m_expected_cps = 1000.0*m_fl.get_total_kcps();
    m_expected_bps = m_fl.get_total_tx_bps();
    if ( m_fl.get_total_repeat_flows() > 2000) {
        /* disable flows cache */
        CGlobalInfo::m_options.preview.setDisableMbufCache(true);
    }

    CTupleGenYamlInfo * tg=&m_fl.m_yaml_info.m_tuple_gen;

    /* give the latency manager the client/server IP ranges from pool 0 */
    m_mg.set_ip( tg->m_client_pool[0].get_ip_start(),
                 tg->m_server_pool[0].get_ip_start(),
                 tg->m_client_pool[0].getDualMask()
                 );

    if (  CGlobalInfo::m_options.preview.getVMode() >0 ) {
        m_fl.DumpCsv(stdout);
        /* scroll the CSV dump off-screen so the live view starts clean */
        for (i=0; i<100; i++) {
            fprintf(stdout,"\n");
        }
        fflush(stdout);
    }

    m_fl.generate_p_thread_info(get_cores_tx());
    CFlowGenListPerThread   * lpt;

    /* bind every DP thread to its virtual interface (slot 0 is master) */
    for (i=0; i<get_cores_tx(); i++) {
        lpt = m_fl.m_threads_info[i];
        //CNullIF * erf_vif = new CNullIF();
        CVirtualIF * erf_vif = m_cores_vif[i+1];
        lpt->set_vif(erf_vif);
        /* socket id */
        lpt->m_node_gen.m_socket_id =m_cores_vif[i+1]->get_socket_id();

    }
    m_fl_was_init=true;

    return (0);
}
5120
5121
5122////////////////////////////////////////////
5123static CGlobalTRex g_trex;
5124
/**
 * Configure this port's RX/TX queues according to the driver's DPDK
 * parameters. Three layouts are selected on rx_drop_q_num:
 *   0 drop queues : VM mode (single RX queue used for everything) or
 *                   RSS over all data queues when rx_data_q_num > 1.
 *   1 drop queue  : normal HW mode - one drop queue plus MAIN_DPDK_RX_Q.
 *   many          : Mellanox mode - RSS spreads unwanted traffic over
 *                   the drop queues; MAIN_DPDK_RX_Q gets filtered traffic.
 * Also chooses which TX queue the RX core transmits from
 * (g_trex.m_rx_core_tx_q_id).
 */
void CPhyEthIF::conf_queues() {
    CTrexDpdkParams dpdk_p;
    get_ex_drv()->get_dpdk_drv_params(dpdk_p);
    /* one-queue mode collapses all TX to queue 0 */
    uint16_t num_tx_q = (CGlobalInfo::get_queues_mode() == CGlobalInfo::Q_MODE_ONE_QUEUE) ?
        1 : g_trex.m_max_queues_per_port;
    socket_id_t socket_id = CGlobalInfo::m_socket.port_to_socket((port_id_t)m_port_id);
    assert(CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);

    configure(dpdk_p.rx_drop_q_num + dpdk_p.rx_data_q_num, num_tx_q, &g_trex.m_port_cfg.m_port_conf);
    for (uint16_t qid = 0; qid < num_tx_q; qid++) {
        tx_queue_setup(qid, dpdk_p.tx_desc_num , socket_id, &g_trex.m_port_cfg.m_tx_conf);
    }

    switch (dpdk_p.rx_drop_q_num) {
    case 0:
        if (dpdk_p.rx_data_q_num == 1) {
            // 1 rx rcv q. no drop q. VM mode.
            // Only 1 rx queue, so use it for everything
            g_trex.m_rx_core_tx_q_id = 0;
            rx_queue_setup(0, dpdk_p.rx_desc_num_data_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                           get_ex_drv()->get_rx_mem_pool(socket_id));
            set_rx_queue(0);
        } else {
            // no drop q. Many rcv queues. RSS mode.
            // rss on all rcv queues. Do not skip any q.
            configure_rss_redirect_table(dpdk_p.rx_data_q_num, 0xff);
            g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
            for (int queue = 0; queue < dpdk_p.rx_data_q_num; queue++) {
                rx_queue_setup(queue, dpdk_p.rx_desc_num_data_q, socket_id,
                               &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
            }
        }
        break;
    case 1:
        // 1 drop q. 1 or more rx queues. Normal mode.
        // rx core will use largest tx q
        g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
        // configure drop q
        rx_queue_setup(MAIN_DPDK_DROP_Q, dpdk_p.rx_desc_num_drop_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                            CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        set_rx_queue(MAIN_DPDK_RX_Q);
        rx_queue_setup(MAIN_DPDK_RX_Q, dpdk_p.rx_desc_num_data_q, socket_id,
                       &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
        break;
    default:
        // Many drop queues. Mellanox mode.
        g_trex.m_rx_core_tx_q_id = g_trex.m_cores_to_dual_ports;
        // configure drop queues (all queues but MAIN_DPDK_RX_Q)
        for (int j = 0; j < dpdk_p.rx_drop_q_num + 1; j++) {
            if (j == MAIN_DPDK_RX_Q) {
                continue;
            }
            rx_queue_setup(j, dpdk_p.rx_desc_num_drop_q, socket_id, &g_trex.m_port_cfg.m_rx_conf,
                           CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_2048);
        }
        rx_queue_setup(MAIN_DPDK_RX_Q, dpdk_p.rx_desc_num_data_q, socket_id,
                       &g_trex.m_port_cfg.m_rx_conf, CGlobalInfo::m_mem_pool[socket_id].m_mbuf_pool_9k);
        // rss on all drop queues. Skip MAIN_DPDK_RX_Q
        configure_rss_redirect_table(dpdk_p.rx_drop_q_num + 1, MAIN_DPDK_RX_Q);
        break;
    }
}
5187
5188void CPhyEthIF::configure_rss_redirect_table(uint16_t numer_of_queues, uint16_t skip_queue) {
5189     struct rte_eth_dev_info dev_info;
5190
5191     rte_eth_dev_info_get(m_port_id,&dev_info);
5192     assert(dev_info.reta_size > 0);
5193     int reta_conf_size = std::max(1, dev_info.reta_size / RTE_RETA_GROUP_SIZE);
5194     struct rte_eth_rss_reta_entry64 reta_conf[reta_conf_size];
5195
5196     rte_eth_dev_rss_reta_query(m_port_id,&reta_conf[0],dev_info.reta_size);
5197
5198     for (int j = 0; j < reta_conf_size; j++) {
5199         uint16_t skip = 0;
5200         reta_conf[j].mask = ~0ULL;
5201         for (int i = 0; i < RTE_RETA_GROUP_SIZE; i++) {
5202             uint16_t q;
5203             while (true) {
5204                 q=(i + skip) % numer_of_queues;
5205                 if (q != skip_queue) {
5206                     break;
5207                 }
5208                 skip += 1;
5209             }
5210             reta_conf[j].reta[i] = q;
5211         }
5212     }
5213     rte_eth_dev_rss_reta_update(m_port_id, &reta_conf[0], dev_info.reta_size);
5214     rte_eth_dev_rss_reta_query(m_port_id, &reta_conf[0], dev_info.reta_size);
5215
5216#if 0
5217     /* verification */
5218     for (j = 0; j < reta_conf_size; j++) {
5219         for (i = 0; i<RTE_RETA_GROUP_SIZE; i++) {
5220             printf(" R  %d %d %d \n",j,i,reta_conf[j].reta[i]);
5221         }
5222     }
5223#endif
5224}
5225
5226void CPhyEthIF::update_counters() {
5227    get_ex_drv()->get_extended_stats(this, &m_stats);
5228    CRXCoreIgnoreStat ign_stats;
5229
5230    if (get_is_stateless()) {
5231        g_trex.m_rx_sl.get_ignore_stats(m_port_id, ign_stats, true);
5232    } else {
5233        g_trex.m_mg.get_ignore_stats(m_port_id, ign_stats, true);
5234    }
5235
5236    m_stats.obytes -= ign_stats.get_tx_bytes();
5237    m_stats.opackets -= ign_stats.get_tx_pkts();
5238    m_ignore_stats.opackets += ign_stats.get_tx_pkts();
5239    m_ignore_stats.obytes += ign_stats.get_tx_bytes();
5240    m_ignore_stats.m_tx_arp += ign_stats.get_tx_arp();
5241
5242    m_last_tx_rate      =  m_bw_tx.add(m_stats.obytes);
5243    m_last_rx_rate      =  m_bw_rx.add(m_stats.ibytes);
5244    m_last_tx_pps       =  m_pps_tx.add(m_stats.opackets);
5245    m_last_rx_pps       =  m_pps_rx.add(m_stats.ipackets);
5246}
5247
5248bool CPhyEthIF::Create(uint8_t portid) {
5249    m_port_id      = portid;
5250    m_last_rx_rate = 0.0;
5251    m_last_tx_rate = 0.0;
5252    m_last_tx_pps  = 0.0;
5253    m_port_attr    = g_trex.m_drv->create_port_attr(portid);
5254
5255    /* set src MAC addr */
5256    uint8_t empty_mac[ETHER_ADDR_LEN] = {0,0,0,0,0,0};
5257    if (! memcmp( CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src, empty_mac, ETHER_ADDR_LEN)) {
5258        rte_eth_macaddr_get(m_port_id,
5259                            (struct ether_addr *)&CGlobalInfo::m_options.m_mac_addr[m_port_id].u.m_mac.src);
5260    }
5261
5262    return true;
5263}
5264
5265const std::vector<std::pair<uint8_t, uint8_t>> &
5266CPhyEthIF::get_core_list() {
5267
5268    /* lazy find */
5269    if (m_core_id_list.size() == 0) {
5270
5271        for (uint8_t core_id = 0; core_id < g_trex.get_cores_tx(); core_id++) {
5272
5273            /* iterate over all the directions*/
5274            for (uint8_t dir = 0 ; dir < CS_NUM; dir++) {
5275                if (g_trex.m_cores_vif[core_id + 1]->get_ports()[dir].m_port->get_port_id() == m_port_id) {
5276                    m_core_id_list.push_back(std::make_pair(core_id, dir));
5277                }
5278            }
5279        }
5280    }
5281
5282    return m_core_id_list;
5283
5284}
5285
5286int CPhyEthIF::reset_hw_flow_stats() {
5287    if (get_ex_drv()->hw_rx_stat_supported()) {
5288        get_ex_drv()->reset_rx_stats(this, m_stats.m_fdir_prev_pkts, 0, MAX_FLOW_STATS);
5289    } else {
5290        g_trex.m_rx_sl.reset_rx_stats(get_port_id());
5291    }
5292    return 0;
5293}
5294
5295// get/reset flow director counters
5296// return 0 if OK. -1 if operation not supported.
5297// rx_stats, tx_stats - arrays of len max - min + 1. Returning rx, tx updated absolute values.
5298// min, max - minimum, maximum counters range to get
5299// reset - If true, need to reset counter value after reading
int CPhyEthIF::get_flow_stats(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
    /* per-counter deltas read from HW since the previous call */
    uint32_t diff_pkts[MAX_FLOW_STATS];
    uint32_t diff_bytes[MAX_FLOW_STATS];
    bool hw_rx_stat_supported = get_ex_drv()->hw_rx_stat_supported();

    if (hw_rx_stat_supported) {
        /* read HW flow-director counters relative to the saved baselines */
        if (get_ex_drv()->get_rx_stats(this, diff_pkts, m_stats.m_fdir_prev_pkts
                                       , diff_bytes, m_stats.m_fdir_prev_bytes, min, max) < 0) {
            return -1;
        }
    } else {
        /* no HW support: the RX core keeps the counters in software */
        g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_IPV4_ID);
    }

    for (int i = min; i <= max; i++) {
        if ( reset ) {
            // return value so far, and reset
            if (hw_rx_stat_supported) {
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i] + diff_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i] + diff_bytes[i]);
                }
                m_stats.m_rx_per_flow_pkts[i] = 0;
                m_stats.m_rx_per_flow_bytes[i] = 0;
                /* re-arm the HW baseline for this single counter */
                get_ex_drv()->reset_rx_stats(this, &m_stats.m_fdir_prev_pkts[i], i, 1);

            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i, false);
            }
        } else {
            if (hw_rx_stat_supported) {
                /* accumulate the HW deltas into the running totals */
                m_stats.m_rx_per_flow_pkts[i] += diff_pkts[i];
                m_stats.m_rx_per_flow_bytes[i] += diff_bytes[i];
                if (rx_stats != NULL) {
                    rx_stats[i - min].set_pkts(m_stats.m_rx_per_flow_pkts[i]);
                    rx_stats[i - min].set_bytes(m_stats.m_rx_per_flow_bytes[i]);
                }
            }
            if (tx_stats != NULL) {
                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i);
            }
        }
    }

    return 0;
}
5347
5348int CPhyEthIF::get_flow_stats_payload(rx_per_flow_t *rx_stats, tx_per_flow_t *tx_stats, int min, int max, bool reset) {
5349    g_trex.m_rx_sl.get_rx_stats(get_port_id(), rx_stats, min, max, reset, TrexPlatformApi::IF_STAT_PAYLOAD);
5350    for (int i = min; i <= max; i++) {
5351        if ( reset ) {
5352            if (tx_stats != NULL) {
5353                tx_stats[i - min] = g_trex.clear_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS, true);
5354            }
5355        } else {
5356            if (tx_stats != NULL) {
5357                tx_stats[i - min] = g_trex.get_flow_tx_stats(m_port_id, i + MAX_FLOW_STATS);
5358            }
5359        }
5360    }
5361
5362    return 0;
5363}
5364
/* Accessor for the stateless object owned by the global g_trex singleton. */
TrexStateless * get_stateless_obj() {
    return g_trex.m_trex_stateless;
}
5368
/* Accessor for the stateless RX core object owned by g_trex. */
CRxCoreStateless * get_rx_sl_core_obj() {
    return &g_trex.m_rx_sl;
}
5372
5373static int latency_one_lcore(__attribute__((unused)) void *dummy)
5374{
5375    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5376    physical_thread_id_t  phy_id =rte_lcore_id();
5377
5378    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5379        g_trex.run_in_rx_core();
5380    }else{
5381
5382        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5383            g_trex.run_in_master();
5384            delay(1);
5385        }else{
5386            delay((uint32_t)(1000.0*CGlobalInfo::m_options.m_duration));
5387            /* this core has stopped */
5388            g_trex.m_signal[ lpsock->thread_phy_to_virt( phy_id ) ]=1;
5389        }
5390    }
5391    return 0;
5392}
5393
5394
5395
5396static int slave_one_lcore(__attribute__((unused)) void *dummy)
5397{
5398    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
5399    physical_thread_id_t  phy_id =rte_lcore_id();
5400
5401    if ( lpsock->thread_phy_is_rx(phy_id) ) {
5402        g_trex.run_in_rx_core();
5403    }else{
5404        if ( lpsock->thread_phy_is_master( phy_id ) ) {
5405            g_trex.run_in_master();
5406            delay(1);
5407        }else{
5408            g_trex.run_in_core( lpsock->thread_phy_to_virt( phy_id ) );
5409        }
5410    }
5411    return 0;
5412}
5413
5414
5415
5416uint32_t get_cores_mask(uint32_t cores,int offset){
5417    int i;
5418
5419    uint32_t res=1;
5420
5421    uint32_t mask=(1<<(offset+1));
5422    for (i=0; i<(cores-1); i++) {
5423        res |= mask ;
5424        mask = mask <<1;
5425    }
5426    return (res);
5427}
5428
5429
5430static char *g_exe_name;
/* Return argv[0] as recorded by main(); used by code that needs the
 * executable path after startup. */
const char *get_exe_name() {
    return g_exe_name;
}
5434
5435
/* Process entry point: remember the executable name, then delegate all
 * real work to main_test(). */
int main(int argc , char * argv[]){
    g_exe_name = argv[0];

    return ( main_test(argc , argv));
}
5441
5442
/**
 * Fold the platform YAML file (global_platform_cfg_info) into the global
 * runtime options and memory configuration. Command-line parsing runs
 * again afterwards, so user flags can override these values.
 *
 * @return 0 (always).
 */
int update_global_info_from_platform_file(){

    CPlatformYamlInfo *cg=&global_platform_cfg_info;

    CGlobalInfo::m_socket.Create(&cg->m_platform);


    if (!cg->m_info_exist) {
        /* nothing to do ! */
        return 0;
    }

    CGlobalInfo::m_options.prefix =cg->m_prefix;
    CGlobalInfo::m_options.preview.setCores(cg->m_thread_per_dual_if);

    if ( cg->m_port_limit_exist ){
        CGlobalInfo::m_options.m_expected_portd =cg->m_port_limit;
    }

    if ( cg->m_enable_zmq_pub_exist ){
        CGlobalInfo::m_options.preview.set_zmq_publish_enable(cg->m_enable_zmq_pub);
        CGlobalInfo::m_options.m_zmq_port = cg->m_zmq_pub_port;
    }
    if ( cg->m_telnet_exist ){
        CGlobalInfo::m_options.m_telnet_port = cg->m_telnet_port;
    }

    if ( cg->m_mac_info_exist ){
        int i;
        /* copy the per-port MAC/IP info from the platform file */

        int port_size=cg->m_mac_info.size();

        if ( port_size > TREX_MAX_PORTS ){
            port_size = TREX_MAX_PORTS;
        }
        for (i=0; i<port_size; i++){
            cg->m_mac_info[i].copy_src(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.src)   ;
            cg->m_mac_info[i].copy_dest(( char *)CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.dest)  ;
            CGlobalInfo::m_options.m_mac_addr[i].u.m_mac.is_set = 1;

            CGlobalInfo::m_options.m_ip_cfg[i].set_def_gw(cg->m_mac_info[i].get_def_gw());
            CGlobalInfo::m_options.m_ip_cfg[i].set_ip(cg->m_mac_info[i].get_ip());
            CGlobalInfo::m_options.m_ip_cfg[i].set_mask(cg->m_mac_info[i].get_mask());
            CGlobalInfo::m_options.m_ip_cfg[i].set_vlan(cg->m_mac_info[i].get_vlan());
            // If one of the ports has vlan, work in vlan mode
            if (cg->m_mac_info[i].get_vlan() != 0) {
                CGlobalInfo::m_options.preview.set_vlan_mode_verify(CPreviewMode::VLAN_MODE_NORMAL);
            }
        }
    }

    /* memory multiplier: scales with port speed (relative to 10Gb),
       number of ports, and the user-requested mbuf factor */
    float mul=1.0;
    if (cg->m_port_bandwidth_gb<10) {
        cg->m_port_bandwidth_gb=10.0;
    }

    mul = mul*(float)cg->m_port_bandwidth_gb/10.0;
    mul= mul * (float)cg->m_port_limit/2.0;

    mul= mul * CGlobalInfo::m_options.m_mbuf_factor;


    CGlobalInfo::m_memory_cfg.set_pool_cache_size(RTE_MEMPOOL_CACHE_MAX_SIZE);

    CGlobalInfo::m_memory_cfg.set_number_of_dp_cors(
                                                    CGlobalInfo::m_options.get_number_of_dp_cores_needed() );

    CGlobalInfo::m_memory_cfg.set(cg->m_memory,mul);
    return (0);
}
5515
5516extern "C" int eal_cpu_detected(unsigned lcore_id);
5517// return mask representing available cores
5518int core_mask_calc() {
5519    uint32_t mask = 0;
5520    int lcore_id;
5521
5522    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
5523        if (eal_cpu_detected(lcore_id)) {
5524            mask |= (1 << lcore_id);
5525        }
5526    }
5527
5528    return mask;
5529}
5530
5531// Return number of set bits in i
5532uint32_t num_set_bits(uint32_t i)
5533{
5534    i = i - ((i >> 1) & 0x55555555);
5535    i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
5536    return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
5537}
5538
5539// sanity check if the cores we want to use really exist
/**
 * Verify that every core requested by the configuration actually exists
 * on this machine. Prints a human-readable explanation on failure.
 *
 * @param wanted_core_mask  bitmask of cores the configuration wants.
 * @return 0 when the request is satisfiable, -1 otherwise.
 */
int core_mask_sanity(uint32_t wanted_core_mask) {
    uint32_t calc_core_mask = core_mask_calc();
    uint32_t wanted_core_num, calc_core_num;

    wanted_core_num = num_set_bits(wanted_core_mask);
    calc_core_num = num_set_bits(calc_core_mask);

    /* TRex always needs at least the master plus one worker core */
    if (calc_core_num == 1) {
        printf ("Error: You have only 1 core available. Minimum configuration requires 2 cores\n");
        printf("        If you are running on VM, consider adding more cores if possible\n");
        return -1;
    }
    if (wanted_core_num > calc_core_num) {
        printf("Error: You have %d threads available, but you asked for %d threads.\n", calc_core_num, wanted_core_num);
        printf("       Calculation is: -c <num>(%d) * dual ports (%d) + 1 master thread %s"
               , CGlobalInfo::m_options.preview.getCores(), CGlobalInfo::m_options.get_expected_dual_ports()
               , get_is_rx_thread_enabled() ? "+1 latency thread (because of -l flag)\n" : "\n");
        if (CGlobalInfo::m_options.preview.getCores() > 1)
            printf("       Maybe try smaller -c <num>.\n");
        printf("       If you are running on VM, consider adding more cores if possible\n");
        return -1;
    }

    /* enough cores in total, but specific requested cores are missing */
    if (wanted_core_mask != (wanted_core_mask & calc_core_mask)) {
        printf ("Serious error: Something is wrong with the hardware. Wanted core mask is %x. Existing core mask is %x\n", wanted_core_mask, calc_core_mask);
        return -1;
    }

    return 0;
}
5570
/**
 * Assemble the argv passed to rte_eal_init() (global_dpdk_args) from the
 * parsed TRex options: core mask, memory channels, log level, optional
 * mlx4/mlx5 shared objects, PCI white-list and file prefix.
 *
 * @return 0 on success, -1 on configuration/core-mask errors.
 */
int  update_dpdk_args(void){

    CPlatformSocketInfo * lpsock=&CGlobalInfo::m_socket;
    CParserOption * lpop= &CGlobalInfo::m_options;

    lpsock->set_rx_thread_is_enabled(get_is_rx_thread_enabled());
    lpsock->set_number_of_threads_per_ports(lpop->preview.getCores() );
    lpsock->set_number_of_dual_ports(lpop->get_expected_dual_ports());
    if ( !lpsock->sanity_check() ){
        printf(" ERROR in configuration file \n");
        return (-1);
    }

    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        lpsock->dump(stdout);
    }

    /* verify the wanted cores exist before handing the mask to DPDK */
    snprintf(global_cores_str, sizeof(global_cores_str), "0x%llx" ,(unsigned long long)lpsock->get_cores_mask());
    if (core_mask_sanity(strtol(global_cores_str, NULL, 16)) < 0) {
        return -1;
    }

    /* set the DPDK options */
    global_dpdk_args_num = 0;

    /* argv[0] placeholder - EAL skips the program name */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"xx";

    /* load mlx5/mlx4 PMDs as shared objects when requested */
    if ( CGlobalInfo::m_options.preview.get_mlx5_so_mode() ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-d";
        snprintf(global_mlx5_so_id_str, sizeof(global_mlx5_so_id_str), "libmlx5-64%s.so",global_image_postfix );
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_mlx5_so_id_str;
    }

    if ( CGlobalInfo::m_options.preview.get_mlx4_so_mode() ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-d";
        snprintf(global_mlx4_so_id_str, sizeof(global_mlx4_so_id_str), "libmlx4-64%s.so",global_image_postfix );
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_mlx4_so_id_str;
    }

    /* core mask and memory channels */
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-c";
    global_dpdk_args[global_dpdk_args_num++]=(char *)global_cores_str;
    global_dpdk_args[global_dpdk_args_num++]=(char *)"-n";
    global_dpdk_args[global_dpdk_args_num++]=(char *)"4";

    /* EAL log level follows TRex verbosity */
    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", 4);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }else{
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--log-level";
        snprintf(global_loglevel_str, sizeof(global_loglevel_str), "%d", CGlobalInfo::m_options.preview.getVMode()+1);
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_loglevel_str;
    }

    global_dpdk_args[global_dpdk_args_num++] = (char *)"--master-lcore";

    snprintf(global_master_id_str, sizeof(global_master_id_str), "%u", lpsock->get_master_phy_id());
    global_dpdk_args[global_dpdk_args_num++] = global_master_id_str;

    /* add white list */
    if (lpop->m_run_mode == CParserOption::RUN_MODE_DUMP_INFO and lpop->dump_interfaces.size()) {
        for (int i=0; i<(int)lpop->dump_interfaces.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)lpop->dump_interfaces[i].c_str();
        }
    }
    else {
        for (int i=0; i<(int)global_platform_cfg_info.m_if_list.size(); i++) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)"-w";
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_if_list[i].c_str();
        }
    }



    /* multi-instance support: per-instance hugepage prefix and mem limit */
    if ( lpop->prefix.length()  ){
        global_dpdk_args[global_dpdk_args_num++]=(char *)"--file-prefix";
        snprintf(global_prefix_str, sizeof(global_prefix_str), "%s", lpop->prefix.c_str());
        global_dpdk_args[global_dpdk_args_num++]=(char *)global_prefix_str;
        global_dpdk_args[global_dpdk_args_num++]=(char *)"-m";
        if (global_platform_cfg_info.m_limit_memory.length()) {
            global_dpdk_args[global_dpdk_args_num++]=(char *)global_platform_cfg_info.m_limit_memory.c_str();
        }else{
            global_dpdk_args[global_dpdk_args_num++]=(char *)"1024";
        }
    }


    if ( CGlobalInfo::m_options.preview.getVMode() > 0  ) {
        printf("args \n");
        int i;
        for (i=0; i<global_dpdk_args_num; i++) {
            printf(" %s \n",global_dpdk_args[i]);
        }
    }
    return (0);
}
5668
5669
5670int sim_load_list_of_cap_files(CParserOption * op){
5671
5672    CFlowGenList fl;
5673    fl.Create();
5674    fl.load_from_yaml(op->cfg_file,1);
5675    if ( op->preview.getVMode() >0 ) {
5676        fl.DumpCsv(stdout);
5677    }
5678    uint32_t start=    os_get_time_msec();
5679
5680    CErfIF erf_vif;
5681
5682    fl.generate_p_thread_info(1);
5683    CFlowGenListPerThread   * lpt;
5684    lpt=fl.m_threads_info[0];
5685    lpt->set_vif(&erf_vif);
5686
5687    if ( (op->preview.getVMode() >1)  || op->preview.getFileWrite() ) {
5688        lpt->start_generate_stateful(op->out_file,op->preview);
5689    }
5690
5691    lpt->m_node_gen.DumpHist(stdout);
5692
5693    uint32_t stop=    os_get_time_msec();
5694    printf(" d time = %ul %ul \n",stop-start,os_get_time_freq());
5695    fl.Delete();
5696    return (0);
5697}
5698
/**
 * Print PCI address, MAC and driver name for every DPDK port.
 * Used by the --dump-info run mode; the process exits right afterwards.
 *
 * NOTE(review): this dereferences device->devargs without a NULL check -
 * valid for white-listed PCI devices, but confirm for other bus types.
 */
void dump_interfaces_info() {
    printf("Showing interfaces info.\n");
    uint8_t m_max_ports = rte_eth_dev_count();
    struct ether_addr mac_addr;
    char mac_str[ETHER_ADDR_FMT_SIZE];
    struct rte_pci_addr pci_addr;

    for (uint8_t port_id=0; port_id<m_max_ports; port_id++) {
        // PCI, MAC and Driver
        pci_addr = rte_eth_devices[port_id].device->devargs->pci.addr;
        rte_eth_macaddr_get(port_id, &mac_addr);
        ether_format_addr(mac_str, sizeof mac_str, &mac_addr);
        printf("PCI: %04x:%02x:%02x.%d - MAC: %s - Driver: %s\n",
            pci_addr.domain, pci_addr.bus, pci_addr.devid, pci_addr.function, mac_str,
            rte_eth_devices[port_id].data->drv_name);
    }
}
5716
5717
/* Derive the image postfix (e.g. "-debug") from the executable name:
 * everything after TREX_NAME in argv[0] is copied into
 * global_image_postfix, which is later used to build matching .so names.
 *
 * NOTE(review): strcpy here is unbounded; argv[0] length is caller
 * controlled - consider a size-limited copy into global_image_postfix. */
int learn_image_postfix(char * image_name){

    char *p = strstr(image_name,TREX_NAME);
    if (p) {
        strcpy(global_image_postfix,p+strlen(TREX_NAME));
    }
    return(0);
}
5726
5727int main_test(int argc , char * argv[]){
5728
5729    learn_image_postfix(argv[0]);
5730
5731    utl_termio_init();
5732
5733    int ret;
5734    unsigned lcore_id;
5735    printf("Starting  TRex %s please wait  ... \n",VERSION_BUILD_NUM);
5736
5737    CGlobalInfo::m_options.preview.clean();
5738
5739    if ( parse_options_wrapper(argc, argv, &CGlobalInfo::m_options,true ) != 0){
5740        exit(-1);
5741    }
5742
5743    /* enable core dump if requested */
5744    if (CGlobalInfo::m_options.preview.getCoreDumpEnable()) {
5745        utl_set_coredump_size(-1);
5746    }
5747    else {
5748        utl_set_coredump_size(0);
5749    }
5750
5751
5752    update_global_info_from_platform_file();
5753
    /* It is not a mistake. Give the user higher priority over the configuration file */
5755    if (parse_options_wrapper(argc, argv, &CGlobalInfo::m_options ,false) != 0) {
5756        exit(-1);
5757    }
5758
5759
5760    if ( CGlobalInfo::m_options.preview.getVMode() > 0){
5761        CGlobalInfo::m_options.dump(stdout);
5762        CGlobalInfo::m_memory_cfg.Dump(stdout);
5763    }
5764
5765
5766    if (update_dpdk_args() < 0) {
5767        return -1;
5768    }
5769
5770    CParserOption * po=&CGlobalInfo::m_options;
5771
5772
5773    if ( CGlobalInfo::m_options.preview.getVMode() == 0  ) {
5774        rte_set_log_level(1);
5775
5776    }
5777    uid_t uid;
5778    uid = geteuid ();
5779    if ( uid != 0 ) {
5780        printf("ERROR you must run with superuser priviliges \n");
5781        printf("User id   : %d \n",uid);
5782        printf("try 'sudo' %s \n",argv[0]);
5783        return (-1);
5784    }
5785
5786    /* set affinity to the master core as default */
5787    cpu_set_t mask;
5788    CPU_ZERO(&mask);
5789    CPU_SET(CGlobalInfo::m_socket.get_master_phy_id(), &mask);
5790    pthread_setaffinity_np(pthread_self(), sizeof(mask), &mask);
5791
5792    ret = rte_eal_init(global_dpdk_args_num, (char **)global_dpdk_args);
5793    if (ret < 0){
5794        printf(" You might need to run ./trex-cfg  once  \n");
5795        rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
5796    }
5797    set_driver();
5798    if (CGlobalInfo::m_options.m_run_mode == CParserOption::RUN_MODE_DUMP_INFO) {
5799        dump_interfaces_info();
5800        exit(0);
5801    }
5802    reorder_dpdk_ports();
5803    time_init();
5804
5805    /* check if we are in simulation mode */
5806    if ( CGlobalInfo::m_options.out_file != "" ){
5807        printf(" t-rex simulation mode into %s \n",CGlobalInfo::m_options.out_file.c_str());
5808        return ( sim_load_list_of_cap_files(&CGlobalInfo::m_options) );
5809    }
5810
5811    if ( !g_trex.Create() ){
5812        exit(1);
5813    }
5814
5815    if (po->preview.get_is_rx_check_enable() &&  (po->m_rx_check_sample< get_min_sample_rate()) ) {
5816        po->m_rx_check_sample = get_min_sample_rate();
5817        printf("Warning:rx check sample rate should not be lower than %d. Setting it to %d\n",get_min_sample_rate(),get_min_sample_rate());
5818    }
5819
5820    /* set dump mode */
5821    g_trex.m_io_modes.set_mode((CTrexGlobalIoMode::CliDumpMode)CGlobalInfo::m_options.m_io_mode);
5822
5823    /* disable WD if needed */
5824    bool wd_enable = (CGlobalInfo::m_options.preview.getWDDisable() ? false : true);
5825    TrexWatchDog::getInstance().init(wd_enable);
5826
5827    g_trex.m_sl_rx_running = false;
5828    if ( get_is_stateless() ) {
5829        g_trex.start_master_stateless();
5830
5831    }else{
5832        g_trex.start_master_statefull();
5833    }
5834
5835    // For unit testing of HW rules and queues configuration. Just send some packets and exit.
5836    if (CGlobalInfo::m_options.m_debug_pkt_proto != 0) {
5837        CTrexDpdkParams dpdk_p;
5838        get_ex_drv()->get_dpdk_drv_params(dpdk_p);
5839        CTrexDebug debug = CTrexDebug(g_trex.m_ports, g_trex.m_max_ports
5840                                      , dpdk_p.rx_data_q_num + dpdk_p.rx_drop_q_num);
5841        int ret;
5842
5843        if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_TOGGLE_TEST) {
5844            // Unit test: toggle many times between receive all and stateless/stateful modes,
5845            // to test resiliency of add/delete fdir filters
5846            printf("Starting receive all/normal mode toggle unit test\n");
5847            for (int i = 0; i < 100; i++) {
5848                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5849                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5850                    pif->set_port_rcv_all(true);
5851                }
5852                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY_RCV_ALL);
5853                if (ret != 0) {
5854                    printf("Iteration %d: Receive all mode failed\n", i);
5855                    exit(ret);
5856                }
5857
5858                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5859                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5860                    CTRexExtendedDriverDb::Ins()->get_drv()->configure_rx_filter_rules(pif);
5861                }
5862
5863                ret = debug.test_send(D_PKT_TYPE_HW_VERIFY);
5864                if (ret != 0) {
5865                    printf("Iteration %d: Normal mode failed\n", i);
5866                    exit(ret);
5867                }
5868
5869                printf("Iteration %d OK\n", i);
5870            }
5871            exit(0);
5872        } else {
5873            if (CGlobalInfo::m_options.m_debug_pkt_proto == D_PKT_TYPE_HW_VERIFY_RCV_ALL) {
5874                for (int port_id = 0; port_id < g_trex.m_max_ports; port_id++) {
5875                    CPhyEthIF *pif = &g_trex.m_ports[port_id];
5876                    pif->set_port_rcv_all(true);
5877                }
5878            }
5879            ret = debug.test_send(CGlobalInfo::m_options.m_debug_pkt_proto);
5880            exit(ret);
5881        }
5882    }
5883
5884    // in case of client config, we already run pretest
5885    if (! CGlobalInfo::m_options.preview.get_is_client_cfg_enable()) {
5886        g_trex.pre_test();
5887    }
5888
5889    // after doing all needed ARP resolution, we need to flush queues, and stop our drop queue
5890    g_trex.ixgbe_rx_queue_flush();
5891    for (int i = 0; i < g_trex.m_max_ports; i++) {
5892        CPhyEthIF *_if = &g_trex.m_ports[i];
5893        _if->stop_rx_drop_queue();
5894    }
5895
5896    if ( CGlobalInfo::m_options.is_latency_enabled()
5897         && (CGlobalInfo::m_options.m_latency_prev > 0)) {
5898        uint32_t pkts = CGlobalInfo::m_options.m_latency_prev *
5899            CGlobalInfo::m_options.m_latency_rate;
5900        printf("Starting warm up phase for %d sec\n",CGlobalInfo::m_options.