/* testpmd.c revision 3d9b7210 */
1/*-
2 *   BSD LICENSE
3 *
4 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 *   All rights reserved.
6 *
7 *   Redistribution and use in source and binary forms, with or without
8 *   modification, are permitted provided that the following conditions
9 *   are met:
10 *
11 *     * Redistributions of source code must retain the above copyright
12 *       notice, this list of conditions and the following disclaimer.
13 *     * Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in
15 *       the documentation and/or other materials provided with the
16 *       distribution.
17 *     * Neither the name of Intel Corporation nor the names of its
18 *       contributors may be used to endorse or promote products derived
19 *       from this software without specific prior written permission.
20 *
21 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <stdarg.h>
35#include <stdio.h>
36#include <stdlib.h>
37#include <signal.h>
38#include <string.h>
39#include <time.h>
40#include <fcntl.h>
41#include <sys/types.h>
42#include <errno.h>
43
44#include <sys/queue.h>
45#include <sys/stat.h>
46
47#include <stdint.h>
48#include <unistd.h>
49#include <inttypes.h>
50
51#include <rte_common.h>
52#include <rte_errno.h>
53#include <rte_byteorder.h>
54#include <rte_log.h>
55#include <rte_debug.h>
56#include <rte_cycles.h>
57#include <rte_memory.h>
58#include <rte_memcpy.h>
59#include <rte_memzone.h>
60#include <rte_launch.h>
61#include <rte_eal.h>
62#include <rte_per_lcore.h>
63#include <rte_lcore.h>
64#include <rte_atomic.h>
65#include <rte_branch_prediction.h>
66#include <rte_mempool.h>
67#include <rte_malloc.h>
68#include <rte_mbuf.h>
69#include <rte_interrupts.h>
70#include <rte_pci.h>
71#include <rte_ether.h>
72#include <rte_ethdev.h>
73#include <rte_dev.h>
74#include <rte_string_fns.h>
75#ifdef RTE_LIBRTE_PMD_XENVIRT
76#include <rte_eth_xenvirt.h>
77#endif
78#ifdef RTE_LIBRTE_PDUMP
79#include <rte_pdump.h>
80#endif
81
82#include "testpmd.h"
83
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 * Selectable at run time with "set fwd <mode>"; see each engine's module
 * for the per-packet processing it performs.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL, /* sentinel: end of the engine list */
};
160
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled; /**< Retry failed TX bursts when non-zero. */
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US; /**< Delay between TX retries, in us. */
uint32_t burst_tx_retry_num = BURST_TX_RETRIES; /**< Number of TX retries per burst. */

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* current configuration is in DCB or not, 0 means it is not in DCB mode */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
205
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 * RTE_PMD_PARAM_UNSET means "use the PMD's own default".
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoids to flush all the RX streams before starts forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoids to check link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
273
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

/* Flow Director configuration: disabled by default, all-ones field masks. */
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};

volatile int test_done = 1; /* stop packet forwarding when set to 1. */

/* Queue-to-statistics-register mapping tables and their current sizes. */
struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/* Highest CPU socket id seen at startup, plus one. */
unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
334
335/*
336 * Find next enabled port
337 */
338portid_t
339find_next_port(portid_t p, struct rte_port *ports, int size)
340{
341	if (ports == NULL)
342		rte_exit(-EINVAL, "failed to find a next port id\n");
343
344	while ((p < size) && (ports[p].enabled == 0))
345		p++;
346	return p;
347}
348
349/*
350 * Setup default configuration.
351 */
352static void
353set_default_fwd_lcores_config(void)
354{
355	unsigned int i;
356	unsigned int nb_lc;
357	unsigned int sock_num;
358
359	nb_lc = 0;
360	for (i = 0; i < RTE_MAX_LCORE; i++) {
361		sock_num = rte_lcore_to_socket_id(i) + 1;
362		if (sock_num > max_socket) {
363			if (sock_num > RTE_MAX_NUMA_NODES)
364				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
365			max_socket = sock_num;
366		}
367		if (!rte_lcore_is_enabled(i))
368			continue;
369		if (i == rte_get_master_lcore())
370			continue;
371		fwd_lcores_cpuids[nb_lc++] = i;
372	}
373	nb_lcores = (lcoreid_t) nb_lc;
374	nb_cfg_lcores = nb_lcores;
375	nb_fwd_lcores = 1;
376}
377
378static void
379set_def_peer_eth_addrs(void)
380{
381	portid_t i;
382
383	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
384		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
385		peer_eth_addrs[i].addr_bytes[5] = i;
386	}
387}
388
389static void
390set_default_fwd_ports_config(void)
391{
392	portid_t pt_id;
393
394	for (pt_id = 0; pt_id < nb_ports; pt_id++)
395		fwd_ports_ids[pt_id] = pt_id;
396
397	nb_cfg_ports = nb_ports;
398	nb_fwd_ports = nb_ports;
399}
400
/*
 * Reset the whole forwarding configuration — lcores, peer Ethernet
 * addresses and port list — back to its defaults.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
408
/*
 * Configuration initialisation done once at init time.
 */

/*
 * Create the mbuf pool for the given CPU socket and dump it when verbose.
 * Allocation strategy, in order:
 *   1. Xen grant-table backed pool (RTE_LIBRTE_PMD_XENVIRT builds only);
 *   2. anonymous-mapped memory when --mp-anon was requested;
 *   3. the regular rte_pktmbuf_pool_create() path otherwise.
 * Exits the application if no pool could be created.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	/* Per-object size: mbuf header plus requested data room. */
	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			/* Build an empty pool, then back it with anonymous
			 * (not necessarily physically contiguous) memory. */
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			/* Initialize pool private area and every mbuf. */
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
469
470/*
471 * Check given socket id is valid or not with NUMA mode,
472 * if valid, return 0, else return -1
473 */
474static int
475check_socket_id(const unsigned int socket_id)
476{
477	static int warning_once = 0;
478
479	if (socket_id >= max_socket) {
480		if (!warning_once && numa_support)
481			printf("Warning: NUMA should be configured manually by"
482			       " using --port-numa-config and"
483			       " --ring-numa-config parameters along with"
484			       " --numa.\n");
485		warning_once = 1;
486		return -1;
487	}
488	return 0;
489}
490
/*
 * One-time configuration at startup: allocate the per-lcore forwarding
 * contexts, size and create the mbuf pool(s) (one per CPU socket when
 * NUMA support is on, a single pool otherwise), flag every port for
 * (re)configuration, bind each lcore to the mbuf pool of its socket and
 * finally build the forwarding streams.  Exits on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst case: full RX ring + per-lcore caches + full TX ring
		 * + one burst in flight. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket: explicit --port-numa-config
			 * wins, otherwise the device's own socket is used. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		/* A user-supplied total is split evenly across ports. */
		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						nb_mbuf,i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's socket
		 * has none. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
605
606
607void
608reconfig(portid_t new_port_id, unsigned socket_id)
609{
610	struct rte_port *port;
611
612	/* Reconfiguration of Ethernet ports. */
613	port = &ports[new_port_id];
614	rte_eth_dev_info_get(new_port_id, &port->dev_info);
615
616	/* set flag to initialize port/queue */
617	port->need_reconfig = 1;
618	port->need_reconfig_queues = 1;
619	port->socket_id = socket_id;
620
621	init_port_config();
622}
623
624
/*
 * (Re)build the forwarding stream array.
 * Validates nb_rxq/nb_txq against every port's capabilities, assigns each
 * port a socket id (NUMA-aware or UMA), then allocates one stream slot per
 * (port, queue) pair.  Returns 0 on success (including the no-change case),
 * -1 on invalid configuration; exits on allocation failure.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per queue of each port, using the larger queue count. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
						"failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
								" failed\n");
	}

	return 0;
}
705
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Display the burst-size distribution of an RX or TX stream: total number
 * of bursts and the share of the two most frequent burst sizes.
 *
 * @param rx_tx "RX" or "TX" label for the printed line.
 * @param pbs   per-stream histogram of burst sizes.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			/* New maximum: demote previous maximum to 2nd place. */
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			/* Fix: track the runner-up directly.  The original
			 * code only filled slot 1 when slot 0 was displaced,
			 * so a second-highest spread that never beat the
			 * maximum was silently dropped from the report. */
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
760
/*
 * Print the per-port forwarding statistics accumulated since the last
 * "start": RX/TX packet counts, drops, checksum errors (csum engine only)
 * and, when queue-to-register stats mapping is enabled, the per-register
 * queue counters.  Layout differs depending on whether stats mapping is in
 * use for the port.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Compact layout when no queue stats mapping is configured. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		/* Wider, right-aligned layout when stats mapping is on. */
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
843
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue).  Silently skipped when the stream saw no traffic.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
872
/*
 * Drain all RX queues of all forwarding ports before forwarding starts,
 * so stale packets do not pollute the statistics.  Two passes are made
 * with a 10 ms pause in between; each queue drain is bounded by a 1 s
 * timer in case the NIC keeps delivering packets.
 */
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				* testpmd can stuck in the below do while loop
				* if rte_eth_rx_burst() always returns nonzero
				* packets. So timer is added to exit this loop
				* after 1sec timer expiry.
				*/
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
917
918static void
919run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
920{
921	struct fwd_stream **fsm;
922	streamid_t nb_fs;
923	streamid_t sm_id;
924
925	fsm = &fwd_streams[fc->stream_idx];
926	nb_fs = fc->stream_nb;
927	do {
928		for (sm_id = 0; sm_id < nb_fs; sm_id++)
929			(*pkt_fwd)(fsm[sm_id]);
930	} while (! fc->stopped);
931}
932
933static int
934start_pkt_forward_on_core(void *fwd_arg)
935{
936	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
937			     cur_fwd_config.fwd_eng->packet_fwd);
938	return 0;
939}
940
941/*
942 * Run the TXONLY packet forwarding engine to send a single burst of packets.
943 * Used to start communication flows in network loopback test configurations.
944 */
945static int
946run_one_txonly_burst_on_core(void *fwd_arg)
947{
948	struct fwd_lcore *fwd_lc;
949	struct fwd_lcore tmp_lcore;
950
951	fwd_lc = (struct fwd_lcore *) fwd_arg;
952	tmp_lcore = *fwd_lc;
953	tmp_lcore.stopped = 1;
954	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
955	return 0;
956}
957
958/*
959 * Launch packet forwarding:
960 *     - Setup per-port forwarding context.
961 *     - launch logical cores with their forwarding configuration.
962 */
963static void
964launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
965{
966	port_fwd_begin_t port_fwd_begin;
967	unsigned int i;
968	unsigned int lc_id;
969	int diag;
970
971	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
972	if (port_fwd_begin != NULL) {
973		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
974			(*port_fwd_begin)(fwd_ports_ids[i]);
975	}
976	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
977		lc_id = fwd_lcores_cpuids[i];
978		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
979			fwd_lcores[i]->stopped = 0;
980			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
981						     fwd_lcores[i], lc_id);
982			if (diag != 0)
983				printf("launch lcore %u failed - diag=%d\n",
984				       lc_id, diag);
985		}
986	}
987}
988
/*
 * Launch packet forwarding configuration.
 * Validates the queue counts against the selected engine, checks that all
 * ports are started (and DCB-configured when in DCB mode), optionally
 * flushes stale RX packets, snapshots per-port stats, clears per-stream
 * counters, optionally fires `with_tx_first` initial TXONLY bursts, then
 * launches the forwarding lcores with the current engine.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* The selected engine must have the queues it needs. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	/* DCB mode: every forwarding port must be DCB-configured and more
	 * than one forwarding lcore is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if(!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats so the final report can show deltas. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset all per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally send `with_tx_first` single TXONLY bursts to prime
	 * loopback configurations before the real engine starts. */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1098
/*
 * Stop all forwarding lcores, fold per-stream counters into the port
 * accumulators, display per-port statistics as deltas relative to the
 * snapshot taken at start, then print the accumulated totals.
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Each forwarding loop polls its 'stopped' flag and then exits. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	/* Fold per-stream counters into their ports' accumulators. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	/* Per-port stats: subtract the snapshot to get this run's deltas. */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1234
1235void
1236dev_set_link_up(portid_t pid)
1237{
1238	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1239		printf("\nSet link up fail.\n");
1240}
1241
1242void
1243dev_set_link_down(portid_t pid)
1244{
1245	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1246		printf("\nSet link down fail.\n");
1247}
1248
1249static int
1250all_ports_started(void)
1251{
1252	portid_t pi;
1253	struct rte_port *port;
1254
1255	FOREACH_PORT(pi, ports) {
1256		port = &ports[pi];
1257		/* Check if there is a port which is not started */
1258		if ((port->port_status != RTE_PORT_STARTED) &&
1259			(port->slave_flag == 0))
1260			return 0;
1261	}
1262
1263	/* No port is not started */
1264	return 1;
1265}
1266
1267int
1268all_ports_stopped(void)
1269{
1270	portid_t pi;
1271	struct rte_port *port;
1272
1273	FOREACH_PORT(pi, ports) {
1274		port = &ports[pi];
1275		if ((port->port_status != RTE_PORT_STOPPED) &&
1276			(port->slave_flag == 0))
1277			return 0;
1278	}
1279
1280	return 1;
1281}
1282
1283int
1284port_is_started(portid_t port_id)
1285{
1286	if (port_id_is_invalid(port_id, ENABLED_WARN))
1287		return 0;
1288
1289	if (ports[port_id].port_status != RTE_PORT_STARTED)
1290		return 0;
1291
1292	return 1;
1293}
1294
1295static int
1296port_is_closed(portid_t port_id)
1297{
1298	if (port_id_is_invalid(port_id, ENABLED_WARN))
1299		return 0;
1300
1301	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1302		return 0;
1303
1304	return 1;
1305}
1306
/*
 * Start port 'pid', or every port when pid == RTE_PORT_ALL.
 * Reconfigures the device and/or its RX/TX queues first when the
 * corresponding need_reconfig* flags are set. Returns 0 on success
 * (or when nothing matched), -1 on a configuration/setup failure.
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Take ownership of the port state machine. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Prefer the configured ring NUMA node. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* need_check_link_status stayed -1 when no port matched 'pid'. */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1460
/*
 * Stop port 'pid', or every port when pid == RTE_PORT_ALL. Ports that
 * are still in the forwarding configuration while forwarding runs, or
 * that are bonding slaves, are skipped with a message. Leaving DCB
 * test mode is a side effect.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only a STARTED port can be stopped; skip silently else. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1509
/*
 * Close port 'pid', or every port when pid == RTE_PORT_ALL. A port is
 * only closed when it is in STOPPED state and is neither part of the
 * running forwarding configuration nor a bonding slave.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset(CLOSED, CLOSED) is used as an atomic state probe. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1557
/*
 * Hot-plug a new device named by 'identifier' (device identifier as
 * accepted by rte_eth_dev_attach()), reconfigure it, enable
 * promiscuous mode and leave the new port in STOPPED state.
 */
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
1589
1590void
1591detach_port(uint8_t port_id)
1592{
1593	char name[RTE_ETH_NAME_MAX_LEN];
1594
1595	printf("Detaching a port...\n");
1596
1597	if (!port_is_closed(port_id)) {
1598		printf("Please close port first\n");
1599		return;
1600	}
1601
1602	if (rte_eth_dev_detach(port_id, name))
1603		return;
1604
1605	ports[port_id].enabled = 0;
1606	nb_ports = rte_eth_dev_count();
1607
1608	printf("Port '%s' is detached. Now total ports is %d\n",
1609			name, nb_ports);
1610	printf("Done\n");
1611	return;
1612}
1613
1614void
1615pmd_test_exit(void)
1616{
1617	portid_t pt_id;
1618
1619	if (test_done == 0)
1620		stop_packet_forwarding();
1621
1622	if (ports != NULL) {
1623		no_link_check = 1;
1624		FOREACH_PORT(pt_id, ports) {
1625			printf("\nShutting down port %d...\n", pt_id);
1626			fflush(stdout);
1627			stop_port(pt_id);
1628			close_port(pt_id);
1629		}
1630	}
1631	printf("\nBye...\n");
1632}
1633
/* Prototype of a test-menu command handler. */
typedef void (*cmd_func_t)(void);
/* Associates a command name with the function implementing it. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Entry count of the pmd_test_menu table (defined elsewhere). */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1641
1642/* Check the link status of all ports in up to 9s, and print them finally */
1643static void
1644check_all_ports_link_status(uint32_t port_mask)
1645{
1646#define CHECK_INTERVAL 100 /* 100ms */
1647#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1648	uint8_t portid, count, all_ports_up, print_flag = 0;
1649	struct rte_eth_link link;
1650
1651	printf("Checking link statuses...\n");
1652	fflush(stdout);
1653	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1654		all_ports_up = 1;
1655		FOREACH_PORT(portid, ports) {
1656			if ((port_mask & (1 << portid)) == 0)
1657				continue;
1658			memset(&link, 0, sizeof(link));
1659			rte_eth_link_get_nowait(portid, &link);
1660			/* print link status if flag set */
1661			if (print_flag == 1) {
1662				if (link.link_status)
1663					printf("Port %d Link Up - speed %u "
1664						"Mbps - %s\n", (uint8_t)portid,
1665						(unsigned)link.link_speed,
1666				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1667					("full-duplex") : ("half-duplex\n"));
1668				else
1669					printf("Port %d Link Down\n",
1670						(uint8_t)portid);
1671				continue;
1672			}
1673			/* clear all_ports_up flag if any link down */
1674			if (link.link_status == ETH_LINK_DOWN) {
1675				all_ports_up = 0;
1676				break;
1677			}
1678		}
1679		/* after finally printing all link status, get out */
1680		if (print_flag == 1)
1681			break;
1682
1683		if (all_ports_up == 0) {
1684			fflush(stdout);
1685			rte_delay_ms(CHECK_INTERVAL);
1686		}
1687
1688		/* set the print_flag if all ports up or timeout */
1689		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1690			print_flag = 1;
1691		}
1692	}
1693}
1694
1695static int
1696set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1697{
1698	uint16_t i;
1699	int diag;
1700	uint8_t mapping_found = 0;
1701
1702	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1703		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1704				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1705			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1706					tx_queue_stats_mappings[i].queue_id,
1707					tx_queue_stats_mappings[i].stats_counter_id);
1708			if (diag != 0)
1709				return diag;
1710			mapping_found = 1;
1711		}
1712	}
1713	if (mapping_found)
1714		port->tx_queue_stats_mapping_enabled = 1;
1715	return 0;
1716}
1717
1718static int
1719set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1720{
1721	uint16_t i;
1722	int diag;
1723	uint8_t mapping_found = 0;
1724
1725	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1726		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1727				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1728			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1729					rx_queue_stats_mappings[i].queue_id,
1730					rx_queue_stats_mappings[i].stats_counter_id);
1731			if (diag != 0)
1732				return diag;
1733			mapping_found = 1;
1734		}
1735	}
1736	if (mapping_found)
1737		port->rx_queue_stats_mapping_enabled = 1;
1738	return 0;
1739}
1740
1741static void
1742map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1743{
1744	int diag = 0;
1745
1746	diag = set_tx_queue_stats_mapping_registers(pi, port);
1747	if (diag != 0) {
1748		if (diag == -ENOTSUP) {
1749			port->tx_queue_stats_mapping_enabled = 0;
1750			printf("TX queue stats mapping not supported port id=%d\n", pi);
1751		}
1752		else
1753			rte_exit(EXIT_FAILURE,
1754					"set_tx_queue_stats_mapping_registers "
1755					"failed for port id=%d diag=%d\n",
1756					pi, diag);
1757	}
1758
1759	diag = set_rx_queue_stats_mapping_registers(pi, port);
1760	if (diag != 0) {
1761		if (diag == -ENOTSUP) {
1762			port->rx_queue_stats_mapping_enabled = 0;
1763			printf("RX queue stats mapping not supported port id=%d\n", pi);
1764		}
1765		else
1766			rte_exit(EXIT_FAILURE,
1767					"set_rx_queue_stats_mapping_registers "
1768					"failed for port id=%d diag=%d\n",
1769					pi, diag);
1770	}
1771}
1772
1773static void
1774rxtx_port_config(struct rte_port *port)
1775{
1776	port->rx_conf = port->dev_info.default_rxconf;
1777	port->tx_conf = port->dev_info.default_txconf;
1778
1779	/* Check if any RX/TX parameters have been passed */
1780	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1781		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1782
1783	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1784		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1785
1786	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1787		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1788
1789	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1790		port->rx_conf.rx_free_thresh = rx_free_thresh;
1791
1792	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1793		port->rx_conf.rx_drop_en = rx_drop_en;
1794
1795	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1796		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1797
1798	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1799		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1800
1801	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1802		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1803
1804	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1805		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1806
1807	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1808		port->tx_conf.tx_free_thresh = tx_free_thresh;
1809
1810	if (txq_flags != RTE_PMD_PARAM_UNSET)
1811		port->tx_conf.txq_flags = txq_flags;
1812}
1813
/*
 * Apply the global command-line configuration to every enabled port:
 * RX mode, flow-director settings, RSS/VMDq multi-queue mode and the
 * per-queue RX/TX parameters. Also caches each port's MAC address and
 * programs the queue stats mappings.
 */
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		/* RSS only makes sense with more than one RX queue. */
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		/* Plain (non-DCB, no-VF) ports: RSS or no multi-queue. */
		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		/* With VFs present, use the VMDq flavour of RSS. */
		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
1860
1861void set_port_slave_flag(portid_t slave_pid)
1862{
1863	struct rte_port *port;
1864
1865	port = &ports[slave_pid];
1866	port->slave_flag = 1;
1867}
1868
1869void clear_port_slave_flag(portid_t slave_pid)
1870{
1871	struct rte_port *port;
1872
1873	port = &ports[slave_pid];
1874	port->slave_flag = 0;
1875}
1876
1877uint8_t port_is_bonding_slave(portid_t slave_pid)
1878{
1879	struct rte_port *port;
1880
1881	port = &ports[slave_pid];
1882	return port->slave_flag;
1883}
1884
/* VLAN IDs used by get_eth_dcb_conf() to map pools in DCB+VT mode. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1891
/*
 * Fill 'eth_conf' with a DCB configuration for 'num_tcs' traffic
 * classes: either combined with virtualization (DCB_VT_ENABLED, VMDq
 * pools mapped through the vlan_tags table) or plain DCB+RSS.
 * 'pfc_en' additionally advertises priority flow control. Returns 0.
 */
static  int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configrations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs leave room for 32 pools; 8 TCs only for 16. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		/* Spread the VLAN tags round-robin over the pools. */
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Identity mapping: user priority i -> traffic class i. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		/* Identity mapping: priority i -> traffic class i. */
		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
1958
/*
 * Put port 'pid' into DCB mode: build the DCB device configuration,
 * resize the global nb_rxq/nb_txq to match the chosen traffic-class
 * layout, enable VLAN filtering and install one filter per vlan_tags
 * entry. Returns 0 on success, a negative value on failure.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2040
2041static void
2042init_port(void)
2043{
2044	portid_t pid;
2045
2046	/* Configuration of Ethernet ports. */
2047	ports = rte_zmalloc("testpmd: ports",
2048			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2049			    RTE_CACHE_LINE_SIZE);
2050	if (ports == NULL) {
2051		rte_exit(EXIT_FAILURE,
2052				"rte_zmalloc(%d struct rte_port) failed\n",
2053				RTE_MAX_ETHPORTS);
2054	}
2055
2056	/* enabled allocated ports */
2057	for (pid = 0; pid < nb_ports; pid++)
2058		ports[pid].enabled = 1;
2059}
2060
/* Tear down testpmd state and terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2067
/*
 * SIGINT/SIGTERM handler: clean up, then re-raise the signal with the
 * default disposition so the process exits with the expected status.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		force_quit();
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
2084
/*
 * testpmd entry point: initialize the EAL, allocate the port table,
 * parse application arguments, configure and start all ports, then
 * either enter the interactive prompt or forward packets until enter
 * is pressed on stdin.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip the EAL arguments already consumed by rte_eal_init(). */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
2160