/* testpmd.c revision f7a9461e */
1/*-
2 *   BSD LICENSE
3 *
4 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 *   All rights reserved.
6 *
7 *   Redistribution and use in source and binary forms, with or without
8 *   modification, are permitted provided that the following conditions
9 *   are met:
10 *
11 *     * Redistributions of source code must retain the above copyright
12 *       notice, this list of conditions and the following disclaimer.
13 *     * Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in
15 *       the documentation and/or other materials provided with the
16 *       distribution.
17 *     * Neither the name of Intel Corporation nor the names of its
18 *       contributors may be used to endorse or promote products derived
19 *       from this software without specific prior written permission.
20 *
21 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <stdarg.h>
35#include <stdio.h>
36#include <stdlib.h>
37#include <signal.h>
38#include <string.h>
39#include <time.h>
40#include <fcntl.h>
41#include <sys/types.h>
42#include <errno.h>
43
44#include <sys/queue.h>
45#include <sys/stat.h>
46
47#include <stdint.h>
48#include <unistd.h>
49#include <inttypes.h>
50
51#include <rte_common.h>
52#include <rte_errno.h>
53#include <rte_byteorder.h>
54#include <rte_log.h>
55#include <rte_debug.h>
56#include <rte_cycles.h>
57#include <rte_memory.h>
58#include <rte_memcpy.h>
59#include <rte_memzone.h>
60#include <rte_launch.h>
61#include <rte_eal.h>
62#include <rte_per_lcore.h>
63#include <rte_lcore.h>
64#include <rte_atomic.h>
65#include <rte_branch_prediction.h>
66#include <rte_mempool.h>
67#include <rte_malloc.h>
68#include <rte_mbuf.h>
69#include <rte_interrupts.h>
70#include <rte_pci.h>
71#include <rte_ether.h>
72#include <rte_ethdev.h>
73#include <rte_dev.h>
74#include <rte_string_fns.h>
75#ifdef RTE_LIBRTE_PMD_XENVIRT
76#include <rte_eth_xenvirt.h>
77#endif
78#ifdef RTE_LIBRTE_PDUMP
79#include <rte_pdump.h>
80#endif
81
82#include "testpmd.h"
83
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not configured.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines. NULL-terminated table of all available engines;
 * the active one is selected through cur_fwd_eng.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
160
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not; 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether the dcb is in testing status */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 * RTE_PMD_PARAM_UNSET means "use the PMD's default".
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */
258
259/*
260 * Avoids to check link status when starting/stopping a port.
261 */
262uint8_t no_link_check = 0; /* check by default */
263
264/*
265 * NIC bypass mode configuration options.
266 */
267#ifdef RTE_NIC_BYPASS
268
269/* The NIC bypass watchdog timeout. */
270uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
271
272#endif
273
274/*
275 * Ethernet device configuration.
276 */
277struct rte_eth_rxmode rx_mode = {
278	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
279	.split_hdr_size = 0,
280	.header_split   = 0, /**< Header Split disabled. */
281	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
282	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
283	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
284	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
285	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
286	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
287};
288
289struct rte_fdir_conf fdir_conf = {
290	.mode = RTE_FDIR_MODE_NONE,
291	.pballoc = RTE_FDIR_PBALLOC_64K,
292	.status = RTE_FDIR_REPORT_STATUS,
293	.mask = {
294		.vlan_tci_mask = 0xFFEF,
295		.ipv4_mask     = {
296			.src_ip = 0xFFFFFFFF,
297			.dst_ip = 0xFFFFFFFF,
298		},
299		.ipv6_mask     = {
300			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
301			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
302		},
303		.src_port_mask = 0xFFFF,
304		.dst_port_mask = 0xFFFF,
305		.mac_addr_byte_mask = 0xFF,
306		.tunnel_type_mask = 1,
307		.tunnel_id_mask = 0xFFFFFFFF,
308	},
309	.drop_queue = 127,
310};
311
312volatile int test_done = 1; /* stop packet forwarding when set to 1. */
313
314struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
315struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
316
317struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
318struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
319
320uint16_t nb_tx_queue_stats_mappings = 0;
321uint16_t nb_rx_queue_stats_mappings = 0;
322
323unsigned max_socket = 0;
324
325/* Forward function declarations */
326static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
327static void check_all_ports_link_status(uint32_t port_mask);
328
329/*
330 * Check if all the ports are started.
331 * If yes, return positive value. If not, return zero.
332 */
333static int all_ports_started(void);
334
335/*
336 * Find next enabled port
337 */
338portid_t
339find_next_port(portid_t p, struct rte_port *ports, int size)
340{
341	if (ports == NULL)
342		rte_exit(-EINVAL, "failed to find a next port id\n");
343
344	while ((p < size) && (ports[p].enabled == 0))
345		p++;
346	return p;
347}
348
349/*
350 * Setup default configuration.
351 */
352static void
353set_default_fwd_lcores_config(void)
354{
355	unsigned int i;
356	unsigned int nb_lc;
357	unsigned int sock_num;
358
359	nb_lc = 0;
360	for (i = 0; i < RTE_MAX_LCORE; i++) {
361		if (!rte_lcore_is_enabled(i))
362			continue;
363		sock_num = rte_lcore_to_socket_id(i) + 1;
364		if (sock_num > max_socket) {
365			if (sock_num > RTE_MAX_NUMA_NODES)
366				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
367			max_socket = sock_num;
368		}
369		if (i == rte_get_master_lcore())
370			continue;
371		fwd_lcores_cpuids[nb_lc++] = i;
372	}
373	nb_lcores = (lcoreid_t) nb_lc;
374	nb_cfg_lcores = nb_lcores;
375	nb_fwd_lcores = 1;
376}
377
378static void
379set_def_peer_eth_addrs(void)
380{
381	portid_t i;
382
383	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
384		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
385		peer_eth_addrs[i].addr_bytes[5] = i;
386	}
387}
388
389static void
390set_default_fwd_ports_config(void)
391{
392	portid_t pt_id;
393
394	for (pt_id = 0; pt_id < nb_ports; pt_id++)
395		fwd_ports_ids[pt_id] = pt_id;
396
397	nb_cfg_ports = nb_ports;
398	nb_fwd_ports = nb_ports;
399}
400
/*
 * Reset the whole forwarding configuration to its defaults:
 * lcore set, peer Ethernet addresses and the forwarding port list.
 */
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
408
409/*
410 * Configuration initialisation done once at init time.
411 */
412static void
413mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
414		 unsigned int socket_id)
415{
416	char pool_name[RTE_MEMPOOL_NAMESIZE];
417	struct rte_mempool *rte_mp = NULL;
418	uint32_t mb_size;
419
420	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
421	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
422
423	RTE_LOG(INFO, USER1,
424		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
425		pool_name, nb_mbuf, mbuf_seg_size, socket_id);
426
427#ifdef RTE_LIBRTE_PMD_XENVIRT
428	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
429		(unsigned) mb_mempool_cache,
430		sizeof(struct rte_pktmbuf_pool_private),
431		rte_pktmbuf_pool_init, NULL,
432		rte_pktmbuf_init, NULL,
433		socket_id, 0);
434#endif
435
436	/* if the former XEN allocation failed fall back to normal allocation */
437	if (rte_mp == NULL) {
438		if (mp_anon != 0) {
439			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
440				mb_size, (unsigned) mb_mempool_cache,
441				sizeof(struct rte_pktmbuf_pool_private),
442				socket_id, 0);
443			if (rte_mp == NULL)
444				goto err;
445
446			if (rte_mempool_populate_anon(rte_mp) == 0) {
447				rte_mempool_free(rte_mp);
448				rte_mp = NULL;
449				goto err;
450			}
451			rte_pktmbuf_pool_init(rte_mp, NULL);
452			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
453		} else {
454			/* wrapper to rte_mempool_create() */
455			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
456				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
457		}
458	}
459
460err:
461	if (rte_mp == NULL) {
462		rte_exit(EXIT_FAILURE,
463			"Creation of mbuf pool for socket %u failed: %s\n",
464			socket_id, rte_strerror(rte_errno));
465	} else if (verbose_level > 0) {
466		rte_mempool_dump(stdout, rte_mp);
467	}
468}
469
470/*
471 * Check given socket id is valid or not with NUMA mode,
472 * if valid, return 0, else return -1
473 */
474static int
475check_socket_id(const unsigned int socket_id)
476{
477	static int warning_once = 0;
478
479	if (socket_id >= max_socket) {
480		if (!warning_once && numa_support)
481			printf("Warning: NUMA should be configured manually by"
482			       " using --port-numa-config and"
483			       " --ring-numa-config parameters along with"
484			       " --numa.\n");
485		warning_once = 1;
486		return -1;
487	}
488	return 0;
489}
490
/*
 * One-time configuration initialisation: allocate the per-lcore
 * forwarding contexts, create the mbuf pool(s), and build the initial
 * port and forwarding-stream configuration.
 * Exits the application on any allocation failure.
 */
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	/* port_per_socket holds uint8_t entries, so the byte count equals
	 * RTE_MAX_NUMA_NODES. */
	memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				sizeof(struct fwd_lcore *) * nb_lcores,
				RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
							"failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
					       sizeof(struct fwd_lcore),
					       RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
								"failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			/* Count ports per socket; an explicit
			 * --port-numa-config mapping takes precedence over
			 * the device-reported socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		/* Worst-case sizing: RX + TX descriptors, per-lcore caches
		 * and one burst, scaled by the maximum number of ports. */
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		/* One mbuf pool per detected CPU socket. */
		for (i = 0; i < max_socket; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
						 socket_num);
	}

	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		/* Fall back to the socket-0 pool when the lcore's own
		 * socket has no pool. */
		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();
}
594
595
596void
597reconfig(portid_t new_port_id, unsigned socket_id)
598{
599	struct rte_port *port;
600
601	/* Reconfiguration of Ethernet ports. */
602	port = &ports[new_port_id];
603	rte_eth_dev_info_get(new_port_id, &port->dev_info);
604
605	/* set flag to initialize port/queue */
606	port->need_reconfig = 1;
607	port->need_reconfig_queues = 1;
608	port->socket_id = socket_id;
609
610	init_port_config();
611}
612
613
/*
 * (Re)build the array of forwarding streams (one per queue of each
 * port) and assign every port its NUMA socket id.
 * Returns 0 on success, -1 when the requested queue counts exceed a
 * device's capabilities or when no queues are configured.
 * Exits the application if stream allocation fails.
 */
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			/* An explicit --port-numa-config mapping takes
			 * precedence over the device-reported socket. */
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		}
		else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	/* One stream per queue: use the larger of the RX/TX queue counts. */
	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
699
700#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
/*
 * Print a summary of the burst-size distribution recorded in *pbs:
 * the total number of bursts and the share of the two most frequent
 * burst sizes, with everything else lumped into "others".
 * rx_tx is the label ("RX" or "TX") used in the output.
 */
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		/* Keep [0] as the highest count and [1] as the runner-up. */
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	/* Percentages are truncated by integer division; collapse the
	 * breakdown when a bucket rounds down to zero. */
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
756#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
757
/*
 * Print the forwarding statistics of one port: packet/drop/error
 * counters, optional checksum-engine counters, burst statistics and
 * per-queue counters when queue-stats mapping is enabled.
 */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	/* Two layouts: compact when no queue-stats mapping is active,
	 * column-aligned otherwise. */
	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		/* Checksum counters are only meaningful in csum mode. */
		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n",  stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}
	else {
		printf("  RX-packets:             %14"PRIu64"    RX-dropped:%14"PRIu64"    RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64"    Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs:             %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets:             %14"PRIu64"    TX-dropped:%14"PRIu64"    TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	/* Per-queue counters, read from the mapped stats registers. */
	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       "     RX-errors:%14"PRIu64
			       "    RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       "                                 TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
840
/*
 * Print the statistics of one forwarding stream (RX port/queue ->
 * TX port/queue). Streams that saw no traffic at all are skipped.
 */
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	/* Nothing to report for an idle stream. */
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
	       printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
			"%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
869
870static void
871flush_fwd_rx_queues(void)
872{
873	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
874	portid_t  rxp;
875	portid_t port_id;
876	queueid_t rxq;
877	uint16_t  nb_rx;
878	uint16_t  i;
879	uint8_t   j;
880	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
881	uint64_t timer_period;
882
883	/* convert to number of cycles */
884	timer_period = rte_get_timer_hz(); /* 1 second timeout */
885
886	for (j = 0; j < 2; j++) {
887		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
888			for (rxq = 0; rxq < nb_rxq; rxq++) {
889				port_id = fwd_ports_ids[rxp];
890				/**
891				* testpmd can stuck in the below do while loop
892				* if rte_eth_rx_burst() always returns nonzero
893				* packets. So timer is added to exit this loop
894				* after 1sec timer expiry.
895				*/
896				prev_tsc = rte_rdtsc();
897				do {
898					nb_rx = rte_eth_rx_burst(port_id, rxq,
899						pkts_burst, MAX_PKT_BURST);
900					for (i = 0; i < nb_rx; i++)
901						rte_pktmbuf_free(pkts_burst[i]);
902
903					cur_tsc = rte_rdtsc();
904					diff_tsc = cur_tsc - prev_tsc;
905					timer_tsc += diff_tsc;
906				} while ((nb_rx > 0) &&
907					(timer_tsc < timer_period));
908				timer_tsc = 0;
909			}
910		}
911		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
912	}
913}
914
915static void
916run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
917{
918	struct fwd_stream **fsm;
919	streamid_t nb_fs;
920	streamid_t sm_id;
921
922	fsm = &fwd_streams[fc->stream_idx];
923	nb_fs = fc->stream_nb;
924	do {
925		for (sm_id = 0; sm_id < nb_fs; sm_id++)
926			(*pkt_fwd)(fsm[sm_id]);
927	} while (! fc->stopped);
928}
929
930static int
931start_pkt_forward_on_core(void *fwd_arg)
932{
933	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
934			     cur_fwd_config.fwd_eng->packet_fwd);
935	return 0;
936}
937
938/*
939 * Run the TXONLY packet forwarding engine to send a single burst of packets.
940 * Used to start communication flows in network loopback test configurations.
941 */
942static int
943run_one_txonly_burst_on_core(void *fwd_arg)
944{
945	struct fwd_lcore *fwd_lc;
946	struct fwd_lcore tmp_lcore;
947
948	fwd_lc = (struct fwd_lcore *) fwd_arg;
949	tmp_lcore = *fwd_lc;
950	tmp_lcore.stopped = 1;
951	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
952	return 0;
953}
954
955/*
956 * Launch packet forwarding:
957 *     - Setup per-port forwarding context.
958 *     - launch logical cores with their forwarding configuration.
959 */
960static void
961launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
962{
963	port_fwd_begin_t port_fwd_begin;
964	unsigned int i;
965	unsigned int lc_id;
966	int diag;
967
968	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
969	if (port_fwd_begin != NULL) {
970		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
971			(*port_fwd_begin)(fwd_ports_ids[i]);
972	}
973	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
974		lc_id = fwd_lcores_cpuids[i];
975		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
976			fwd_lcores[i]->stopped = 0;
977			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
978						     fwd_lcores[i], lc_id);
979			if (diag != 0)
980				printf("launch lcore %u failed - diag=%d\n",
981				       lc_id, diag);
982		}
983	}
984}
985
986/*
987 * Update the forward ports list.
988 */
989void
990update_fwd_ports(portid_t new_pid)
991{
992	unsigned int i;
993	unsigned int new_nb_fwd_ports = 0;
994	int move = 0;
995
996	for (i = 0; i < nb_fwd_ports; ++i) {
997		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
998			move = 1;
999		else if (move)
1000			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1001		else
1002			new_nb_fwd_ports++;
1003	}
1004	if (new_pid < RTE_MAX_ETHPORTS)
1005		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1006
1007	nb_fwd_ports = new_nb_fwd_ports;
1008	nb_cfg_ports = new_nb_fwd_ports;
1009}
1010
1011/*
1012 * Launch packet forwarding configuration.
1013 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t  port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	/* Reject mode/queue combinations that cannot forward anything. */
	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	/* Any other engine needs both RX and TX queues. */
	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	/* test_done == 0 means a forwarding run is already in progress. */
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}


	/* In DCB mode every forwarding port must be DCB-configured and
	 * more than one forwarding core is required. */
	if(dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
                                       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode,the nb forwarding cores "
                               "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	/* Drain stale packets left in the RX queues unless disabled. */
	if(!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	/* Snapshot current HW stats per port so the final report can show
	 * only the traffic of this run (deltas are taken at stop time). */
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	/* Reset per-stream software counters. */
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	/* Optionally run the tx-only engine with_tx_first times first
	 * (used e.g. to prime the other side in loopback setups). */
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	/* Launch the configured forwarding engine on all forwarding cores. */
	launch_packet_forwarding(start_pkt_forward_on_core);
}
1117
/*
 * Stop the current forwarding run: signal all forwarding lcores to stop,
 * wait for them, run the engine's per-port end callback, fold per-stream
 * software counters into the per-port totals, and print per-port plus
 * accumulated statistics for this run only (deltas against the snapshot
 * taken in start_packet_forwarding()).
 */
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t  port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	/* Forwarding loops poll this flag and exit when it is set. */
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		/* With more streams than ports, per-stream stats cannot be
		 * attributed to a single port: display them per stream and
		 * detach the stream pointers from the ports.  Otherwise the
		 * one stream per port is recorded on the port itself. */
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		/* Accumulate per-stream drop/checksum counters into the
		 * owning ports. */
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					 fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
							rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					 fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
							rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		/* Subtract the snapshot taken at start so only this run's
		 * traffic is reported; clear the snapshot afterwards. */
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	/* Accumulated totals across all forwarding ports. */
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
1253
1254void
1255dev_set_link_up(portid_t pid)
1256{
1257	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1258		printf("\nSet link up fail.\n");
1259}
1260
1261void
1262dev_set_link_down(portid_t pid)
1263{
1264	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1265		printf("\nSet link down fail.\n");
1266}
1267
1268static int
1269all_ports_started(void)
1270{
1271	portid_t pi;
1272	struct rte_port *port;
1273
1274	FOREACH_PORT(pi, ports) {
1275		port = &ports[pi];
1276		/* Check if there is a port which is not started */
1277		if ((port->port_status != RTE_PORT_STARTED) &&
1278			(port->slave_flag == 0))
1279			return 0;
1280	}
1281
1282	/* No port is not started */
1283	return 1;
1284}
1285
1286int
1287all_ports_stopped(void)
1288{
1289	portid_t pi;
1290	struct rte_port *port;
1291
1292	FOREACH_PORT(pi, ports) {
1293		port = &ports[pi];
1294		if ((port->port_status != RTE_PORT_STOPPED) &&
1295			(port->slave_flag == 0))
1296			return 0;
1297	}
1298
1299	return 1;
1300}
1301
1302int
1303port_is_started(portid_t port_id)
1304{
1305	if (port_id_is_invalid(port_id, ENABLED_WARN))
1306		return 0;
1307
1308	if (ports[port_id].port_status != RTE_PORT_STARTED)
1309		return 0;
1310
1311	return 1;
1312}
1313
1314static int
1315port_is_closed(portid_t port_id)
1316{
1317	if (port_id_is_invalid(port_id, ENABLED_WARN))
1318		return 0;
1319
1320	if (ports[port_id].port_status != RTE_PORT_CLOSED)
1321		return 0;
1322
1323	return 1;
1324}
1325
/*
 * Start one port (or all ports when pid == RTE_PORT_ALL): (re)configure the
 * device and its RX/TX queues if flagged, then start it.  Port state is
 * driven through an atomic STOPPED -> HANDLING -> STARTED transition so
 * concurrent commands cannot race on the same port.
 *
 * Returns 0 on success, -1 on a configuration/queue-setup failure (the
 * corresponding need_reconfig* flag is re-armed so the next attempt
 * retries the failed step).
 */
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if(dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		/* Claim the port: only a STOPPED port may be started. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
						 RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				/* Honor a per-port TX ring NUMA override when
				 * NUMA support is on. */
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd,port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				/* RX queues additionally need an mbuf pool on
				 * the chosen socket. */
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,rxring_numa[pi],
					     &(port->rx_conf),mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
					     nb_rxd,port->socket_id,
					     &(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
							"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	/* need_check_link_status: -1 = no port matched pid, 0 = matched but
	 * none started, 1 = at least one started. */
	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
1479
/*
 * Stop one port (or all ports when pid == RTE_PORT_ALL).  Ports still in a
 * forwarding configuration or acting as bonding slaves are skipped with a
 * message.  State transitions STARTED -> HANDLING -> STOPPED atomically so
 * concurrent commands cannot race on the same port.
 */
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	/* Stopping ports ends any DCB test configuration. */
	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* Only a STARTED port can be stopped; silently skip others. */
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
						RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
1528
/*
 * Close one port (or all ports when pid == RTE_PORT_ALL).  A port must be
 * stopped, out of the forwarding configuration and not a bonding slave
 * before it can be closed.  State transitions STOPPED -> HANDLING ->
 * CLOSED atomically so concurrent commands cannot race on the same port.
 */
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		/* cmpset with equal old/new values is a pure atomic read:
		 * detect an already-closed port without changing state. */
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
1576
/*
 * Hot-plug a new device identified by a PCI address or virtual-device
 * string, then configure it, enable promiscuous mode, and append it to
 * the forwarding port list in the STOPPED state.
 */
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	/* rte_eth_dev_attach() fills in the new port id on success. */
	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	/* Append the new port to the forwarding port list. */
	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
1610
1611void
1612detach_port(uint8_t port_id)
1613{
1614	char name[RTE_ETH_NAME_MAX_LEN];
1615
1616	printf("Detaching a port...\n");
1617
1618	if (!port_is_closed(port_id)) {
1619		printf("Please close port first\n");
1620		return;
1621	}
1622
1623	if (rte_eth_dev_detach(port_id, name))
1624		return;
1625
1626	ports[port_id].enabled = 0;
1627	nb_ports = rte_eth_dev_count();
1628
1629	update_fwd_ports(RTE_MAX_ETHPORTS);
1630
1631	printf("Port '%s' is detached. Now total ports is %d\n",
1632			name, nb_ports);
1633	printf("Done\n");
1634	return;
1635}
1636
1637void
1638pmd_test_exit(void)
1639{
1640	portid_t pt_id;
1641
1642	if (test_done == 0)
1643		stop_packet_forwarding();
1644
1645	if (ports != NULL) {
1646		no_link_check = 1;
1647		FOREACH_PORT(pt_id, ports) {
1648			printf("\nShutting down port %d...\n", pt_id);
1649			fflush(stdout);
1650			stop_port(pt_id);
1651			close_port(pt_id);
1652		}
1653	}
1654	printf("\nBye...\n");
1655}
1656
/* Handler type for a parameterless test-menu command. */
typedef void (*cmd_func_t)(void);
/* Binding of a command name to its handler. */
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

/* Number of entries in the test menu table.
 * NOTE(review): pmd_test_menu is not defined in this part of the file —
 * presumably declared elsewhere; confirm before relying on this macro. */
#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1664
1665/* Check the link status of all ports in up to 9s, and print them finally */
1666static void
1667check_all_ports_link_status(uint32_t port_mask)
1668{
1669#define CHECK_INTERVAL 100 /* 100ms */
1670#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1671	uint8_t portid, count, all_ports_up, print_flag = 0;
1672	struct rte_eth_link link;
1673
1674	printf("Checking link statuses...\n");
1675	fflush(stdout);
1676	for (count = 0; count <= MAX_CHECK_TIME; count++) {
1677		all_ports_up = 1;
1678		FOREACH_PORT(portid, ports) {
1679			if ((port_mask & (1 << portid)) == 0)
1680				continue;
1681			memset(&link, 0, sizeof(link));
1682			rte_eth_link_get_nowait(portid, &link);
1683			/* print link status if flag set */
1684			if (print_flag == 1) {
1685				if (link.link_status)
1686					printf("Port %d Link Up - speed %u "
1687						"Mbps - %s\n", (uint8_t)portid,
1688						(unsigned)link.link_speed,
1689				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1690					("full-duplex") : ("half-duplex\n"));
1691				else
1692					printf("Port %d Link Down\n",
1693						(uint8_t)portid);
1694				continue;
1695			}
1696			/* clear all_ports_up flag if any link down */
1697			if (link.link_status == ETH_LINK_DOWN) {
1698				all_ports_up = 0;
1699				break;
1700			}
1701		}
1702		/* after finally printing all link status, get out */
1703		if (print_flag == 1)
1704			break;
1705
1706		if (all_ports_up == 0) {
1707			fflush(stdout);
1708			rte_delay_ms(CHECK_INTERVAL);
1709		}
1710
1711		/* set the print_flag if all ports up or timeout */
1712		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1713			print_flag = 1;
1714		}
1715	}
1716}
1717
1718static int
1719set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1720{
1721	uint16_t i;
1722	int diag;
1723	uint8_t mapping_found = 0;
1724
1725	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1726		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1727				(tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1728			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1729					tx_queue_stats_mappings[i].queue_id,
1730					tx_queue_stats_mappings[i].stats_counter_id);
1731			if (diag != 0)
1732				return diag;
1733			mapping_found = 1;
1734		}
1735	}
1736	if (mapping_found)
1737		port->tx_queue_stats_mapping_enabled = 1;
1738	return 0;
1739}
1740
1741static int
1742set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1743{
1744	uint16_t i;
1745	int diag;
1746	uint8_t mapping_found = 0;
1747
1748	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1749		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1750				(rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1751			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1752					rx_queue_stats_mappings[i].queue_id,
1753					rx_queue_stats_mappings[i].stats_counter_id);
1754			if (diag != 0)
1755				return diag;
1756			mapping_found = 1;
1757		}
1758	}
1759	if (mapping_found)
1760		port->rx_queue_stats_mapping_enabled = 1;
1761	return 0;
1762}
1763
1764static void
1765map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1766{
1767	int diag = 0;
1768
1769	diag = set_tx_queue_stats_mapping_registers(pi, port);
1770	if (diag != 0) {
1771		if (diag == -ENOTSUP) {
1772			port->tx_queue_stats_mapping_enabled = 0;
1773			printf("TX queue stats mapping not supported port id=%d\n", pi);
1774		}
1775		else
1776			rte_exit(EXIT_FAILURE,
1777					"set_tx_queue_stats_mapping_registers "
1778					"failed for port id=%d diag=%d\n",
1779					pi, diag);
1780	}
1781
1782	diag = set_rx_queue_stats_mapping_registers(pi, port);
1783	if (diag != 0) {
1784		if (diag == -ENOTSUP) {
1785			port->rx_queue_stats_mapping_enabled = 0;
1786			printf("RX queue stats mapping not supported port id=%d\n", pi);
1787		}
1788		else
1789			rte_exit(EXIT_FAILURE,
1790					"set_rx_queue_stats_mapping_registers "
1791					"failed for port id=%d diag=%d\n",
1792					pi, diag);
1793	}
1794}
1795
1796static void
1797rxtx_port_config(struct rte_port *port)
1798{
1799	port->rx_conf = port->dev_info.default_rxconf;
1800	port->tx_conf = port->dev_info.default_txconf;
1801
1802	/* Check if any RX/TX parameters have been passed */
1803	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1804		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1805
1806	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1807		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1808
1809	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1810		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1811
1812	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1813		port->rx_conf.rx_free_thresh = rx_free_thresh;
1814
1815	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1816		port->rx_conf.rx_drop_en = rx_drop_en;
1817
1818	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1819		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1820
1821	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1822		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1823
1824	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1825		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1826
1827	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1828		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1829
1830	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1831		port->tx_conf.tx_free_thresh = tx_free_thresh;
1832
1833	if (txq_flags != RTE_PMD_PARAM_UNSET)
1834		port->tx_conf.txq_flags = txq_flags;
1835}
1836
1837void
1838init_port_config(void)
1839{
1840	portid_t pid;
1841	struct rte_port *port;
1842
1843	FOREACH_PORT(pid, ports) {
1844		port = &ports[pid];
1845		port->dev_conf.rxmode = rx_mode;
1846		port->dev_conf.fdir_conf = fdir_conf;
1847		if (nb_rxq > 1) {
1848			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1849			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1850		} else {
1851			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1852			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1853		}
1854
1855		if (port->dcb_flag == 0) {
1856			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1857				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1858			else
1859				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1860		}
1861
1862		rxtx_port_config(port);
1863
1864		rte_eth_macaddr_get(pid, &port->eth_addr);
1865
1866		map_port_queue_stats_mapping_registers(pid, port);
1867#ifdef RTE_NIC_BYPASS
1868		rte_eth_dev_bypass_init(pid);
1869#endif
1870	}
1871}
1872
1873void set_port_slave_flag(portid_t slave_pid)
1874{
1875	struct rte_port *port;
1876
1877	port = &ports[slave_pid];
1878	port->slave_flag = 1;
1879}
1880
1881void clear_port_slave_flag(portid_t slave_pid)
1882{
1883	struct rte_port *port;
1884
1885	port = &ports[slave_pid];
1886	port->slave_flag = 0;
1887}
1888
1889uint8_t port_is_bonding_slave(portid_t slave_pid)
1890{
1891	struct rte_port *port;
1892
1893	port = &ports[slave_pid];
1894	if ((rte_eth_devices[slave_pid].data->dev_flags &
1895	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
1896		return 1;
1897	return 0;
1898}
1899
/* VLAN tag values (0..31) used below to populate the VMDq+DCB pool maps
 * and the port VLAN filter table — one tag per pool map entry. */
const uint16_t vlan_tags[] = {
		0,  1,  2,  3,  4,  5,  6,  7,
		8,  9, 10, 11,  12, 13, 14, 15,
		16, 17, 18, 19, 20, 21, 22, 23,
		24, 25, 26, 27, 28, 29, 30, 31
};
1906
/*
 * Fill eth_conf with a DCB configuration: either VMDq+DCB (one pool per
 * VLAN tag from the vlan_tags table) or plain DCB combined with RSS.
 * Returns 0 on success or the error from rte_eth_dev_rss_hash_conf_get().
 */
static  int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configrations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		/* 4 TCs -> 32 pools, otherwise (8 TCs) -> 16 pools. */
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		/* One VLAN tag per pool, pools assigned round-robin. */
		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		/* Map the 8 user priorities onto the traffic classes. */
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		/* Preserve the port's current RSS hash configuration so
		 * DCB+RSS keeps the same hash functions. */
		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	/* Priority flow control is advertised only when requested. */
	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
1980
/*
 * Switch a port into DCB mode: build the DCB device configuration,
 * probe the device capabilities to size nb_rxq/nb_txq, and program the
 * VLAN filter table with the vlan_tags entries.  Returns 0 on success,
 * negative on error.
 */
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/**
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and has the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			/* With VFs active, only the queues already assigned
			 * to the PF are usable. */
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/*if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			/* One queue per traffic class. */
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;

		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
2062
2063static void
2064init_port(void)
2065{
2066	portid_t pid;
2067
2068	/* Configuration of Ethernet ports. */
2069	ports = rte_zmalloc("testpmd: ports",
2070			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2071			    RTE_CACHE_LINE_SIZE);
2072	if (ports == NULL) {
2073		rte_exit(EXIT_FAILURE,
2074				"rte_zmalloc(%d struct rte_port) failed\n",
2075				RTE_MAX_ETHPORTS);
2076	}
2077
2078	/* enabled allocated ports */
2079	for (pid = 0; pid < nb_ports; pid++)
2080		ports[pid].enabled = 1;
2081}
2082
/* Orderly teardown used by the signal handler: close all ports first,
 * then terminate the interactive prompt. */
static void
force_quit(void)
{
	pmd_test_exit();
	prompt_exit();
}
2089
/*
 * SIGINT/SIGTERM handler: tear the application down cleanly, then
 * re-raise the signal with the default disposition so the process
 * exits with the status a caller expects for that signal.
 */
static void
signal_handler(int signum)
{
	if (signum != SIGINT && signum != SIGTERM)
		return;

	printf("\nSignal %d received, preparing to exit...\n",
			signum);
#ifdef RTE_LIBRTE_PDUMP
	/* uninitialize packet capture framework */
	rte_pdump_uninit();
#endif
	force_quit();
	/* exit with the expected status */
	signal(signum, SIG_DFL);
	kill(getpid(), signum);
}
2106
/*
 * testpmd entry point: initialize the EAL, allocate port state, parse the
 * application arguments, configure and start all ports, then either run
 * the interactive command prompt or start forwarding immediately.
 */
int
main(int argc, char** argv)
{
	int  diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Skip the EAL arguments already consumed by rte_eal_init(). */
	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		/* Block in the interactive prompt until the user quits. */
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		/* Block on stdin; any input (or EOF/error) triggers exit. */
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}
2183