main.c revision 5b1ff351
1/*-
2 *   BSD LICENSE
3 *
4 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 *   All rights reserved.
6 *
7 *   Redistribution and use in source and binary forms, with or without
8 *   modification, are permitted provided that the following conditions
9 *   are met:
10 *
11 *     * Redistributions of source code must retain the above copyright
12 *       notice, this list of conditions and the following disclaimer.
13 *     * Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in
15 *       the documentation and/or other materials provided with the
16 *       distribution.
17 *     * Neither the name of Intel Corporation nor the names of its
18 *       contributors may be used to endorse or promote products derived
19 *       from this software without specific prior written permission.
20 *
21 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <arpa/inet.h>
35#include <getopt.h>
36#include <linux/if_ether.h>
37#include <linux/if_vlan.h>
38#include <linux/virtio_net.h>
39#include <linux/virtio_ring.h>
40#include <signal.h>
41#include <stdint.h>
42#include <sys/eventfd.h>
43#include <sys/param.h>
44#include <unistd.h>
45
46#include <rte_atomic.h>
47#include <rte_cycles.h>
48#include <rte_ethdev.h>
49#include <rte_log.h>
50#include <rte_string_fns.h>
51#include <rte_malloc.h>
52#include <rte_virtio_net.h>
53#include <rte_ip.h>
54#include <rte_tcp.h>
55
56#include "main.h"
57
58#ifndef MAX_QUEUES
59#define MAX_QUEUES 128
60#endif
61
62/* the maximum number of external ports supported */
63#define MAX_SUP_PORTS 1
64
65#define MBUF_CACHE_SIZE	128
66#define MBUF_DATA_SIZE	RTE_MBUF_DEFAULT_BUF_SIZE
67
68#define MAX_PKT_BURST 32		/* Max burst size for RX/TX */
69#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */
70
71#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
72#define BURST_RX_RETRIES 4		/* Number of retries on RX. */
73
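/* 0x2600 = 9728 bytes, enough for a 9000-byte jumbo MTU plus Ethernet and VLAN headers. */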
74#define JUMBO_FRAME_MAX_SIZE    0x2600
75
76/* State of virtio device. */
77#define DEVICE_MAC_LEARNING 0
78#define DEVICE_RX			1
79#define DEVICE_SAFE_REMOVE	2
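/*
 * A device starts in MAC_LEARNING, moves to RX once its MAC and VLAN are bound
 * to a VMDQ pool (link_vmdq), and is set to SAFE_REMOVE when it is torn down.
 */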
80
81/* Configurable number of RX/TX ring descriptors */
82#define RTE_TEST_RX_DESC_DEFAULT 1024
83#define RTE_TEST_TX_DESC_DEFAULT 512
84
85#define INVALID_PORT_ID 0xFF
86
87/* Max number of devices. Limited by vmdq. */
88#define MAX_DEVICES 64
89
90/* Size of buffers used for snprintfs. */
91#define MAX_PRINT_BUFF 6072
92
93/* Maximum character device basename size. */
94#define MAX_BASENAME_SZ 10
95
96/* Maximum long option length for option parsing. */
97#define MAX_LONG_OPT_SZ 64
98
99/* mask of enabled ports */
100static uint32_t enabled_port_mask = 0;
101
102/* Promiscuous mode */
103static uint32_t promiscuous;
104
105/* number of devices/queues to support*/
106static uint32_t num_queues = 0;
107static uint32_t num_devices;
108
109static struct rte_mempool *mbuf_pool;
110static int mergeable;
111
112/* Do VLAN strip on the host, enabled by default */
113static uint32_t vlan_strip = 1;
114
115/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
116typedef enum {
117	VM2VM_DISABLED = 0,
118	VM2VM_SOFTWARE = 1,
119	VM2VM_HARDWARE = 2,
120	VM2VM_LAST
121} vm2vm_type;
122static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
123
124/* Enable stats. */
125static uint32_t enable_stats = 0;
126/* Enable retries on RX. */
127static uint32_t enable_retry = 1;
128
129/* Disable TX checksum offload */
130static uint32_t enable_tx_csum;
131
132/* Disable TSO offload */
133static uint32_t enable_tso;
134
135static int client_mode;
136
137/* Specify timeout (in useconds) between retries on RX. */
138static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
139/* Specify the number of retries on RX. */
140static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
141
142/* Character device basename. Can be set by user. */
143static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
144
145/* Empty VMDQ configuration structure. Filled in programmatically. */
146static struct rte_eth_conf vmdq_conf_default = {
147	.rxmode = {
148		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
149		.split_hdr_size = 0,
150		.header_split   = 0, /**< Header Split disabled */
151		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
152		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
153		/*
154		 * Required for 1G NICs such as the I350: this fixes a bug where
155		 * IPv4 forwarding in the guest cannot forward packets from one
156		 * virtio device to another.
157		 */
158		.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
159		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
160		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
161	},
162
163	.txmode = {
164		.mq_mode = ETH_MQ_TX_NONE,
165	},
166	.rx_adv_conf = {
167		/*
168		 * should be overridden separately in code with
169		 * appropriate values
170		 */
171		.vmdq_rx_conf = {
172			.nb_queue_pools = ETH_8_POOLS,
173			.enable_default_pool = 0,
174			.default_pool = 0,
175			.nb_pool_maps = 0,
176			.pool_map = {{0, 0},},
177		},
178	},
179};
180
181static unsigned lcore_ids[RTE_MAX_LCORE];
182static uint8_t ports[RTE_MAX_ETHPORTS];
183static unsigned num_ports = 0; /**< The number of ports specified in command line */
184static uint16_t num_pf_queues, num_vmdq_queues;
185static uint16_t vmdq_pool_base, vmdq_queue_base;
186static uint16_t queues_per_pool;
187
188const uint16_t vlan_tags[] = {
189	1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
190	1008, 1009, 1010, 1011,	1012, 1013, 1014, 1015,
191	1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
192	1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
193	1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
194	1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
195	1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
196	1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
197};
198
199/* ethernet addresses of ports */
200static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
201
202static struct vhost_dev_tailq_list vhost_dev_list =
203	TAILQ_HEAD_INITIALIZER(vhost_dev_list);
204
205static struct lcore_info lcore_info[RTE_MAX_LCORE];
206
207/* Used for queueing bursts of TX packets. */
208struct mbuf_table {
209	unsigned len;
210	unsigned txq_id;
211	struct rte_mbuf *m_table[MAX_PKT_BURST];
212};
213
214/* TX queue for each data core. */
215struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
216
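/*
 * TSC cycles between TX-queue drains: the per-microsecond cycle count
 * (rounded up) times BURST_TX_DRAIN_US. For example, at a hypothetical
 * 2 GHz TSC this is 2000 * 100 = 200000 cycles, i.e. roughly 100 us.
 */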
217#define MBUF_TABLE_DRAIN_TSC	((rte_get_tsc_hz() + US_PER_S - 1) \
218				 / US_PER_S * BURST_TX_DRAIN_US)
219#define VLAN_HLEN       4
220
221/*
222 * Builds up the correct configuration for VMDQ VLAN pool map
223 * according to the pool & queue limits.
224 */
225static inline int
226get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
227{
228	struct rte_eth_vmdq_rx_conf conf;
229	struct rte_eth_vmdq_rx_conf *def_conf =
230		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
231	unsigned i;
232
233	memset(&conf, 0, sizeof(conf));
234	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
235	conf.nb_pool_maps = num_devices;
236	conf.enable_loop_back = def_conf->enable_loop_back;
237	conf.rx_mode = def_conf->rx_mode;
238
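	/* Map VLAN tag vlan_tags[i] to VMDQ pool i: one pool (and tag) per virtio device. */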
239	for (i = 0; i < conf.nb_pool_maps; i++) {
240		conf.pool_map[i].vlan_id = vlan_tags[ i ];
241		conf.pool_map[i].pools = (1UL << i);
242	}
243
244	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
245	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
246		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
247	return 0;
248}
249
250/*
251 * Validate the device count against the max pool number obtained from
252 * dev_info. If the device count is invalid, log an error and
253 * return -1. Each device must have its own pool.
254 */
255static inline int
256validate_num_devices(uint32_t max_nb_devices)
257{
258	if (num_devices > max_nb_devices) {
259		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
260		return -1;
261	}
262	return 0;
263}
264
265/*
266 * Initialises a given port using global settings and with the rx buffers
267 * coming from the mbuf_pool passed as parameter
268 */
269static inline int
270port_init(uint8_t port)
271{
272	struct rte_eth_dev_info dev_info;
273	struct rte_eth_conf port_conf;
274	struct rte_eth_rxconf *rxconf;
275	struct rte_eth_txconf *txconf;
276	int16_t rx_rings, tx_rings;
277	uint16_t rx_ring_size, tx_ring_size;
278	int retval;
279	uint16_t q;
280
281	/* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
282	rte_eth_dev_info_get (port, &dev_info);
283
284	if (dev_info.max_rx_queues > MAX_QUEUES) {
285		rte_exit(EXIT_FAILURE,
286			"please define MAX_QUEUES to be no less than %u in %s\n",
287			dev_info.max_rx_queues, __FILE__);
288	}
289
290	rxconf = &dev_info.default_rxconf;
291	txconf = &dev_info.default_txconf;
292	rxconf->rx_drop_en = 1;
293
294	/* Enable vlan offload */
295	txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL;
296
297	/* Configure the number of supported virtio devices based on VMDQ limits. */
298	num_devices = dev_info.max_vmdq_pools;
299
300	rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
301	tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
302	tx_rings = (uint16_t)rte_lcore_count();
303
304	retval = validate_num_devices(MAX_DEVICES);
305	if (retval < 0)
306		return retval;
307
308	/* Get port configuration. */
309	retval = get_eth_conf(&port_conf, num_devices);
310	if (retval < 0)
311		return retval;
312	/* NIC queues are divided into pf queues and vmdq queues.  */
313	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
314	queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
315	num_vmdq_queues = num_devices * queues_per_pool;
316	num_queues = num_pf_queues + num_vmdq_queues;
317	vmdq_queue_base = dev_info.vmdq_queue_base;
318	vmdq_pool_base  = dev_info.vmdq_pool_base;
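	/*
	 * Example (actual values depend on the NIC): with 128 RX queues, 64 VMDQ
	 * pools and 2 queues per pool, device N receives on queue
	 * vmdq_queue_base + N * queues_per_pool.
	 */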
319	printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
320		num_pf_queues, num_devices, queues_per_pool);
321
322	if (port >= rte_eth_dev_count()) return -1;
323
324	if (enable_tx_csum == 0)
325		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
326
327	if (enable_tso == 0) {
328		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
329		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
330		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO4);
331		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_GUEST_TSO6);
332	}
333
334	rx_rings = (uint16_t)dev_info.max_rx_queues;
335	/* Configure ethernet device. */
336	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
337	if (retval != 0) {
338		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
339			port, strerror(-retval));
340		return retval;
341	}
342
343	/* Setup the queues. */
344	for (q = 0; q < rx_rings; q ++) {
345		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
346						rte_eth_dev_socket_id(port),
347						rxconf,
348						mbuf_pool);
349		if (retval < 0) {
350			RTE_LOG(ERR, VHOST_PORT,
351				"Failed to setup rx queue %u of port %u: %s.\n",
352				q, port, strerror(-retval));
353			return retval;
354		}
355	}
356	for (q = 0; q < tx_rings; q ++) {
357		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
358						rte_eth_dev_socket_id(port),
359						txconf);
360		if (retval < 0) {
361			RTE_LOG(ERR, VHOST_PORT,
362				"Failed to setup tx queue %u of port %u: %s.\n",
363				q, port, strerror(-retval));
364			return retval;
365		}
366	}
367
368	/* Start the device. */
369	retval  = rte_eth_dev_start(port);
370	if (retval < 0) {
371		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
372			port, strerror(-retval));
373		return retval;
374	}
375
376	if (promiscuous)
377		rte_eth_promiscuous_enable(port);
378
379	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
380	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
381	RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
382			" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
383			(unsigned)port,
384			vmdq_ports_eth_addr[port].addr_bytes[0],
385			vmdq_ports_eth_addr[port].addr_bytes[1],
386			vmdq_ports_eth_addr[port].addr_bytes[2],
387			vmdq_ports_eth_addr[port].addr_bytes[3],
388			vmdq_ports_eth_addr[port].addr_bytes[4],
389			vmdq_ports_eth_addr[port].addr_bytes[5]);
390
391	return 0;
392}
393
394/*
395 * Set character device basename.
396 */
397static int
398us_vhost_parse_basename(const char *q_arg)
399{
400	/* Validate and copy the basename string. */
401
402	if (strnlen(q_arg, MAX_BASENAME_SZ) >= MAX_BASENAME_SZ)
403		return -1;
404	else
405		snprintf((char*)&dev_basename, MAX_BASENAME_SZ, "%s", q_arg);
406
407	return 0;
408}
409
410/*
411 * Parse the portmask provided at run time.
412 */
413static int
414parse_portmask(const char *portmask)
415{
416	char *end = NULL;
417	unsigned long pm;
418
419	errno = 0;
420
421	/* parse hexadecimal string */
422	pm = strtoul(portmask, &end, 16);
423	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
424		return -1;
425
426	if (pm == 0)
427		return -1;
428
429	return pm;
430
431}
432
433/*
434 * Parse num options at run time.
435 */
436static int
437parse_num_opt(const char *q_arg, uint32_t max_valid_value)
438{
439	char *end = NULL;
440	unsigned long num;
441
442	errno = 0;
443
444	/* parse unsigned int string */
445	num = strtoul(q_arg, &end, 10);
446	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
447		return -1;
448
449	if (num > max_valid_value)
450		return -1;
451
452	return num;
453
454}
455
456/*
457 * Display usage
458 */
459static void
460us_vhost_usage(const char *prgname)
461{
462	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
463	"		--vm2vm [0|1|2]\n"
464	"		--rx-retry [0|1] --mergeable [0|1] --stats [0-N]\n"
465	"		--dev-basename <name>\n"
466	"		--nb-devices ND\n"
467	"		-p PORTMASK: Set mask for ports to be used by application\n"
468	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
469	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Retry if the destination queue is full\n"
470	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Effective only if rx retries are enabled\n"
471	"		--rx-retry-num [0-N]: the number of retries on rx. Effective only if rx retries are enabled\n"
472	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
473	"		--vlan-strip [0|1]: disable/enable(default) RX VLAN strip on host\n"
474	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
475	"		--dev-basename: The basename to be used for the character device.\n"
476	"		--tx-csum [0|1] disable/enable TX checksum offload.\n"
477	"		--tso [0|1] disable/enable TCP segment offload.\n"
478	"		--client register a vhost-user socket as client mode.\n",
479	       prgname);
480}
481
482/*
483 * Parse the arguments given in the command line of the application.
484 */
485static int
486us_vhost_parse_args(int argc, char **argv)
487{
488	int opt, ret;
489	int option_index;
490	unsigned i;
491	const char *prgname = argv[0];
492	static struct option long_option[] = {
493		{"vm2vm", required_argument, NULL, 0},
494		{"rx-retry", required_argument, NULL, 0},
495		{"rx-retry-delay", required_argument, NULL, 0},
496		{"rx-retry-num", required_argument, NULL, 0},
497		{"mergeable", required_argument, NULL, 0},
498		{"vlan-strip", required_argument, NULL, 0},
499		{"stats", required_argument, NULL, 0},
500		{"dev-basename", required_argument, NULL, 0},
501		{"tx-csum", required_argument, NULL, 0},
502		{"tso", required_argument, NULL, 0},
503		{"client", no_argument, &client_mode, 1},
504		{NULL, 0, 0, 0},
505	};
506
507	/* Parse command line */
508	while ((opt = getopt_long(argc, argv, "p:P",
509			long_option, &option_index)) != EOF) {
510		switch (opt) {
511		/* Portmask */
512		case 'p':
513			enabled_port_mask = parse_portmask(optarg);
514			if (enabled_port_mask == 0) {
515				RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
516				us_vhost_usage(prgname);
517				return -1;
518			}
519			break;
520
521		case 'P':
522			promiscuous = 1;
523			vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
524				ETH_VMDQ_ACCEPT_BROADCAST |
525				ETH_VMDQ_ACCEPT_MULTICAST;
526			rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
527
528			break;
529
530		case 0:
531			/* Enable/disable vm2vm comms. */
532			if (!strncmp(long_option[option_index].name, "vm2vm",
533				MAX_LONG_OPT_SZ)) {
534				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
535				if (ret == -1) {
536					RTE_LOG(INFO, VHOST_CONFIG,
537						"Invalid argument for "
538						"vm2vm [0|1|2]\n");
539					us_vhost_usage(prgname);
540					return -1;
541				} else {
542					vm2vm_mode = (vm2vm_type)ret;
543				}
544			}
545
546			/* Enable/disable retries on RX. */
547			if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
548				ret = parse_num_opt(optarg, 1);
549				if (ret == -1) {
550					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
551					us_vhost_usage(prgname);
552					return -1;
553				} else {
554					enable_retry = ret;
555				}
556			}
557
558			/* Enable/disable TX checksum offload. */
559			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
560				ret = parse_num_opt(optarg, 1);
561				if (ret == -1) {
562					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
563					us_vhost_usage(prgname);
564					return -1;
565				} else
566					enable_tx_csum = ret;
567			}
568
569			/* Enable/disable TSO offload. */
570			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
571				ret = parse_num_opt(optarg, 1);
572				if (ret == -1) {
573					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
574					us_vhost_usage(prgname);
575					return -1;
576				} else
577					enable_tso = ret;
578			}
579
580			/* Specify the retries delay time (in useconds) on RX. */
581			if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
582				ret = parse_num_opt(optarg, INT32_MAX);
583				if (ret == -1) {
584					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
585					us_vhost_usage(prgname);
586					return -1;
587				} else {
588					burst_rx_delay_time = ret;
589				}
590			}
591
592			/* Specify the retries number on RX. */
593			if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
594				ret = parse_num_opt(optarg, INT32_MAX);
595				if (ret == -1) {
596					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
597					us_vhost_usage(prgname);
598					return -1;
599				} else {
600					burst_rx_retry_num = ret;
601				}
602			}
603
604			/* Enable/disable RX mergeable buffers. */
605			if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
606				ret = parse_num_opt(optarg, 1);
607				if (ret == -1) {
608					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
609					us_vhost_usage(prgname);
610					return -1;
611				} else {
612					mergeable = !!ret;
613					if (ret) {
614						vmdq_conf_default.rxmode.jumbo_frame = 1;
615						vmdq_conf_default.rxmode.max_rx_pkt_len
616							= JUMBO_FRAME_MAX_SIZE;
617					}
618				}
619			}
620
621			/* Enable/disable RX VLAN strip on host. */
622			if (!strncmp(long_option[option_index].name,
623				"vlan-strip", MAX_LONG_OPT_SZ)) {
624				ret = parse_num_opt(optarg, 1);
625				if (ret == -1) {
626					RTE_LOG(INFO, VHOST_CONFIG,
627						"Invalid argument for VLAN strip [0|1]\n");
628					us_vhost_usage(prgname);
629					return -1;
630				} else {
631					vlan_strip = !!ret;
632					vmdq_conf_default.rxmode.hw_vlan_strip =
633						vlan_strip;
634				}
635			}
636
637			/* Enable/disable stats. */
638			if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
639				ret = parse_num_opt(optarg, INT32_MAX);
640				if (ret == -1) {
641					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
642					us_vhost_usage(prgname);
643					return -1;
644				} else {
645					enable_stats = ret;
646				}
647			}
648
649			/* Set character device basename. */
650			if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
651				if (us_vhost_parse_basename(optarg) == -1) {
652					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
653					us_vhost_usage(prgname);
654					return -1;
655				}
656			}
657
658			break;
659
660			/* Invalid option - print options. */
661		default:
662			us_vhost_usage(prgname);
663			return -1;
664		}
665	}
666
667	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
668		if (enabled_port_mask & (1 << i))
669			ports[num_ports++] = (uint8_t)i;
670	}
671
672	if ((num_ports ==  0) || (num_ports > MAX_SUP_PORTS)) {
673		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
674			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
675		return -1;
676	}
677
678	return 0;
679}
680
681/*
682 * Update the global variable num_ports and the ports array according to the
683 * number of ports on the system, and return the number of valid ports.
684 */
685static unsigned check_ports_num(unsigned nb_ports)
686{
687	unsigned valid_num_ports = num_ports;
688	unsigned portid;
689
690	if (num_ports > nb_ports) {
691		RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
692			num_ports, nb_ports);
693		num_ports = nb_ports;
694	}
695
696	for (portid = 0; portid < num_ports; portid ++) {
697		if (ports[portid] >= nb_ports) {
698			RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
699				ports[portid], (nb_ports - 1));
700			ports[portid] = INVALID_PORT_ID;
701			valid_num_ports--;
702		}
703	}
704	return valid_num_ports;
705}
706
707static inline struct vhost_dev *__attribute__((always_inline))
708find_vhost_dev(struct ether_addr *mac)
709{
710	struct vhost_dev *vdev;
711
712	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
713		if (vdev->ready == DEVICE_RX &&
714		    is_same_ether_addr(mac, &vdev->mac_address))
715			return vdev;
716	}
717
718	return NULL;
719}
720
721/*
722 * This function learns the MAC address of the device and registers it, along
723 * with a VLAN tag, in a VMDQ pool.
724 */
725static int
726link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
727{
728	struct ether_hdr *pkt_hdr;
729	int i, ret;
730
731	/* Learn MAC address of guest device from packet */
732	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
733
734	if (find_vhost_dev(&pkt_hdr->s_addr)) {
735		RTE_LOG(ERR, VHOST_DATA,
736			"(%d) device is using a registered MAC!\n",
737			vdev->vid);
738		return -1;
739	}
740
741	for (i = 0; i < ETHER_ADDR_LEN; i++)
742		vdev->mac_address.addr_bytes[i] = pkt_hdr->s_addr.addr_bytes[i];
743
744	/* vlan_tag currently uses the device_id. */
745	vdev->vlan_tag = vlan_tags[vdev->vid];
746
747	/* Print out VMDQ registration info. */
748	RTE_LOG(INFO, VHOST_DATA,
749		"(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
750		vdev->vid,
751		vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
752		vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
753		vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
754		vdev->vlan_tag);
755
756	/* Register the MAC address. */
757	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
758				(uint32_t)vdev->vid + vmdq_pool_base);
759	if (ret)
760		RTE_LOG(ERR, VHOST_DATA,
761			"(%d) failed to add device MAC address to VMDQ\n",
762			vdev->vid);
763
764	/* Enable stripping of the vlan tag as we handle routing. */
765	if (vlan_strip)
766		rte_eth_dev_set_vlan_strip_on_queue(ports[0],
767			(uint16_t)vdev->vmdq_rx_q, 1);
768
769	/* Set device as ready for RX. */
770	vdev->ready = DEVICE_RX;
771
772	return 0;
773}
774
775/*
776 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
777 * queue before disabling RX on the device.
778 */
779static inline void
780unlink_vmdq(struct vhost_dev *vdev)
781{
782	unsigned i = 0;
783	unsigned rx_count;
784	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
785
786	if (vdev->ready == DEVICE_RX) {
787		/*clear MAC and VLAN settings*/
788		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
789		for (i = 0; i < 6; i++)
790			vdev->mac_address.addr_bytes[i] = 0;
791
792		vdev->vlan_tag = 0;
793
794		/*Clear out the receive buffers*/
795		rx_count = rte_eth_rx_burst(ports[0],
796					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
797
798		while (rx_count) {
799			for (i = 0; i < rx_count; i++)
800				rte_pktmbuf_free(pkts_burst[i]);
801
802			rx_count = rte_eth_rx_burst(ports[0],
803					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
804		}
805
806		vdev->ready = DEVICE_MAC_LEARNING;
807	}
808}
809
810static inline void __attribute__((always_inline))
811virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
812	    struct rte_mbuf *m)
813{
814	uint16_t ret;
815
816	ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
817	if (enable_stats) {
818		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
819		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
820		src_vdev->stats.tx_total++;
821		src_vdev->stats.tx += ret;
822	}
823}
824
825/*
826 * Check if the packet destination MAC address is for a local device. If so, put
827 * the packet on that device's RX queue. If not, return.
828 */
829static inline int __attribute__((always_inline))
830virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
831{
832	struct ether_hdr *pkt_hdr;
833	struct vhost_dev *dst_vdev;
834
835	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
836
837	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
838	if (!dst_vdev)
839		return -1;
840
841	if (vdev->vid == dst_vdev->vid) {
842		RTE_LOG(DEBUG, VHOST_DATA,
843			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
844			vdev->vid);
845		return 0;
846	}
847
848	RTE_LOG(DEBUG, VHOST_DATA,
849		"(%d) TX: MAC address is local\n", dst_vdev->vid);
850
851	if (unlikely(dst_vdev->remove)) {
852		RTE_LOG(DEBUG, VHOST_DATA,
853			"(%d) device is marked for removal\n", dst_vdev->vid);
854		return 0;
855	}
856
857	virtio_xmit(dst_vdev, vdev, m);
858	return 0;
859}
860
861/*
862 * Check if the destination MAC of a packet belongs to a local VM; if so,
863 * get its VLAN tag and the length offset.
864 */
865static inline int __attribute__((always_inline))
866find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
867	uint32_t *offset, uint16_t *vlan_tag)
868{
869	struct vhost_dev *dst_vdev;
870	struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
871
872	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
873	if (!dst_vdev)
874		return 0;
875
876	if (vdev->vid == dst_vdev->vid) {
877		RTE_LOG(DEBUG, VHOST_DATA,
878			"(%d) TX: src and dst MAC are the same. Dropping packet.\n",
879			vdev->vid);
880		return -1;
881	}
882
883	/*
884	 * HW VLAN strip reduces the packet length by the length of the
885	 * VLAN tag, so we need to restore the packet length by adding
886	 * it back.
887	 */
888	*offset  = VLAN_HLEN;
889	*vlan_tag = vlan_tags[vdev->vid];
890
891	RTE_LOG(DEBUG, VHOST_DATA,
892		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
893		vdev->vid, dst_vdev->vid, *vlan_tag);
894
895	return 0;
896}
897
898static uint16_t
899get_psd_sum(void *l3_hdr, uint64_t ol_flags)
900{
901	if (ol_flags & PKT_TX_IPV4)
902		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
903	else /* assume ethertype == ETHER_TYPE_IPv6 */
904		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
905}
906
907static void virtio_tx_offload(struct rte_mbuf *m)
908{
909	void *l3_hdr;
910	struct ipv4_hdr *ipv4_hdr = NULL;
911	struct tcp_hdr *tcp_hdr = NULL;
912	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
913
914	l3_hdr = (char *)eth_hdr + m->l2_len;
915
916	if (m->ol_flags & PKT_TX_IPV4) {
917		ipv4_hdr = l3_hdr;
918		ipv4_hdr->hdr_checksum = 0;
919		m->ol_flags |= PKT_TX_IP_CKSUM;
920	}
921
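	/*
	 * For TSO, the TCP checksum field must hold the pseudo-header checksum;
	 * the NIC computes the full checksum for each segment it emits.
	 */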
922	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
923	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
924}
925
926static inline void
927free_pkts(struct rte_mbuf **pkts, uint16_t n)
928{
929	while (n--)
930		rte_pktmbuf_free(pkts[n]);
931}
932
933static inline void __attribute__((always_inline))
934do_drain_mbuf_table(struct mbuf_table *tx_q)
935{
936	uint16_t count;
937
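	/* rte_eth_tx_burst() may send fewer packets than requested; drop and free the rest. */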
938	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
939				 tx_q->m_table, tx_q->len);
940	if (unlikely(count < tx_q->len))
941		free_pkts(&tx_q->m_table[count], tx_q->len - count);
942
943	tx_q->len = 0;
944}
945
946/*
947 * This function routes the TX packet to the correct interface. This
948 * may be a local device or the physical port.
949 */
950static inline void __attribute__((always_inline))
951virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
952{
953	struct mbuf_table *tx_q;
954	unsigned offset = 0;
955	const uint16_t lcore_id = rte_lcore_id();
956	struct ether_hdr *nh;
957
958
959	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
960	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
961		struct vhost_dev *vdev2;
962
963		TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
964			virtio_xmit(vdev2, vdev, m);
965		}
966		goto queue2nic;
967	}
968
969	/*check if destination is local VM*/
970	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
971		rte_pktmbuf_free(m);
972		return;
973	}
974
975	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
976		if (unlikely(find_local_dest(vdev, m, &offset,
977					     &vlan_tag) != 0)) {
978			rte_pktmbuf_free(m);
979			return;
980		}
981	}
982
983	RTE_LOG(DEBUG, VHOST_DATA,
984		"(%d) TX: MAC address is external\n", vdev->vid);
985
986queue2nic:
987
988	/*Add packet to the port tx queue*/
989	tx_q = &lcore_tx_queue[lcore_id];
990
991	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
992	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
993		/* Guest has inserted the vlan tag. */
994		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
995		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
996		if ((vm2vm_mode == VM2VM_HARDWARE) &&
997			(vh->vlan_tci != vlan_tag_be))
998			vh->vlan_tci = vlan_tag_be;
999	} else {
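		/* Ask the NIC to insert the VLAN tag from m->vlan_tci on transmit. */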
1000		m->ol_flags |= PKT_TX_VLAN_PKT;
1001
1002		/*
1003		 * Find the right seg to adjust the data len when offset is
1004		 * bigger than tail room size.
1005		 */
1006		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
1007			if (likely(offset <= rte_pktmbuf_tailroom(m)))
1008				m->data_len += offset;
1009			else {
1010				struct rte_mbuf *seg = m;
1011
1012				while ((seg->next != NULL) &&
1013					(offset > rte_pktmbuf_tailroom(seg)))
1014					seg = seg->next;
1015
1016				seg->data_len += offset;
1017			}
1018			m->pkt_len += offset;
1019		}
1020
1021		m->vlan_tci = vlan_tag;
1022	}
1023
1024	if (m->ol_flags & PKT_TX_TCP_SEG)
1025		virtio_tx_offload(m);
1026
1027	tx_q->m_table[tx_q->len++] = m;
1028	if (enable_stats) {
1029		vdev->stats.tx_total++;
1030		vdev->stats.tx++;
1031	}
1032
1033	if (unlikely(tx_q->len == MAX_PKT_BURST))
1034		do_drain_mbuf_table(tx_q);
1035}
1036
1037
1038static inline void __attribute__((always_inline))
1039drain_mbuf_table(struct mbuf_table *tx_q)
1040{
1041	static uint64_t prev_tsc;
1042	uint64_t cur_tsc;
1043
1044	if (tx_q->len == 0)
1045		return;
1046
1047	cur_tsc = rte_rdtsc();
1048	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
1049		prev_tsc = cur_tsc;
1050
1051		RTE_LOG(DEBUG, VHOST_DATA,
1052			"TX queue drained after timeout with burst size %u\n",
1053			tx_q->len);
1054		do_drain_mbuf_table(tx_q);
1055	}
1056}
1057
1058static inline void __attribute__((always_inline))
1059drain_eth_rx(struct vhost_dev *vdev)
1060{
1061	uint16_t rx_count, enqueue_count;
1062	struct rte_mbuf *pkts[MAX_PKT_BURST];
1063
1064	rx_count = rte_eth_rx_burst(ports[0], vdev->vmdq_rx_q,
1065				    pkts, MAX_PKT_BURST);
1066	if (!rx_count)
1067		return;
1068
1069	/*
1070	 * When "enable_retry" is set, wait and retry when there are not
1071	 * enough free slots in the queue to hold @rx_count packets, to
1072	 * reduce packet loss.
1073	 */
1074	if (enable_retry &&
1075	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
1076			VIRTIO_RXQ))) {
1077		uint32_t retry;
1078
1079		for (retry = 0; retry < burst_rx_retry_num; retry++) {
1080			rte_delay_us(burst_rx_delay_time);
1081			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
1082					VIRTIO_RXQ))
1083				break;
1084		}
1085	}
1086
1087	enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
1088						pkts, rx_count);
1089	if (enable_stats) {
1090		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
1091		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
1092	}
1093
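	/*
	 * rte_vhost_enqueue_burst() copies packets into the guest's ring, so all
	 * host mbufs can be freed here, including those the guest had no room for.
	 */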
1094	free_pkts(pkts, rx_count);
1095}
1096
1097static inline void __attribute__((always_inline))
1098drain_virtio_tx(struct vhost_dev *vdev)
1099{
1100	struct rte_mbuf *pkts[MAX_PKT_BURST];
1101	uint16_t count;
1102	uint16_t i;
1103
1104	count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ, mbuf_pool,
1105					pkts, MAX_PKT_BURST);
1106
1107	/* setup VMDq for the first packet */
1108	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
1109		if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1) {
1110			free_pkts(pkts, count);
			return;
		}
1111	}
1112
1113	for (i = 0; i < count; ++i)
1114		virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
1115}
1116
1117/*
1118 * Main function of vhost-switch. It basically does:
1119 *
1120 * for each vhost device {
1121 *    - drain_eth_rx()
1122 *
1123 *      Which drains the host eth Rx queue linked to the vhost device
1124 *      and delivers the packets to the guest virtio Rx ring associated
1125 *      with this vhost device.
1126 *
1127 *    - drain_virtio_tx()
1128 *
1129 *      Which drains the guest virtio Tx queue and delivers the packets
1130 *      to their target, which could be another vhost device or the
1131 *      physical eth dev. The routing is done in "virtio_tx_route".
1132 * }
1133 */
1134static int
1135switch_worker(void *arg __rte_unused)
1136{
1137	unsigned i;
1138	unsigned lcore_id = rte_lcore_id();
1139	struct vhost_dev *vdev;
1140	struct mbuf_table *tx_q;
1141
1142	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
1143
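	/*
	 * Each data core owns the NIC TX queue whose index matches its position
	 * in lcore_ids[], so cores never contend on a TX queue.
	 */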
1144	tx_q = &lcore_tx_queue[lcore_id];
1145	for (i = 0; i < rte_lcore_count(); i++) {
1146		if (lcore_ids[i] == lcore_id) {
1147			tx_q->txq_id = i;
1148			break;
1149		}
1150	}
1151
1152	while(1) {
1153		drain_mbuf_table(tx_q);
1154
1155		/*
1156		 * If requested, inform the configuration core that we are not
1157		 * walking the linked list and that no devices are in use.
1158		 */
1159		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
1160			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
1161
1162		/*
1163		 * Process vhost devices
1164		 */
1165		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
1166			      lcore_vdev_entry) {
1167			if (unlikely(vdev->remove)) {
1168				unlink_vmdq(vdev);
1169				vdev->ready = DEVICE_SAFE_REMOVE;
1170				continue;
1171			}
1172
1173			if (likely(vdev->ready == DEVICE_RX))
1174				drain_eth_rx(vdev);
1175
1176			if (likely(!vdev->remove))
1177				drain_virtio_tx(vdev);
1178		}
1179	}
1180
1181	return 0;
1182}
1183
1184/*
1185 * Remove a device from the specific data core linked list and from the
1186 * main linked list. Synchronization occurs through the use of the
1187 * lcore dev_removal_flag. The device is made volatile to avoid reordering
1188 * of dev->remove = 1, which could cause an infinite loop in the rte_pause loop.
1189 */
1190static void
1191destroy_device(int vid)
1192{
1193	struct vhost_dev *vdev = NULL;
1194	int lcore;
1195
1196	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1197		if (vdev->vid == vid)
1198			break;
1199	}
1200	if (!vdev)
1201		return;
1202	/*set the remove flag. */
1203	vdev->remove = 1;
1204	while(vdev->ready != DEVICE_SAFE_REMOVE) {
1205		rte_pause();
1206	}
1207
1208	TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
1209		     lcore_vdev_entry);
1210	TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
1211
1212
1213	/* Set the dev_removal_flag on each lcore. */
1214	RTE_LCORE_FOREACH_SLAVE(lcore)
1215		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;
1216
1217	/*
1218	 * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL
1219	 * we can be sure that they can no longer access the device removed
1220	 * from the linked lists and that the devices are no longer in use.
1221	 */
1222	RTE_LCORE_FOREACH_SLAVE(lcore) {
1223		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
1224			rte_pause();
1225	}
1226
1227	lcore_info[vdev->coreid].device_num--;
1228
1229	RTE_LOG(INFO, VHOST_DATA,
1230		"(%d) device has been removed from data core\n",
1231		vdev->vid);
1232
1233	rte_free(vdev);
1234}
1235
1236/*
1237 * A new device is added to a data core. First the device is added to the main
1238 * linked list and then allocated to a specific data core.
1239 */
1240static int
1241new_device(int vid)
1242{
1243	int lcore, core_add = 0;
1244	uint32_t device_num_min = num_devices;
1245	struct vhost_dev *vdev;
1246
1247	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
1248	if (vdev == NULL) {
1249		RTE_LOG(INFO, VHOST_DATA,
1250			"(%d) couldn't allocate memory for vhost dev\n",
1251			vid);
1252		return -1;
1253	}
1254	vdev->vid = vid;
1255
1256	TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
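	/* Each device owns one VMDQ pool; use the pool's first RX queue for this device. */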
1257	vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
1258
1259	/*reset ready flag*/
1260	vdev->ready = DEVICE_MAC_LEARNING;
1261	vdev->remove = 0;
1262
1263	/* Find a suitable lcore to add the device. */
1264	RTE_LCORE_FOREACH_SLAVE(lcore) {
1265		if (lcore_info[lcore].device_num < device_num_min) {
1266			device_num_min = lcore_info[lcore].device_num;
1267			core_add = lcore;
1268		}
1269	}
1270	vdev->coreid = core_add;
1271
1272	TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
1273			  lcore_vdev_entry);
1274	lcore_info[vdev->coreid].device_num++;
1275
1276	/* Disable notifications. */
1277	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
1278	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);
1279
1280	RTE_LOG(INFO, VHOST_DATA,
1281		"(%d) device has been added to data core %d\n",
1282		vid, vdev->coreid);
1283
1284	return 0;
1285}
1286
1287/*
1288 * These callbacks allow devices to be added to the data core when
1289 * configuration has been fully completed.
1290 */
1291static const struct virtio_net_device_ops virtio_net_device_ops =
1292{
1293	.new_device =  new_device,
1294	.destroy_device = destroy_device,
1295};
1296
1297/*
1298 * This thread wakes up periodically and prints statistics if the user has
1299 * enabled them.
1300 */
1301static void
1302print_stats(void)
1303{
1304	struct vhost_dev *vdev;
1305	uint64_t tx_dropped, rx_dropped;
1306	uint64_t tx, tx_total, rx, rx_total;
1307	const char clr[] = { 27, '[', '2', 'J', '\0' };
1308	const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
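	/* ANSI escape sequences: ESC[2J clears the screen, ESC[1;1H homes the cursor. */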
1309
1310	while(1) {
1311		sleep(enable_stats);
1312
1313		/* Clear screen and move to top left */
1314		printf("%s%s\n", clr, top_left);
1315		printf("Device statistics =================================\n");
1316
1317		TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
1318			tx_total   = vdev->stats.tx_total;
1319			tx         = vdev->stats.tx;
1320			tx_dropped = tx_total - tx;
1321
1322			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
1323			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
1324			rx_dropped = rx_total - rx;
1325
1326			printf("Statistics for device %d\n"
1327				"-----------------------\n"
1328				"TX total:              %" PRIu64 "\n"
1329				"TX dropped:            %" PRIu64 "\n"
1330				"TX successful:         %" PRIu64 "\n"
1331				"RX total:              %" PRIu64 "\n"
1332				"RX dropped:            %" PRIu64 "\n"
1333				"RX successful:         %" PRIu64 "\n",
1334				vdev->vid,
1335				tx_total, tx_dropped, tx,
1336				rx_total, rx_dropped, rx);
1337		}
1338
1339		printf("===================================================\n");
1340	}
1341}
1342
1343/* When we receive an INT signal, unregister the vhost driver */
1344static void
1345sigint_handler(__rte_unused int signum)
1346{
1347	/* Unregister vhost driver. */
1348	int ret = rte_vhost_driver_unregister((char *)&dev_basename);
1349	if (ret != 0)
1350		rte_exit(EXIT_FAILURE, "vhost driver unregister failure.\n");
1351	exit(0);
1352}
1353
1354/*
1355 * While creating an mbuf pool, one key thing is to figure out how
1356 * many mbuf entries are enough for our use. Here are some
1357 * guidelines:
1358 *
1359 * - Each rx queue reserves @nr_rx_desc mbufs at queue setup stage.
1360 *
1361 * - For each switch core (a CPU core that does the packet switching), we
1362 *   also need to make some reservation for receiving the packets from the
1363 *   virtio Tx queue. How many are enough depends on the usage. It is
1364 *   normally a simple calculation like the following:
1365 *
1366 *       MAX_PKT_BURST * max packet size / mbuf size
1367 *
1368 *   So we definitely need to allocate more mbufs when TSO is enabled.
1369 *
1370 * - Similarly, for each switching core, we should reserve @nr_rx_desc
1371 *   mbufs for receiving the packets from the physical NIC device.
1372 *
1373 * - We also need to make sure that, for each switch core, we have
1374 *   allocated enough mbufs to fill up the mbuf cache.
1375 */
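/*
 * For example, with TSO enabled (max packet size 64KB) and ~2KB mbufs, that
 * is roughly 32 * 64KB / 2KB = 1024 extra mbufs per switch core, on top of
 * the @nr_rx_desc reservation and the mbuf cache.
 */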
1376static void
1377create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
1378	uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
1379{
1380	uint32_t nr_mbufs;
1381	uint32_t nr_mbufs_per_core;
1382	uint32_t mtu = 1500;
1383
1384	if (mergeable)
1385		mtu = 9000;
1386	if (enable_tso)
1387		mtu = 64 * 1024;
1388
1389	nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
1390			(mbuf_size - RTE_PKTMBUF_HEADROOM);
1391	nr_mbufs_per_core += nr_rx_desc;
1392	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
1393
1394	nr_mbufs  = nr_queues * nr_rx_desc;
1395	nr_mbufs += nr_mbufs_per_core * nr_switch_core;
1396	nr_mbufs *= nr_port;
1397
1398	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
1399					    nr_mbuf_cache, 0, mbuf_size,
1400					    rte_socket_id());
1401	if (mbuf_pool == NULL)
1402		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
1403}
1404
1405/*
1406 * Main function, does initialisation and calls the per-lcore functions. The CUSE
1407 * device is also registered here to handle the IOCTLs.
1408 */
1409int
1410main(int argc, char *argv[])
1411{
1412	unsigned lcore_id, core_id = 0;
1413	unsigned nb_ports, valid_num_ports;
1414	int ret;
1415	uint8_t portid;
1416	static pthread_t tid;
1417	char thread_name[RTE_MAX_THREAD_NAME_LEN];
1418	uint64_t flags = 0;
1419
1420	signal(SIGINT, sigint_handler);
1421
1422	/* init EAL */
1423	ret = rte_eal_init(argc, argv);
1424	if (ret < 0)
1425		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
1426	argc -= ret;
1427	argv += ret;
1428
1429	/* parse app arguments */
1430	ret = us_vhost_parse_args(argc, argv);
1431	if (ret < 0)
1432		rte_exit(EXIT_FAILURE, "Invalid argument\n");
1433
1434	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1435		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);
1436
1437		if (rte_lcore_is_enabled(lcore_id))
1438			lcore_ids[core_id++] = lcore_id;
	}
1439
1440	if (rte_lcore_count() > RTE_MAX_LCORE)
1441		rte_exit(EXIT_FAILURE,"Not enough cores\n");
1442
1443	/* Get the number of physical ports. */
1444	nb_ports = rte_eth_dev_count();
1445
1446	/*
1447	 * Update the global variable num_ports and the global array ports,
1448	 * and get the number of valid ports according to the system port count.
1449	 */
1450	valid_num_ports = check_ports_num(nb_ports);
1451
1452	if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
1453		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
1454			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
1455		return -1;
1456	}
1457
1458	/*
1459	 * FIXME: here we are trying to allocate mbufs big enough for
1460	 * @MAX_QUEUES, but the truth is we're never going to use that
1461	 * many queues here. We probably should only do allocation for
1462	 * those queues we are going to use.
1463	 */
1464	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
1465			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
1466
1467	if (vm2vm_mode == VM2VM_HARDWARE) {
1468		/* Enable VT loop back to let L2 switch to do it. */
1469		vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
1470		RTE_LOG(DEBUG, VHOST_CONFIG,
1471			"Enable loop back for L2 switch in vmdq.\n");
1472	}
1473
1474	/* initialize all ports */
1475	for (portid = 0; portid < nb_ports; portid++) {
1476		/* skip ports that are not enabled */
1477		if ((enabled_port_mask & (1 << portid)) == 0) {
1478			RTE_LOG(INFO, VHOST_PORT,
1479				"Skipping disabled port %d\n", portid);
1480			continue;
1481		}
1482		if (port_init(portid) != 0)
1483			rte_exit(EXIT_FAILURE,
1484				"Cannot initialize network ports\n");
1485	}
1486
1487	/* Enable stats if the user option is set. */
1488	if (enable_stats) {
1489		ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
1490		if (ret != 0)
1491			rte_exit(EXIT_FAILURE,
1492				"Cannot create print-stats thread\n");
1493
1494		/* Set thread_name for aid in debugging.  */
1495		snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
1496		ret = rte_thread_setname(tid, thread_name);
1497		if (ret != 0)
1498			RTE_LOG(DEBUG, VHOST_CONFIG,
1499				"Cannot set print-stats name\n");
1500	}
1501
1502	/* Launch all data cores. */
1503	RTE_LCORE_FOREACH_SLAVE(lcore_id)
1504		rte_eal_remote_launch(switch_worker, NULL, lcore_id);
1505
1506	if (mergeable == 0)
1507		rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
1508
1509	if (client_mode)
1510		flags |= RTE_VHOST_USER_CLIENT;
1511
1512	/* Register vhost(cuse or user) driver to handle vhost messages. */
1513	ret = rte_vhost_driver_register(dev_basename, flags);
1514	if (ret != 0)
1515		rte_exit(EXIT_FAILURE, "vhost driver register failure.\n");
1516
1517	rte_vhost_driver_callback_register(&virtio_net_device_ops);
1518
1519	/* Start CUSE session. */
1520	rte_vhost_driver_session_start();
1521	return 0;
1522
1523}
1524