/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

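/*
 * KNI example application: forwards packets between physical Ethernet ports
 * and their Kernel NIC Interface (KNI) devices.  For each enabled port one
 * lcore runs the NIC RX -> KNI TX path (kni_ingress) and another runs the
 * KNI RX -> NIC TX path (kni_egress), as assigned with the --config option.
 */
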
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <netinet/in.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <signal.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_kni.h>

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1

/* Max size of a single packet */
#define MAX_PACKET_SZ           2048

/* Size of the data buffer in each mbuf */
#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)

/* Number of mbufs in mempool that is created */
#define NB_MBUF                 (8192 * 16)
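/* 8192 * 16 = 131072 mbufs with roughly 2 KB of buffer each, so expect the
 * pool to take a few hundred MB of hugepage memory once per-mbuf overhead
 * is included. */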

/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ            32

/* How many objects (mbufs) to keep in per-lcore mempool cache */
#define MEMPOOL_CACHE_SZ        PKT_BURST_SZ

/* Number of RX ring descriptors */
#define NB_RXD                  128

/* Number of TX ring descriptors */
#define NB_TXD                  512

/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE    14

/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE       4

#define KNI_US_PER_SECOND       1000000
#define KNI_SECOND_PER_DAY      86400

#define KNI_MAX_KTHREAD 32
/*
 * Structure of port parameters
 */
struct kni_port_params {
	uint8_t port_id; /* Port ID */
	unsigned lcore_rx; /* lcore ID for RX */
	unsigned lcore_tx; /* lcore ID for TX */
	uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
	uint32_t nb_kni; /* Number of KNI devices to be created */
	unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
	struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
} __rte_cache_aligned;

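/*
 * One entry per enabled port, filled in from --config; for example the group
 * "(0,1,2,21)" maps port 0 to RX lcore 1, TX lcore 2 and one kernel thread
 * bound to lcore 21.
 */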
static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];


/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.header_split = 0,      /* Header Split disabled */
		.hw_ip_checksum = 0,    /* IP checksum offload disabled */
		.hw_vlan_filter = 0,    /* VLAN filtering disabled */
		.jumbo_frame = 0,       /* Jumbo Frame Support disabled */
		.hw_strip_crc = 0,      /* CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

/* Mempool for mbufs */
static struct rte_mempool * pktmbuf_pool = NULL;

/* Mask of enabled ports */
static uint32_t ports_mask = 0;
/* Promiscuous mode is off on all ports by default; -P turns it on. */
static int promiscuous_on = 0;

/* Structure type for recording kni interface specific stats */
struct kni_interface_stats {
	/* number of pkts received from NIC, and sent to KNI */
	uint64_t rx_packets;

	/* number of pkts received from NIC, but failed to send to KNI */
	uint64_t rx_dropped;

	/* number of pkts received from KNI, and sent to NIC */
	uint64_t tx_packets;

	/* number of pkts received from KNI, but failed to send to NIC */
	uint64_t tx_dropped;
};

/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];

static int kni_change_mtu(uint8_t port_id, unsigned new_mtu);
static int kni_config_network_interface(uint8_t port_id, uint8_t if_up);

static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);

/* Print out statistics on packets handled */
static void
print_stats(void)
{
	uint8_t i;

	printf("\n**KNI example application statistics**\n"
	       "======  ==============  ============  ============  ============  ============\n"
	       " Port    Lcore(RX/TX)    rx_packets    rx_dropped    tx_packets    tx_dropped\n"
	       "------  --------------  ------------  ------------  ------------  ------------\n");
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!kni_port_params_array[i])
			continue;

		printf("%7d %10u/%2u %13"PRIu64" %13"PRIu64" %13"PRIu64" "
							"%13"PRIu64"\n", i,
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->lcore_tx,
						kni_stats[i].rx_packets,
						kni_stats[i].rx_dropped,
						kni_stats[i].tx_packets,
						kni_stats[i].tx_dropped);
	}
	printf("======  ==============  ============  ============  ============  ============\n");
}

/* Custom handling of signals to handle stats and kni processing */
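/* Send SIGUSR1 to the running process (e.g. "kill -USR1 <pid>") to print the
 * statistics, and SIGUSR2 to reset them. */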
static void
signal_handler(int signum)
{
	/* When we receive a USR1 signal, print stats */
	if (signum == SIGUSR1) {
		print_stats();
	}

	/* When we receive a USR2 signal, reset stats */
	if (signum == SIGUSR2) {
		memset(&kni_stats, 0, sizeof(kni_stats));
		printf("\n**Statistics have been reset**\n");
		return;
	}

	/* When we receive a RTMIN or SIGINT signal, stop kni processing */
	if (signum == SIGRTMIN || signum == SIGINT) {
		printf("SIGRTMIN/SIGINT received, KNI processing is "
							"going to stop\n");
		rte_atomic32_inc(&kni_stop);
		return;
	}
}

static void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
{
	unsigned i;

	if (pkts == NULL)
		return;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free(pkts[i]);
		pkts[i] = NULL;
	}
}

/**
 * Interface to burst rx and enqueue mbufs into rx_q
 */
static void
kni_ingress(struct kni_port_params *p)
{
	uint8_t i, port_id;
	unsigned nb_rx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from eth */
		nb_rx = rte_eth_rx_burst(port_id, 0, pkts_burst, PKT_BURST_SZ);
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from eth\n");
			return;
		}
		/* Burst tx to kni */
		num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
		kni_stats[port_id].rx_packets += num;

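		/* Service control requests (MTU change, interface up/down)
		 * queued by the KNI kernel module for this device. */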
		rte_kni_handle_request(p->kni[i]);
		if (unlikely(num < nb_rx)) {
			/* Free mbufs not tx to kni interface */
			kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
			kni_stats[port_id].rx_dropped += nb_rx - num;
		}
	}
}

/**
 * Interface to dequeue mbufs from tx_q and burst tx
 */
static void
kni_egress(struct kni_port_params *p)
{
	uint8_t i, port_id;
	unsigned nb_tx, num;
	uint32_t nb_kni;
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ];

	if (p == NULL)
		return;

	nb_kni = p->nb_kni;
	port_id = p->port_id;
	for (i = 0; i < nb_kni; i++) {
		/* Burst rx from kni */
		num = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
		if (unlikely(num > PKT_BURST_SZ)) {
			RTE_LOG(ERR, APP, "Error receiving from KNI\n");
			return;
		}
		/* Burst tx to eth */
		nb_tx = rte_eth_tx_burst(port_id, 0, pkts_burst, (uint16_t)num);
		kni_stats[port_id].tx_packets += nb_tx;
		if (unlikely(nb_tx < num)) {
			/* Free mbufs not tx to NIC */
			kni_burst_free_mbufs(&pkts_burst[nb_tx], num - nb_tx);
			kni_stats[port_id].tx_dropped += num - nb_tx;
		}
	}
}

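/*
 * main_loop() runs on every lcore.  Each lcore owns at most one direction of
 * one port, as assigned via --config: either NIC RX -> KNI TX (kni_ingress)
 * or KNI RX -> NIC TX (kni_egress), looping until kni_stop is set by the
 * signal handler.
 */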
static int
main_loop(__rte_unused void *arg)
{
	uint8_t i, nb_ports = rte_eth_dev_count();
	int32_t f_stop;
	const unsigned lcore_id = rte_lcore_id();
	enum lcore_rxtx {
		LCORE_NONE,
		LCORE_RX,
		LCORE_TX,
		LCORE_MAX
	};
	enum lcore_rxtx flag = LCORE_NONE;

	for (i = 0; i < nb_ports; i++) {
		if (!kni_port_params_array[i])
			continue;
		if (kni_port_params_array[i]->lcore_rx == (uint8_t)lcore_id) {
			flag = LCORE_RX;
			break;
		} else if (kni_port_params_array[i]->lcore_tx ==
						(uint8_t)lcore_id) {
			flag = LCORE_TX;
			break;
		}
	}

	if (flag == LCORE_RX) {
		RTE_LOG(INFO, APP, "Lcore %u is reading from port %d\n",
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			if (f_stop)
				break;
			kni_ingress(kni_port_params_array[i]);
		}
	} else if (flag == LCORE_TX) {
		RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
					kni_port_params_array[i]->lcore_tx,
					kni_port_params_array[i]->port_id);
		while (1) {
			f_stop = rte_atomic32_read(&kni_stop);
			if (f_stop)
				break;
			kni_egress(kni_port_params_array[i]);
		}
	} else
		RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);

	return 0;
}

/* Display usage instructions */
static void
print_usage(const char *prgname)
{
	RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK -P "
		   "[--config (port,lcore_rx,lcore_tx,lcore_kthread...)"
		   "[,(port,lcore_rx,lcore_tx,lcore_kthread...)]]\n"
		   "    -p PORTMASK: hex bitmask of ports to use\n"
		   "    -P : enable promiscuous mode\n"
		   "    --config (port,lcore_rx,lcore_tx,lcore_kthread...): "
		   "port and lcore configurations\n",
		   prgname);
}

/* Convert a hexadecimal string to an unsigned number. 0 is returned if an error occurs */
static uint32_t
parse_unsigned(const char *portmask)
{
	char *end = NULL;
	unsigned long num;

	num = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return (uint32_t)num;
}

static void
print_config(void)
{
	uint32_t i, j;
	struct kni_port_params **p = kni_port_params_array;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (!p[i])
			continue;
		RTE_LOG(DEBUG, APP, "Port ID: %d\n", p[i]->port_id);
		RTE_LOG(DEBUG, APP, "Rx lcore ID: %u, Tx lcore ID: %u\n",
					p[i]->lcore_rx, p[i]->lcore_tx);
		for (j = 0; j < p[i]->nb_lcore_k; j++)
			RTE_LOG(DEBUG, APP, "Kernel thread lcore ID: %u\n",
							p[i]->lcore_k[j]);
	}
}

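/*
 * Parse the --config argument: a list of groups of the form
 * (port,lcore_rx,lcore_tx[,lcore_kthread...]), e.g. "(0,1,2),(1,3,4,5)".
 */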
static int
parse_config(const char *arg)
{
	const char *p, *p0 = arg;
	char s[256], *end;
	unsigned size;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_LCORE_RX,
		FLD_LCORE_TX,
		_NUM_FLD = KNI_MAX_KTHREAD + 3,
	};
	int i, j, nb_token;
	char *str_fld[_NUM_FLD];
	unsigned long int_fld[_NUM_FLD];
	uint8_t port_id, nb_kni_port_params = 0;

	memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
	while (((p = strchr(p0, '(')) != NULL) &&
		nb_kni_port_params < RTE_MAX_ETHPORTS) {
		p++;
		if ((p0 = strchr(p, ')')) == NULL)
			goto fail;
		size = p0 - p;
		if (size >= sizeof(s)) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		snprintf(s, sizeof(s), "%.*s", size, p);
		nb_token = rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',');
		if (nb_token <= FLD_LCORE_TX) {
			printf("Invalid config parameters\n");
			goto fail;
		}
		for (i = 0; i < nb_token; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			if (errno != 0 || end == str_fld[i]) {
				printf("Invalid config parameters\n");
				goto fail;
			}
		}

		i = 0;
		port_id = (uint8_t)int_fld[i++];
		if (port_id >= RTE_MAX_ETHPORTS) {
			printf("Port ID %d exceeds the maximum of %d\n",
						port_id, RTE_MAX_ETHPORTS);
			goto fail;
		}
		if (kni_port_params_array[port_id]) {
			printf("Port %d has already been configured\n", port_id);
			goto fail;
		}
		kni_port_params_array[port_id] =
			rte_zmalloc("KNI_port_params",
				    sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
		kni_port_params_array[port_id]->port_id = port_id;
		kni_port_params_array[port_id]->lcore_rx =
					(uint8_t)int_fld[i++];
		kni_port_params_array[port_id]->lcore_tx =
					(uint8_t)int_fld[i++];
		if (kni_port_params_array[port_id]->lcore_rx >= RTE_MAX_LCORE ||
		    kni_port_params_array[port_id]->lcore_tx >= RTE_MAX_LCORE) {
			printf("lcore_rx %u or lcore_tx %u must be less "
						"than the maximum of %u\n",
				kni_port_params_array[port_id]->lcore_rx,
				kni_port_params_array[port_id]->lcore_tx,
						(unsigned)RTE_MAX_LCORE);
			goto fail;
		}
		for (j = 0; i < nb_token && j < KNI_MAX_KTHREAD; i++, j++)
			kni_port_params_array[port_id]->lcore_k[j] =
						(uint8_t)int_fld[i];
		kni_port_params_array[port_id]->nb_lcore_k = j;
	}
	print_config();

	return 0;

fail:
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}
	}

	return -1;
}

static int
validate_parameters(uint32_t portmask)
{
	uint32_t i;

	if (!portmask) {
		printf("No port configured in port mask\n");
		return -1;
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
			(!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE, "portmask is not consistent "
				"with the port IDs specified in --config\n");

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(
			(unsigned)(kni_port_params_array[i]->lcore_rx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d receiving not enabled\n",
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->port_id);

		if (kni_port_params_array[i] && !rte_lcore_is_enabled(
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d transmitting not enabled\n",
					kni_port_params_array[i]->lcore_tx,
					kni_port_params_array[i]->port_id);

	}

	return 0;
}

#define CMDLINE_OPT_CONFIG  "config"

/* Parse the arguments given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, longindex, ret = 0;
	const char *prgname = argv[0];
	static struct option longopts[] = {
		{CMDLINE_OPT_CONFIG, required_argument, NULL, 0},
		{NULL, 0, NULL, 0}
	};

	/* Disable printing messages within getopt() */
	opterr = 0;

	/* Parse command line */
	while ((opt = getopt_long(argc, argv, "p:P", longopts,
						&longindex)) != EOF) {
		switch (opt) {
		case 'p':
			ports_mask = parse_unsigned(optarg);
			break;
		case 'P':
			promiscuous_on = 1;
			break;
		case 0:
			if (!strncmp(longopts[longindex].name,
				     CMDLINE_OPT_CONFIG,
				     sizeof(CMDLINE_OPT_CONFIG))) {
				ret = parse_config(optarg);
				if (ret) {
					printf("Invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			break;
		default:
			print_usage(prgname);
			rte_exit(EXIT_FAILURE, "Invalid option specified\n");
		}
	}

	/* Check that options were parsed ok */
	if (validate_parameters(ports_mask) < 0) {
		print_usage(prgname);
		rte_exit(EXIT_FAILURE, "Invalid parameters\n");
	}

	return ret;
}

/* Initialize KNI subsystem */
static void
init_kni(void)
{
	unsigned int num_of_kni_ports = 0, i;
	struct kni_port_params **params = kni_port_params_array;

	/* Calculate the maximum number of KNI interfaces that will be used */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (kni_port_params_array[i]) {
			num_of_kni_ports += (params[i]->nb_lcore_k ?
				params[i]->nb_lcore_k : 1);
		}
	}

	/* Invoke rte KNI init to preallocate the ports */
	rte_kni_init(num_of_kni_ports);
}

/* Initialise a single port on an Ethernet device */
static void
init_port(uint8_t port)
{
	int ret;

	/* Initialise device and RX/TX queues */
	RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port);
	fflush(stdout);
	ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n",
				(unsigned)port, ret);

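	/* A NULL queue configuration selects the driver's default RX/TX settings */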
	ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
		rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not set up RX queue for "
				"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
		rte_eth_dev_socket_id(port), NULL);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not set up TX queue for "
				"port%u (%d)\n", (unsigned)port, ret);

	ret = rte_eth_dev_start(port);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not start port%u (%d)\n",
						(unsigned)port, ret);

	if (promiscuous_on)
		rte_eth_promiscuous_enable(port);
}

/* Check the link status of all enabled ports for up to 9 s, then print it */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}

/* Callback for request of changing MTU */
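/* Invoked via rte_kni_handle_request() when the kernel side asks for a new
 * MTU, e.g. after "ip link set vEth0 mtu 1500" on the KNI interface. */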
static int
kni_change_mtu(uint8_t port_id, unsigned new_mtu)
{
	int ret;
	struct rte_eth_conf conf;

	if (port_id >= rte_eth_dev_count()) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Change MTU of port %d to %u\n", port_id, new_mtu);

	/* Stop specific port */
	rte_eth_dev_stop(port_id);

	memcpy(&conf, &port_conf, sizeof(conf));
	/* Set new MTU */
	if (new_mtu > ETHER_MAX_LEN)
		conf.rxmode.jumbo_frame = 1;
	else
		conf.rxmode.jumbo_frame = 0;

	/* mtu + length of header + length of FCS = max pkt length */
	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
							KNI_ENET_FCS_SIZE;
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Failed to reconfigure port %d\n", port_id);
		return ret;
	}

	/* Restart specific port */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		RTE_LOG(ERR, APP, "Failed to restart port %d\n", port_id);
		return ret;
	}

	return 0;
}

/* Callback for request of configuring network interface up/down */
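/* Invoked via rte_kni_handle_request() when the KNI interface is brought up
 * or down, e.g. "ip link set vEth0 up". */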
static int
kni_config_network_interface(uint8_t port_id, uint8_t if_up)
{
	int ret = 0;

	if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
		RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id);
		return -EINVAL;
	}

	RTE_LOG(INFO, APP, "Configure network interface of port %d %s\n",
					port_id, if_up ? "up" : "down");

	if (if_up != 0) { /* Configure network interface up */
		rte_eth_dev_stop(port_id);
		ret = rte_eth_dev_start(port_id);
	} else /* Configure network interface down */
		rte_eth_dev_stop(port_id);

	if (ret < 0)
		RTE_LOG(ERR, APP, "Failed to start port %d\n", port_id);

	return ret;
}

static int
kni_alloc(uint8_t port_id)
{
	uint8_t i;
	struct rte_kni *kni;
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id])
		return -1;

	params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
				params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
		if (params[port_id]->nb_lcore_k) {
			snprintf(conf.name, RTE_KNI_NAMESIZE,
					"vEth%u_%u", port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			conf.force_bind = 1;
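			/* Pin this KNI device's kernel receive thread to the chosen lcore */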
		} else
			snprintf(conf.name, RTE_KNI_NAMESIZE,
						"vEth%u", port_id);
		conf.group_id = (uint16_t)port_id;
		conf.mbuf_size = MAX_PACKET_SZ;
		/*
		 * The first KNI device associated with a port is the
		 * master, for the multiple kernel thread environment.
		 */
		if (i == 0) {
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;

			memset(&dev_info, 0, sizeof(dev_info));
			rte_eth_dev_info_get(port_id, &dev_info);
			conf.addr = dev_info.pci_dev->addr;
			conf.id = dev_info.pci_dev->id;

			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;

			kni = rte_kni_alloc(pktmbuf_pool, &conf, &ops);
		} else
			kni = rte_kni_alloc(pktmbuf_pool, &conf, NULL);

		if (!kni)
			rte_exit(EXIT_FAILURE, "Failed to create KNI for "
						"port %d\n", port_id);
		params[port_id]->kni[i] = kni;
	}

	return 0;
}

static int
kni_free_kni(uint8_t port_id)
{
	uint8_t i;
	struct kni_port_params **p = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])
		return -1;

	for (i = 0; i < p[port_id]->nb_kni; i++) {
		if (rte_kni_release(p[port_id]->kni[i]))
			printf("Failed to release KNI\n");
		p[port_id]->kni[i] = NULL;
	}
	rte_eth_dev_stop(port_id);

	return 0;
}

/* Initialise ports/queues etc. and start main loop on each core */
int
main(int argc, char** argv)
{
	int ret;
	uint8_t nb_sys_ports, port;
	unsigned i;

	/* Associate signal_handler function with USR signals */
	signal(SIGUSR1, signal_handler);
	signal(SIGUSR2, signal_handler);
	signal(SIGRTMIN, signal_handler);
	signal(SIGINT, signal_handler);

	/* Initialise EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not initialise EAL (%d)\n", ret);
	argc -= ret;
	argv += ret;

	/* Parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");

	/* Create the mbuf pool */
	pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
		MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
	if (pktmbuf_pool == NULL) {
		rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");
		return -1;
	}

	/* Get number of ports found in scan */
	nb_sys_ports = rte_eth_dev_count();
	if (nb_sys_ports == 0)
		rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");

	/* Check if the configured port ID is valid */
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i] && i >= nb_sys_ports)
			rte_exit(EXIT_FAILURE, "Configured invalid "
						"port ID %u\n", i);

	/* Initialize KNI subsystem */
	init_kni();

	/* Initialise each port */
	for (port = 0; port < nb_sys_ports; port++) {
		/* Skip ports that are not enabled */
		if (!(ports_mask & (1 << port)))
			continue;
		init_port(port);

		if (port >= RTE_MAX_ETHPORTS)
			rte_exit(EXIT_FAILURE, "Cannot use more than "
				"%d ports for KNI\n", RTE_MAX_ETHPORTS);

		kni_alloc(port);
	}
	check_all_ports_link_status(nb_sys_ports, ports_mask);

	/* Launch per-lcore function on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(i) {
		if (rte_eal_wait_lcore(i) < 0)
			return -1;
	}

	/* Release resources */
	for (port = 0; port < nb_sys_ports; port++) {
		if (!(ports_mask & (1 << port)))
			continue;
		kni_free_kni(port);
	}
#ifdef RTE_LIBRTE_XEN_DOM0
	rte_kni_close();
#endif
	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
		if (kni_port_params_array[i]) {
			rte_free(kni_port_params_array[i]);
			kni_port_params_array[i] = NULL;
		}

	return 0;
}