rte_eth_af_packet.c revision 3d9b7210
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014 John W. Linville <linville@tuxdriver.com>
 *
 *   Originally based upon librte_pmd_pcap code:
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Copyright(c) 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_kvargs.h>
#include <rte_vdev.h>

#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <poll.h>

#define ETH_AF_PACKET_IFACE_ARG		"iface"
#define ETH_AF_PACKET_NUM_Q_ARG		"qpairs"
#define ETH_AF_PACKET_BLOCKSIZE_ARG	"blocksz"
#define ETH_AF_PACKET_FRAMESIZE_ARG	"framesz"
#define ETH_AF_PACKET_FRAMECOUNT_ARG	"framecnt"

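/*
 * Default TPACKET_V2 ring geometry: 4 KiB blocks, 2 KiB frames,
 * 512 frames in each of the RX and TX rings.
 */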
#define DFLT_BLOCK_SIZE		(1 << 12)
#define DFLT_FRAME_SIZE		(1 << 11)
#define DFLT_FRAME_COUNT	(1 << 9)

#define RTE_PMD_AF_PACKET_MAX_RINGS 16

struct pkt_rx_queue {
	int sockfd;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	struct rte_mempool *mb_pool;
	uint8_t in_port;

	volatile unsigned long rx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long rx_bytes;
};

struct pkt_tx_queue {
	int sockfd;

	struct iovec *rd;
	uint8_t *map;
	unsigned int framecount;
	unsigned int framenum;

	volatile unsigned long tx_pkts;
	volatile unsigned long err_pkts;
	volatile unsigned long tx_bytes;
};

struct pmd_internals {
	unsigned nb_queues;

	int if_index;
	struct ether_addr eth_addr;

	struct tpacket_req req;

	struct pkt_rx_queue rx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
	struct pkt_tx_queue tx_queue[RTE_PMD_AF_PACKET_MAX_RINGS];
};

static const char *valid_arguments[] = {
	ETH_AF_PACKET_IFACE_ARG,
	ETH_AF_PACKET_NUM_Q_ARG,
	ETH_AF_PACKET_BLOCKSIZE_ARG,
	ETH_AF_PACKET_FRAMESIZE_ARG,
	ETH_AF_PACKET_FRAMECOUNT_ARG,
	NULL
};

static const char *drivername = "AF_PACKET PMD";

static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_SPEED_AUTONEG
};

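/*
 * Callback to handle receiving packets from the kernel's TPACKET_V2 RX
 * ring: each frame flagged TP_STATUS_USER is copied into a freshly
 * allocated mbuf and its slot is handed back to the kernel.
 */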
static uint16_t
eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	unsigned i;
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	struct pkt_rx_queue *pkt_q = queue;
	uint16_t num_rx = 0;
	unsigned long num_rx_bytes = 0;
	unsigned int framecount, framenum;

	if (unlikely(nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from the AF_PACKET socket one by
	 * one and copies the packet data into a newly allocated mbuf.
	 */
	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next incoming frame */
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
		if ((ppd->tp_status & TP_STATUS_USER) == 0)
			break;

		/* allocate the next mbuf */
		mbuf = rte_pktmbuf_alloc(pkt_q->mb_pool);
		if (unlikely(mbuf == NULL))
			break;

		/* packet will fit in the mbuf, go ahead and receive it */
		rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf) = ppd->tp_snaplen;
		pbuf = (uint8_t *) ppd + ppd->tp_mac;
		memcpy(rte_pktmbuf_mtod(mbuf, void *), pbuf, rte_pktmbuf_data_len(mbuf));

		/* release incoming frame and advance ring buffer */
		ppd->tp_status = TP_STATUS_KERNEL;
		if (++framenum >= framecount)
			framenum = 0;
		mbuf->port = pkt_q->in_port;

		/* account for the received frame */
		bufs[i] = mbuf;
		num_rx++;
		num_rx_bytes += mbuf->pkt_len;
	}
	pkt_q->framenum = framenum;
	pkt_q->rx_pkts += num_rx;
	pkt_q->rx_bytes += num_rx_bytes;
	return num_rx;
}

/*
 * Callback to handle sending packets through a real NIC.
 */
static uint16_t
eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct tpacket2_hdr *ppd;
	struct rte_mbuf *mbuf;
	uint8_t *pbuf;
	unsigned int framecount, framenum;
	struct pollfd pfd;
	struct pkt_tx_queue *pkt_q = queue;
	uint16_t num_tx = 0;
	unsigned long num_tx_bytes = 0;
	int i;

	if (unlikely(nb_pkts == 0))
		return 0;

	memset(&pfd, 0, sizeof(pfd));
	pfd.fd = pkt_q->sockfd;
	pfd.events = POLLOUT;
	pfd.revents = 0;

	framecount = pkt_q->framecount;
	framenum = pkt_q->framenum;
	ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;
	for (i = 0; i < nb_pkts; i++) {
		/* point at the next available TX frame */
		if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
		    (poll(&pfd, 1, -1) < 0))
			continue;

		/* copy the tx frame data */
		mbuf = bufs[num_tx];
		pbuf = (uint8_t *) ppd + TPACKET2_HDRLEN -
			sizeof(struct sockaddr_ll);
		memcpy(pbuf, rte_pktmbuf_mtod(mbuf, void*), rte_pktmbuf_data_len(mbuf));
		ppd->tp_len = ppd->tp_snaplen = rte_pktmbuf_data_len(mbuf);

		/* hand the frame to the kernel and advance the ring buffer */
		ppd->tp_status = TP_STATUS_SEND_REQUEST;
		if (++framenum >= framecount)
			framenum = 0;
		ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base;

		num_tx++;
		num_tx_bytes += mbuf->pkt_len;
		rte_pktmbuf_free(mbuf);
	}

	/* kick off transmits */
	if (sendto(pkt_q->sockfd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
		return 0; /* error sending -- no packets transmitted */

	pkt_q->framenum = framenum;
	pkt_q->tx_pkts += num_tx;
	pkt_q->err_pkts += nb_pkts - num_tx;
	pkt_q->tx_bytes += num_tx_bytes;
	return num_tx;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

/*
 * This function gets called when the current port gets stopped.
 */
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	int sockfd;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_queues; i++) {
		sockfd = internals->rx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);
		sockfd = internals->tx_queue[i].sockfd;
		if (sockfd != -1)
			close(sockfd);
	}

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
	dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}

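/*
 * Aggregate the per-queue software counters into the device-wide totals;
 * per-queue values are only reported for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */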
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, imax;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	unsigned long rx_bytes_total = 0, tx_bytes_total = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += igb_stats->q_ipackets[i];
		rx_bytes_total += igb_stats->q_ibytes[i];
	}

	imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ?
	        internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS);
	for (i = 0; i < imax; i++) {
		igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
		tx_bytes_total += igb_stats->q_obytes[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->ibytes = rx_bytes_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
	igb_stats->obytes = tx_bytes_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}

	for (i = 0; i < internal->nb_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}

static void
eth_dev_close(struct rte_eth_dev *dev __rte_unused)
{
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
	return 0;
}

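/*
 * RX queue setup records the mempool and verifies that a full Ethernet
 * frame (ETH_FRAME_LEN) fits in the mbuf data room, since the RX path
 * copies each frame into a single-segment mbuf.
 */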
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t rx_queue_id,
                   uint16_t nb_rx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_rxconf *rx_conf __rte_unused,
                   struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct pkt_rx_queue *pkt_q = &internals->rx_queue[rx_queue_id];
	uint16_t buf_size;

	pkt_q->mb_pool = mb_pool;

	/* Now get the space available for data in the mbuf */
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(pkt_q->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	if (ETH_FRAME_LEN > buf_size) {
		RTE_LOG(ERR, PMD,
			"%s: %d bytes will not fit in mbuf (%d bytes)\n",
			dev->data->name, ETH_FRAME_LEN, buf_size);
		return -ENOMEM;
	}

	dev->data->rx_queues[rx_queue_id] = pkt_q;
	pkt_q->in_port = dev->data->port_id;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
                   uint16_t tx_queue_id,
                   uint16_t nb_tx_desc __rte_unused,
                   unsigned int socket_id __rte_unused,
                   const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
};

/*
 * Opens an AF_PACKET socket
 */
static int
open_packet_iface(const char *key __rte_unused,
                  const char *value __rte_unused,
                  void *extra_args)
{
	int *sockfd = extra_args;

	/* Open an AF_PACKET socket... */
	*sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (*sockfd == -1) {
		RTE_LOG(ERR, PMD, "Could not open AF_PACKET socket\n");
		return -1;
	}

	return 0;
}

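/*
 * Allocate the ethdev data and the per-device internals, then create one
 * AF_PACKET socket per queue pair: each socket is switched to TPACKET_V2,
 * given matching PACKET_RX_RING/PACKET_TX_RING buffers, mmap()ed and
 * bound to the requested interface.
 */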
static int
rte_pmd_init_internals(const char *name,
                       const int sockfd,
                       const unsigned nb_queues,
                       unsigned int blocksize,
                       unsigned int blockcnt,
                       unsigned int framesize,
                       unsigned int framecnt,
                       const unsigned numa_node,
                       struct pmd_internals **internals,
                       struct rte_eth_dev **eth_dev,
                       struct rte_kvargs *kvlist)
{
	struct rte_eth_dev_data *data = NULL;
	struct rte_kvargs_pair *pair = NULL;
	struct ifreq ifr;
	size_t ifnamelen;
	unsigned k_idx;
	struct sockaddr_ll sockaddr;
	struct tpacket_req *req;
	struct pkt_rx_queue *rx_queue;
	struct pkt_tx_queue *tx_queue;
	int rc, tpver, discard;
	int qsockfd = -1;
	unsigned int i, q, rdsize;
	int fanout_arg __rte_unused, bypass __rte_unused;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_IFACE_ARG) != NULL)
			break;
	}
	if (pair == NULL) {
		RTE_LOG(ERR, PMD,
			"%s: no interface specified for AF_PACKET ethdev\n",
		        name);
		goto error_early;
	}

	RTE_LOG(INFO, PMD,
		"%s: creating AF_PACKET-backed ethdev on numa socket %u\n",
		name, numa_node);

	/*
	 * now do all data allocation - for the eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error_early;

	*internals = rte_zmalloc_socket(name, sizeof(**internals),
	                                0, numa_node);
	if (*internals == NULL)
		goto error_early;

	for (q = 0; q < nb_queues; q++) {
		(*internals)->rx_queue[q].map = MAP_FAILED;
		(*internals)->tx_queue[q].map = MAP_FAILED;
	}

	req = &((*internals)->req);

	req->tp_block_size = blocksize;
	req->tp_block_nr = blockcnt;
	req->tp_frame_size = framesize;
	req->tp_frame_nr = framecnt;

	ifnamelen = strlen(pair->value);
	if (ifnamelen < sizeof(ifr.ifr_name)) {
		memcpy(ifr.ifr_name, pair->value, ifnamelen);
		ifr.ifr_name[ifnamelen] = '\0';
	} else {
		RTE_LOG(ERR, PMD,
			"%s: I/F name too long (%s)\n",
			name, pair->value);
		goto error_early;
	}
	if (ioctl(sockfd, SIOCGIFINDEX, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFINDEX)\n",
		        name);
		goto error_early;
	}
	(*internals)->if_index = ifr.ifr_ifindex;

	if (ioctl(sockfd, SIOCGIFHWADDR, &ifr) == -1) {
		RTE_LOG(ERR, PMD,
			"%s: ioctl failed (SIOCGIFHWADDR)\n",
		        name);
		goto error_early;
	}
	memcpy(&(*internals)->eth_addr, ifr.ifr_hwaddr.sa_data, ETH_ALEN);

	memset(&sockaddr, 0, sizeof(sockaddr));
	sockaddr.sll_family = AF_PACKET;
	sockaddr.sll_protocol = htons(ETH_P_ALL);
	sockaddr.sll_ifindex = (*internals)->if_index;

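	/*
	 * When PACKET_FANOUT is available, every per-queue socket joins the
	 * same fanout group (id derived from pid ^ ifindex) so the kernel
	 * hashes incoming flows across the queues.
	 */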
#if defined(PACKET_FANOUT)
	fanout_arg = (getpid() ^ (*internals)->if_index) & 0xffff;
	fanout_arg |= (PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_DEFRAG) << 16;
#if defined(PACKET_FANOUT_FLAG_ROLLOVER)
	fanout_arg |= PACKET_FANOUT_FLAG_ROLLOVER << 16;
#endif
#endif

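	/* set up one RX/TX ring pair per queue, both backed by the same socket */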
	for (q = 0; q < nb_queues; q++) {
		/* Open an AF_PACKET socket for this queue... */
		qsockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		if (qsockfd == -1) {
			RTE_LOG(ERR, PMD,
			        "%s: could not open AF_PACKET socket\n",
			        name);
			return -1;
		}

		tpver = TPACKET_V2;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_VERSION,
				&tpver, sizeof(tpver));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_VERSION on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		discard = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_LOSS,
				&discard, sizeof(discard));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_LOSS on "
			        "AF_PACKET socket for %s\n", name, pair->value);
			goto error;
		}

#if defined(PACKET_QDISC_BYPASS)
		bypass = 1;
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
				&bypass, sizeof(bypass));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_QDISC_BYPASS "
			        "on AF_PACKET socket for %s\n", name,
			        pair->value);
			goto error;
		}
#endif

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_RX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_TX_RING, req, sizeof(*req));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_TX_RING on AF_PACKET "
				"socket for %s\n", name, pair->value);
			goto error;
		}

		rx_queue = &((*internals)->rx_queue[q]);
		rx_queue->framecount = req->tp_frame_nr;

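		/*
		 * One mmap() covers both rings: the kernel places the RX ring
		 * first and the TX ring immediately after it, hence the
		 * doubled length here and the tx_queue->map offset below.
		 */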
		rx_queue->map = mmap(NULL, 2 * req->tp_block_size * req->tp_block_nr,
				    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
				    qsockfd, 0);
		if (rx_queue->map == MAP_FAILED) {
			RTE_LOG(ERR, PMD,
				"%s: call to mmap failed on AF_PACKET socket for %s\n",
				name, pair->value);
			goto error;
		}

		/* rdsize is the same for both Tx and Rx */
		rdsize = req->tp_frame_nr * sizeof(*(rx_queue->rd));

		rx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (rx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			rx_queue->rd[i].iov_base = rx_queue->map + (i * framesize);
			rx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		rx_queue->sockfd = qsockfd;

		tx_queue = &((*internals)->tx_queue[q]);
		tx_queue->framecount = req->tp_frame_nr;

		tx_queue->map = rx_queue->map + req->tp_block_size * req->tp_block_nr;

		tx_queue->rd = rte_zmalloc_socket(name, rdsize, 0, numa_node);
		if (tx_queue->rd == NULL)
			goto error;
		for (i = 0; i < req->tp_frame_nr; ++i) {
			tx_queue->rd[i].iov_base = tx_queue->map + (i * framesize);
			tx_queue->rd[i].iov_len = req->tp_frame_size;
		}
		tx_queue->sockfd = qsockfd;

		rc = bind(qsockfd, (const struct sockaddr*)&sockaddr, sizeof(sockaddr));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not bind AF_PACKET socket to %s\n",
			        name, pair->value);
			goto error;
		}

#if defined(PACKET_FANOUT)
		rc = setsockopt(qsockfd, SOL_PACKET, PACKET_FANOUT,
				&fanout_arg, sizeof(fanout_arg));
		if (rc == -1) {
			RTE_LOG(ERR, PMD,
				"%s: could not set PACKET_FANOUT on AF_PACKET socket "
				"for %s\n", name, pair->value);
			goto error;
		}
#endif
	}

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name);
	if (*eth_dev == NULL)
		goto error;

	/*
	 * now put it all together
	 * - store queue data in internals,
	 * - store numa_node in eth_dev
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */

	(*internals)->nb_queues = nb_queues;

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_queues;
	data->nb_tx_queues = (uint16_t)nb_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &(*internals)->eth_addr;
	strncpy(data->name,
		(*eth_dev)->data->name, strlen((*eth_dev)->data->name));

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->driver = NULL;
	(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	(*eth_dev)->data->drv_name = drivername;
	(*eth_dev)->data->kdrv = RTE_KDRV_NONE;
	(*eth_dev)->data->numa_node = numa_node;

	return 0;

error:
	if (qsockfd != -1)
		close(qsockfd);
	for (q = 0; q < nb_queues; q++) {
		munmap((*internals)->rx_queue[q].map,
		       2 * req->tp_block_size * req->tp_block_nr);

		rte_free((*internals)->rx_queue[q].rd);
		rte_free((*internals)->tx_queue[q].rd);
		if (((*internals)->rx_queue[q].sockfd != 0) &&
			((*internals)->rx_queue[q].sockfd != qsockfd))
			close((*internals)->rx_queue[q].sockfd);
	}
	rte_free(*internals);
error_early:
	rte_free(data);
	return -1;
}

static int
rte_eth_from_packet(const char *name,
                    int const *sockfd,
                    const unsigned numa_node,
                    struct rte_kvargs *kvlist)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	struct rte_kvargs_pair *pair = NULL;
	unsigned k_idx;
	unsigned int blockcount;
	unsigned int blocksize = DFLT_BLOCK_SIZE;
	unsigned int framesize = DFLT_FRAME_SIZE;
	unsigned int framecount = DFLT_FRAME_COUNT;
	unsigned int qpairs = 1;

	/* do some parameter checking */
	if (*sockfd < 0)
		return -1;

	/*
	 * Walk arguments for configurable settings
	 */
	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, ETH_AF_PACKET_NUM_Q_ARG) != NULL) {
			qpairs = atoi(pair->value);
			if (qpairs < 1 ||
			    qpairs > RTE_PMD_AF_PACKET_MAX_RINGS) {
				RTE_LOG(ERR, PMD,
					"%s: invalid qpairs value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_BLOCKSIZE_ARG) != NULL) {
			blocksize = atoi(pair->value);
			if (!blocksize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid blocksize value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMESIZE_ARG) != NULL) {
			framesize = atoi(pair->value);
			if (!framesize) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framesize value\n",
				        name);
				return -1;
			}
			continue;
		}
		if (strstr(pair->key, ETH_AF_PACKET_FRAMECOUNT_ARG) != NULL) {
			framecount = atoi(pair->value);
			if (!framecount) {
				RTE_LOG(ERR, PMD,
					"%s: invalid framecount value\n",
				        name);
				return -1;
			}
			continue;
		}
	}

	if (framesize > blocksize) {
		RTE_LOG(ERR, PMD,
			"%s: AF_PACKET MMAP frame size exceeds block size!\n",
		        name);
		return -1;
	}

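	/* each block holds (blocksize / framesize) frames */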
	blockcount = framecount / (blocksize / framesize);
	if (!blockcount) {
		RTE_LOG(ERR, PMD,
			"%s: invalid AF_PACKET MMAP parameters\n", name);
		return -1;
	}

	RTE_LOG(INFO, PMD, "%s: AF_PACKET MMAP parameters:\n", name);
	RTE_LOG(INFO, PMD, "%s:\tblock size %d\n", name, blocksize);
	RTE_LOG(INFO, PMD, "%s:\tblock count %d\n", name, blockcount);
	RTE_LOG(INFO, PMD, "%s:\tframe size %d\n", name, framesize);
	RTE_LOG(INFO, PMD, "%s:\tframe count %d\n", name, framecount);

	if (rte_pmd_init_internals(name, *sockfd, qpairs,
	                           blocksize, blockcount,
	                           framesize, framecount,
	                           numa_node, &internals, &eth_dev,
	                           kvlist) < 0)
		return -1;

	eth_dev->rx_pkt_burst = eth_af_packet_rx;
	eth_dev->tx_pkt_burst = eth_af_packet_tx;

	return 0;
}

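/*
 * Probe entry point for the vdev bus.  A typical invocation (the values
 * here are purely illustrative) looks like:
 *     --vdev=net_af_packet0,iface=eth0,qpairs=1,blocksz=4096,framesz=2048,framecnt=512
 */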
static int
rte_pmd_af_packet_probe(const char *name, const char *params)
{
	unsigned numa_node;
	int ret = 0;
	struct rte_kvargs *kvlist;
	int sockfd = -1;

	RTE_LOG(INFO, PMD, "Initializing pmd_af_packet for %s\n", name);

	numa_node = rte_socket_id();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL) {
		ret = -1;
		goto exit;
	}

	/*
	 * If iface argument is passed we open the NICs and use them for
	 * reading / writing
	 */
	if (rte_kvargs_count(kvlist, ETH_AF_PACKET_IFACE_ARG) == 1) {
		ret = rte_kvargs_process(kvlist, ETH_AF_PACKET_IFACE_ARG,
		                         &open_packet_iface, &sockfd);
		if (ret < 0)
			goto exit;
	}

	ret = rte_eth_from_packet(name, &sockfd, numa_node, kvlist);
	close(sockfd); /* no longer needed */

exit:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_af_packet_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pmd_internals *internals;
	unsigned q;

	RTE_LOG(INFO, PMD, "Closing AF_PACKET ethdev on numa socket %u\n",
			rte_socket_id());

	if (name == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	internals = eth_dev->data->dev_private;
	for (q = 0; q < internals->nb_queues; q++) {
		rte_free(internals->rx_queue[q].rd);
		rte_free(internals->tx_queue[q].rd);
	}

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_packet_drv = {
	.probe = rte_pmd_af_packet_probe,
	.remove = rte_pmd_af_packet_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_packet, pmd_af_packet_drv);
RTE_PMD_REGISTER_ALIAS(net_af_packet, eth_af_packet);
RTE_PMD_REGISTER_PARAM_STRING(net_af_packet,
	"iface=<string> "
	"qpairs=<int> "
	"blocksz=<int> "
	"framesz=<int> "
	"framecnt=<int>");