rte_eth_null.c revision 9ca4a157
/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED, /* virtual device, no autonegotiation */
};

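/*
 * RX burst for the default (no-copy) mode: allocate up to nb_bufs mbufs
 * from the queue's mempool, set their lengths to the configured packet
 * size and report them as received. The payload is left untouched.
 */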
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

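/*
 * RX burst for copy mode: same as eth_null_rx(), but also copies the
 * queue's zero-filled dummy packet into each mbuf's data area.
 */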
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

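/*
 * TX burst for the default (no-copy) mode: simply free the mbufs and
 * count them as transmitted.
 */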
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

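/*
 * TX burst for copy mode: copy each packet's data into the queue's
 * dummy packet buffer before freeing the mbuf.
 */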
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

static struct rte_vdev_driver pmd_null_drv;

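/*
 * Create a null ethdev: allocate the per-process device data and private
 * internals, register an ethdev entry and hook up the rx/tx burst
 * functions according to the requested copy mode.
 */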
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure and internal
	 * (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so that the null device's data is local to this process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	snprintf(data->name, sizeof(data->name), "%s", eth_dev->data->name);

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = pmd_null_drv.driver.name;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

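/*
 * Probe entry point: parse the optional "size" and "copy" devargs and
 * create the null device on the caller's NUMA socket.
 */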
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

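/*
 * Example usage (the devargs names match the parameter string below; the
 * device instance name is only illustrative):
 *   --vdev=net_null0,size=128,copy=1
 */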
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");