/*-
 *   BSD LICENSE
 *
 *   Copyright (C) IGEL Co.,Ltd.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of IGEL Co.,Ltd. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_spinlock.h>

#include "rte_eth_null.h"

#define ETH_NULL_PACKET_SIZE_ARG	"size"
#define ETH_NULL_PACKET_COPY_ARG	"copy"

static unsigned default_packet_size = 64;
static unsigned default_packet_copy;

static const char *valid_arguments[] = {
	ETH_NULL_PACKET_SIZE_ARG,
	ETH_NULL_PACKET_COPY_ARG,
	NULL
};

struct pmd_internals;

struct null_queue {
	struct pmd_internals *internals;

	struct rte_mempool *mb_pool;
	struct rte_mbuf *dummy_packet;

	rte_atomic64_t rx_pkts;
	rte_atomic64_t tx_pkts;
	rte_atomic64_t err_pkts;
};

struct pmd_internals {
	unsigned packet_size;
	unsigned packet_copy;
	uint8_t port_id;

	struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT];
	struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT];

	/** Bit mask of RSS offloads, the bit offset also means flow type */
	uint64_t flow_type_rss_offloads;

	rte_spinlock_t rss_lock;

	uint16_t reta_size;
	struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
			RTE_RETA_GROUP_SIZE];

	uint8_t rss_key[40];                /**< 40-byte hash key. */
};


static struct ether_addr eth_addr = { .addr_bytes = {0} };
static const char *drivername = "Null PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_SPEED_NUM_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = ETH_LINK_DOWN,
	.link_autoneg = ETH_LINK_FIXED,
};

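/*
 * RX without copy: allocate nb_bufs mbufs from the queue's mempool and hand
 * them to the caller with the configured packet size, without touching the
 * payload. Stops early if the mempool runs dry.
 */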
static uint16_t
eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

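/*
 * RX with copy: like eth_null_rx(), but also copies the queue's dummy packet
 * into each allocated mbuf so the payload is initialized.
 */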
static uint16_t
eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		bufs[i] = rte_pktmbuf_alloc(h->mb_pool);
		if (!bufs[i])
			break;
		rte_memcpy(rte_pktmbuf_mtod(bufs[i], void *), h->dummy_packet,
					packet_size);
		bufs[i]->data_len = (uint16_t)packet_size;
		bufs[i]->pkt_len = packet_size;
		bufs[i]->nb_segs = 1;
		bufs[i]->next = NULL;
		bufs[i]->port = h->internals->port_id;
	}

	rte_atomic64_add(&(h->rx_pkts), i);

	return i;
}

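/*
 * TX without copy: simply free every mbuf handed in and count it as
 * transmitted.
 */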
static uint16_t
eth_null_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	for (i = 0; i < nb_bufs; i++)
		rte_pktmbuf_free(bufs[i]);

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

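/*
 * TX with copy: copy the first packet_size bytes of each packet into the
 * queue's dummy packet buffer before freeing the mbuf, so the data path
 * actually touches the payload.
 */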
static uint16_t
eth_null_copy_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
{
	int i;
	struct null_queue *h = q;
	unsigned packet_size;

	if ((q == NULL) || (bufs == NULL))
		return 0;

	packet_size = h->internals->packet_size;
	for (i = 0; i < nb_bufs; i++) {
		rte_memcpy(h->dummy_packet, rte_pktmbuf_mtod(bufs[i], void *),
					packet_size);
		rte_pktmbuf_free(bufs[i]);
	}

	rte_atomic64_add(&(h->tx_pkts), i);

	return i;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return -EINVAL;

	dev->data->dev_link.link_status = ETH_LINK_UP;
	return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	if (dev == NULL)
		return;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;
}

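/*
 * An RX queue only needs the mempool to allocate mbufs from and a
 * packet_size-byte scratch buffer used as the dummy packet in copy mode.
 */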
static int
eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if ((dev == NULL) || (mb_pool == NULL))
		return -EINVAL;

	internals = dev->data->dev_private;

	if (rx_queue_id >= dev->data->nb_rx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	internals->rx_null_queues[rx_queue_id].mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] =
		&internals->rx_null_queues[rx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->rx_null_queues[rx_queue_id].internals = internals;
	internals->rx_null_queues[rx_queue_id].dummy_packet = dummy_packet;

	return 0;
}

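/*
 * A TX queue mirrors the RX setup: no descriptor ring, just a dummy packet
 * buffer that copy-mode TX writes into.
 */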
static int
eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct rte_mbuf *dummy_packet;
	struct pmd_internals *internals;
	unsigned packet_size;

	if (dev == NULL)
		return -EINVAL;

	internals = dev->data->dev_private;

	if (tx_queue_id >= dev->data->nb_tx_queues)
		return -ENODEV;

	packet_size = internals->packet_size;

	dev->data->tx_queues[tx_queue_id] =
		&internals->tx_null_queues[tx_queue_id];
	dummy_packet = rte_zmalloc_socket(NULL,
			packet_size, 0, dev->data->numa_node);
	if (dummy_packet == NULL)
		return -ENOMEM;

	internals->tx_null_queues[tx_queue_id].internals = internals;
	internals->tx_null_queues[tx_queue_id].dummy_packet = dummy_packet;

	return 0;
}


static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals;

	if ((dev == NULL) || (dev_info == NULL))
		return;

	internals = dev->data->dev_private;
	dev_info->driver_name = drivername;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = RTE_DIM(internals->rx_null_queues);
	dev_info->max_tx_queues = RTE_DIM(internals->tx_null_queues);
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
	dev_info->reta_size = internals->reta_size;
	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
}

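/*
 * Per-queue counters live in the null_queue structures; sum them into the
 * generic stats, limited to RTE_ETHDEV_QUEUE_STAT_CNTRS entries.
 */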
static void
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats)
{
	unsigned i, num_stats;
	unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0;
	const struct pmd_internals *internal;

	if ((dev == NULL) || (igb_stats == NULL))
		return;

	internal = dev->data->dev_private;
	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_rx_queues,
				RTE_DIM(internal->rx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_ipackets[i] =
			internal->rx_null_queues[i].rx_pkts.cnt;
		rx_total += igb_stats->q_ipackets[i];
	}

	num_stats = RTE_MIN((unsigned)RTE_ETHDEV_QUEUE_STAT_CNTRS,
			RTE_MIN(dev->data->nb_tx_queues,
				RTE_DIM(internal->tx_null_queues)));
	for (i = 0; i < num_stats; i++) {
		igb_stats->q_opackets[i] =
			internal->tx_null_queues[i].tx_pkts.cnt;
		igb_stats->q_errors[i] =
			internal->tx_null_queues[i].err_pkts.cnt;
		tx_total += igb_stats->q_opackets[i];
		tx_err_total += igb_stats->q_errors[i];
	}

	igb_stats->ipackets = rx_total;
	igb_stats->opackets = tx_total;
	igb_stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal;

	if (dev == NULL)
		return;

	internal = dev->data->dev_private;
	for (i = 0; i < RTE_DIM(internal->rx_null_queues); i++)
		internal->rx_null_queues[i].rx_pkts.cnt = 0;
	for (i = 0; i < RTE_DIM(internal->tx_null_queues); i++) {
		internal->tx_null_queues[i].tx_pkts.cnt = 0;
		internal->tx_null_queues[i].err_pkts.cnt = 0;
	}
}

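/* Releasing a queue only has to free its dummy packet buffer. */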
static void
eth_queue_release(void *q)
{
	struct null_queue *nq;

	if (q == NULL)
		return;

	nq = q;
	rte_free(nq->dummy_packet);
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused) { return 0; }

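/*
 * The RSS redirection table is emulated in software: update and query just
 * copy entries between the caller's reta_conf and the private copy, under
 * the rss_lock spinlock.
 */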
static int
eth_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		internal->reta_conf[i].mask = reta_conf[i].mask;
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				internal->reta_conf[i].reta[j] = reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
{
	int i, j;
	struct pmd_internals *internal = dev->data->dev_private;

	if (reta_size != internal->reta_size)
		return -EINVAL;

	rte_spinlock_lock(&internal->rss_lock);

	/* Copy RETA table */
	for (i = 0; i < (internal->reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
			if ((reta_conf[i].mask >> j) & 0x01)
				reta_conf[i].reta[j] = internal->reta_conf[i].reta[j];
	}

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

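/*
 * RSS hash configuration is likewise only stored, never used for real
 * hashing: the requested hash functions are masked against the advertised
 * offloads and the 40-byte key is kept in internals->rss_key.
 */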
static int
eth_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	if ((rss_conf->rss_hf & internal->flow_type_rss_offloads) != 0)
		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
				rss_conf->rss_hf & internal->flow_type_rss_offloads;

	if (rss_conf->rss_key)
		rte_memcpy(internal->rss_key, rss_conf->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static int
eth_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf)
{
	struct pmd_internals *internal = dev->data->dev_private;

	rte_spinlock_lock(&internal->rss_lock);

	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_conf->rss_key)
		rte_memcpy(rss_conf->rss_key, internal->rss_key, 40);

	rte_spinlock_unlock(&internal->rss_lock);

	return 0;
}

static void
eth_mac_address_set(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused struct ether_addr *addr)
{
}

static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.mac_addr_set = eth_mac_address_set,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.reta_update = eth_rss_reta_update,
	.reta_query = eth_rss_reta_query,
	.rss_hash_update = eth_rss_hash_update,
	.rss_hash_conf_get = eth_rss_hash_conf_get
};

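/*
 * Create a null ethdev: allocate the per-process dev_data and private
 * internals on the requested NUMA node, reserve an ethdev entry and hook up
 * the copy or no-copy RX/TX burst functions.
 */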
int
eth_dev_null_create(const char *name,
		const unsigned numa_node,
		unsigned packet_size,
		unsigned packet_copy)
{
	const unsigned nb_rx_queues = 1;
	const unsigned nb_tx_queues = 1;
	struct rte_eth_dev_data *data = NULL;
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;

	static const uint8_t default_rss_key[40] = {
		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
		0xBE, 0xAC, 0x01, 0xFA
	};

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Creating null ethdev on numa socket %u\n",
			numa_node);

	/* now do all data allocation - for eth_dev structure and internal
	 * (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	eth_dev = rte_eth_dev_allocate(name);
	if (eth_dev == NULL)
		goto error;

	/* now put it all together
	 * - store queue data in internals,
	 * - store numa_node info in ethdev data
	 * - point eth_dev_data to internals
	 * - and point eth_dev structure to new eth_dev_data structure
	 */
	/* NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the nulls are local per-process */

	internals->packet_size = packet_size;
	internals->packet_copy = packet_copy;
	internals->port_id = eth_dev->data->port_id;

	internals->flow_type_rss_offloads =  ETH_RSS_PROTO_MASK;
	internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE;

	rte_memcpy(internals->rss_key, default_rss_key, 40);

	data->dev_private = internals;
	data->port_id = eth_dev->data->port_id;
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;
	strncpy(data->name, eth_dev->data->name, strlen(eth_dev->data->name));

	eth_dev->data = data;
	eth_dev->dev_ops = &ops;

	TAILQ_INIT(&eth_dev->link_intr_cbs);

	eth_dev->driver = NULL;
	data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	data->kdrv = RTE_KDRV_NONE;
	data->drv_name = drivername;
	data->numa_node = numa_node;

	/* finally assign rx and tx ops */
	if (packet_copy) {
		eth_dev->rx_pkt_burst = eth_null_copy_rx;
		eth_dev->tx_pkt_burst = eth_null_copy_tx;
	} else {
		eth_dev->rx_pkt_burst = eth_null_rx;
		eth_dev->tx_pkt_burst = eth_null_tx;
	}

	return 0;

error:
	rte_free(data);
	rte_free(internals);

	return -1;
}

static inline int
get_packet_size_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_size = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_size = (unsigned)strtoul(a, NULL, 0);
	if (*packet_size == UINT_MAX)
		return -1;

	return 0;
}

static inline int
get_packet_copy_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	const char *a = value;
	unsigned *packet_copy = extra_args;

	if ((value == NULL) || (extra_args == NULL))
		return -EINVAL;

	*packet_copy = (unsigned)strtoul(a, NULL, 0);
	if (*packet_copy == UINT_MAX)
		return -1;

	return 0;
}

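/*
 * Probe entry point for the vdev bus: parse the optional "size" and "copy"
 * kvargs and create the device on the calling lcore's NUMA socket.
 */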
static int
rte_pmd_null_probe(const char *name, const char *params)
{
	unsigned numa_node;
	unsigned packet_size = default_packet_size;
	unsigned packet_copy = default_packet_copy;
	struct rte_kvargs *kvlist = NULL;
	int ret;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Initializing pmd_null for %s\n", name);

	numa_node = rte_socket_id();

	if (params != NULL) {
		kvlist = rte_kvargs_parse(params, valid_arguments);
		if (kvlist == NULL)
			return -1;

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_SIZE_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_SIZE_ARG,
					&get_packet_size_arg, &packet_size);
			if (ret < 0)
				goto free_kvlist;
		}

		if (rte_kvargs_count(kvlist, ETH_NULL_PACKET_COPY_ARG) == 1) {

			ret = rte_kvargs_process(kvlist,
					ETH_NULL_PACKET_COPY_ARG,
					&get_packet_copy_arg, &packet_copy);
			if (ret < 0)
				goto free_kvlist;
		}
	}

	RTE_LOG(INFO, PMD, "Configure pmd_null: packet size is %u, "
			"packet copy is %s\n", packet_size,
			packet_copy ? "enabled" : "disabled");

	ret = eth_dev_null_create(name, numa_node, packet_size, packet_copy);

free_kvlist:
	if (kvlist)
		rte_kvargs_free(kvlist);
	return ret;
}

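/*
 * Remove entry point: look up the port created by probe, free its private
 * data and dev data, and release the ethdev entry.
 */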
static int
rte_pmd_null_remove(const char *name)
{
	struct rte_eth_dev *eth_dev = NULL;

	if (name == NULL)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Closing null ethdev on numa socket %u\n",
			rte_socket_id());

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -1;

	rte_free(eth_dev->data->dev_private);
	rte_free(eth_dev->data);

	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_null_drv = {
	.probe = rte_pmd_null_probe,
	.remove = rte_pmd_null_remove,
};

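/*
 * Register as a virtual device named "net_null" (legacy alias "eth_null").
 * A port can then be created at EAL startup, for example with
 * --vdev=net_null0,size=64,copy=1.
 */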
RTE_PMD_REGISTER_VDEV(net_null, pmd_null_drv);
RTE_PMD_REGISTER_ALIAS(net_null, eth_null);
RTE_PMD_REGISTER_PARAM_STRING(net_null,
	"size=<int> "
	"copy=<int>");