rte_ethdev.c revision 3d9b7210
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>

#include "rte_ether.h"
#include "rte_ethdev.h"

static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
static uint8_t nb_ports;

/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove rx callbacks */
static rte_spinlock_t rte_eth_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* spinlock for add/remove tx callbacks */
static rte_spinlock_t rte_eth_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;

/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};

static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
	{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
	{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
	{"rx_good_bytes", offsetof(struct rte_eth_stats, ibytes)},
	{"tx_good_bytes", offsetof(struct rte_eth_stats, obytes)},
	{"rx_errors", offsetof(struct rte_eth_stats, ierrors)},
	{"tx_errors", offsetof(struct rte_eth_stats, oerrors)},
	{"rx_mbuf_allocation_errors", offsetof(struct rte_eth_stats,
		rx_nombuf)},
};

#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_ibytes)},
	{"errors", offsetof(struct rte_eth_stats, q_errors)},
};

#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) /	\
		sizeof(rte_rxq_stats_strings[0]))

static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
	{"packets", offsetof(struct rte_eth_stats, q_opackets)},
	{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) /	\
		sizeof(rte_txq_stats_strings[0]))

/**
 * The user application callback description.
 *
 * It contains the callback address to be registered by the user
 * application, a pointer to the callback's parameters, and the event type.
 */
struct rte_eth_dev_callback {
	TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
	rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
	void *cb_arg;                           /**< Parameter for callback */
	enum rte_eth_event_type event;          /**< Interrupt event type */
	uint32_t active;                        /**< Callback is executing */
};

enum {
	STAT_QMAP_TX = 0,
	STAT_QMAP_RX
};

enum {
	DEV_DETACHED = 0,
	DEV_ATTACHED
};

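/*
 * Allocate (in the primary process) or look up (in a secondary process)
 * the shared memzone that backs the rte_eth_dev_data array, so every
 * process attached to the same EAL instance sees the same per-port data.
 */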
static void
rte_eth_dev_data_alloc(void)
{
	const unsigned flags = 0;
	const struct rte_memzone *mz;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data),
				rte_socket_id(), flags);
	} else
		mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
	if (mz == NULL)
		rte_panic("Cannot allocate memzone for ethernet port data\n");

	rte_eth_dev_data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(rte_eth_dev_data, 0,
				RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data));
}

struct rte_eth_dev *
rte_eth_dev_allocated(const char *name)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if ((rte_eth_devices[i].attached == DEV_ATTACHED) &&
		    strcmp(rte_eth_devices[i].data->name, name) == 0)
			return &rte_eth_devices[i];
	}
	return NULL;
}

static uint8_t
rte_eth_dev_find_free_port(void)
{
	unsigned i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (rte_eth_devices[i].attached == DEV_DETACHED)
			return i;
	}
	return RTE_MAX_ETHPORTS;
}

struct rte_eth_dev *
rte_eth_dev_allocate(const char *name)
{
	uint8_t port_id;
	struct rte_eth_dev *eth_dev;

	port_id = rte_eth_dev_find_free_port();
	if (port_id == RTE_MAX_ETHPORTS) {
		RTE_PMD_DEBUG_TRACE("Reached maximum number of Ethernet ports\n");
		return NULL;
	}

	if (rte_eth_dev_data == NULL)
		rte_eth_dev_data_alloc();

	if (rte_eth_dev_allocated(name) != NULL) {
		RTE_PMD_DEBUG_TRACE("Ethernet Device with name %s already allocated!\n",
				name);
		return NULL;
	}

	eth_dev = &rte_eth_devices[port_id];
	eth_dev->data = &rte_eth_dev_data[port_id];
	snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
	eth_dev->data->port_id = port_id;
	eth_dev->attached = DEV_ATTACHED;
	eth_dev_last_created_port = port_id;
	nb_ports++;
	return eth_dev;
}

int
rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
{
	if (eth_dev == NULL)
		return -EINVAL;

	eth_dev->attached = DEV_DETACHED;
	nb_ports--;
	return 0;
}

int
rte_eth_dev_pci_probe(struct rte_pci_driver *pci_drv,
		      struct rte_pci_device *pci_dev)
{
	struct eth_driver    *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];

	int diag;

	eth_drv = (struct eth_driver *)pci_drv;

	rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
			sizeof(ethdev_name));

	eth_dev = rte_eth_dev_allocate(ethdev_name);
	if (eth_dev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
				  eth_drv->dev_private_size,
				  RTE_CACHE_LINE_SIZE);
		if (eth_dev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private port data\n");
	}
	eth_dev->pci_dev = pci_dev;
	eth_dev->driver = eth_drv;
	eth_dev->data->rx_mbuf_alloc_failed = 0;

	/* init user callbacks */
	TAILQ_INIT(&(eth_dev->link_intr_cbs));

	/*
	 * Set the default MTU.
	 */
	eth_dev->data->mtu = ETHER_MTU;

	/* Invoke PMD device initialization function */
	diag = (*eth_drv->eth_dev_init)(eth_dev);
	if (diag == 0)
		return 0;

	RTE_PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x) failed\n",
			pci_drv->driver.name,
			(unsigned) pci_dev->id.vendor_id,
			(unsigned) pci_dev->id.device_id);
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);
	rte_eth_dev_release_port(eth_dev);
	return diag;
}

int
rte_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	const struct eth_driver *eth_drv;
	struct rte_eth_dev *eth_dev;
	char ethdev_name[RTE_ETH_NAME_MAX_LEN];
	int ret;

	if (pci_dev == NULL)
		return -EINVAL;

	rte_eal_pci_device_name(&pci_dev->addr, ethdev_name,
			sizeof(ethdev_name));

	eth_dev = rte_eth_dev_allocated(ethdev_name);
	if (eth_dev == NULL)
		return -ENODEV;

	eth_drv = (const struct eth_driver *)pci_dev->driver;

	/* Invoke PMD device uninit function */
	if (*eth_drv->eth_dev_uninit) {
		ret = (*eth_drv->eth_dev_uninit)(eth_dev);
		if (ret)
			return ret;
	}

	/* free ether device */
	rte_eth_dev_release_port(eth_dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(eth_dev->data->dev_private);

	eth_dev->pci_dev = NULL;
	eth_dev->driver = NULL;
	eth_dev->data = NULL;

	return 0;
}

int
rte_eth_dev_is_valid_port(uint8_t port_id)
{
	if (port_id >= RTE_MAX_ETHPORTS ||
	    rte_eth_devices[port_id].attached != DEV_ATTACHED)
		return 0;
	else
		return 1;
}

int
rte_eth_dev_socket_id(uint8_t port_id)
{
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1);
	return rte_eth_devices[port_id].data->numa_node;
}

uint8_t
rte_eth_dev_count(void)
{
	return nb_ports;
}

int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
	char *tmp;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	/* don't check 'rte_eth_devices[i].data' here,
	 * because it might be overwritten by a VDEV PMD */
	tmp = rte_eth_dev_data[port_id].name;
	strcpy(name, tmp);
	return 0;
}

int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
	int i;

	if (name == NULL) {
		RTE_PMD_DEBUG_TRACE("Null pointer is specified\n");
		return -EINVAL;
	}

	*port_id = RTE_MAX_ETHPORTS;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {

		if (!strncmp(name,
			rte_eth_dev_data[i].name, strlen(name))) {

			*port_id = i;

			return 0;
		}
	}
	return -ENODEV;
}

static int
rte_eth_dev_is_detachable(uint8_t port_id)
{
	uint32_t dev_flags;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	switch (rte_eth_devices[port_id].data->kdrv) {
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
	case RTE_KDRV_NIC_UIO:
	case RTE_KDRV_NONE:
		break;
	case RTE_KDRV_VFIO:
	default:
		return -ENOTSUP;
	}
	dev_flags = rte_eth_devices[port_id].data->dev_flags;
	if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
		(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
		return 0;
	else
		return 1;
}

/* attach the new device, then store the port_id of the device */
int
rte_eth_dev_attach(const char *devargs, uint8_t *port_id)
{
	int ret = -1;
	int current = rte_eth_dev_count();
	char *name = NULL;
	char *args = NULL;

	if ((devargs == NULL) || (port_id == NULL)) {
		ret = -EINVAL;
		goto err;
	}

	/* parse devargs, then retrieve device name and args */
	if (rte_eal_parse_devargs_str(devargs, &name, &args))
		goto err;

	ret = rte_eal_dev_attach(name, args);
	if (ret < 0)
		goto err;

	/* no point looking at the port count if no port exists */
	if (!rte_eth_dev_count()) {
		RTE_LOG(ERR, EAL, "No port found for device (%s)\n", name);
		ret = -1;
		goto err;
	}

	/* if nothing happened, there is a bug here, since some driver told us
	 * it did attach a device, but did not create a port.
	 */
	if (current == rte_eth_dev_count()) {
		ret = -1;
		goto err;
	}

	*port_id = eth_dev_last_created_port;
	ret = 0;

err:
	free(name);
	free(args);
	return ret;
}
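/*
 * Editor's note: a minimal usage sketch for the hotplug calls above and
 * below (not part of the original source). The "net_null0" devargs string
 * is an illustrative assumption; any device string the EAL recognizes
 * works.
 *
 * @code
 * uint8_t port_id;
 * char name[RTE_ETH_NAME_MAX_LEN];
 *
 * if (rte_eth_dev_attach("net_null0", &port_id) == 0) {
 *         ... use the port ...
 *         rte_eth_dev_detach(port_id, name);
 * }
 * @endcode
 */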

/* detach the device, then store the name of the device */
int
rte_eth_dev_detach(uint8_t port_id, char *name)
{
	int ret = -1;

	if (name == NULL) {
		ret = -EINVAL;
		goto err;
	}

	/* FIXME: move this to eal, once device flags are relocated there */
	if (rte_eth_dev_is_detachable(port_id))
		goto err;

	snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
		 "%s", rte_eth_devices[port_id].data->name);
	ret = rte_eal_dev_detach(name);
	if (ret < 0)
		goto err;

	return 0;

err:
	return ret;
}

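/*
 * Resize dev->data->rx_queues (the TX helper below mirrors this for
 * tx_queues): allocate the array on first configuration, release the
 * queues beyond the new count and realloc on re-configuration, and
 * release every queue when the requested count is zero.
 */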
static int
rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_rx_queues;
	void **rxq;
	unsigned i;

	if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
				sizeof(dev->data->rx_queues[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (dev->data->rx_queues == NULL) {
			dev->data->nb_rx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
		rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (rxq == NULL)
			return -(ENOMEM);
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(rxq + old_nb_queues, 0,
				sizeof(rxq[0]) * new_qs);
		}

		dev->data->rx_queues = rxq;

	} else if (dev->data->rx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);

		rxq = dev->data->rx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->rx_queue_release)(rxq[i]);
	}
	dev->data->nb_rx_queues = nb_queues;
	return 0;
}

int
rte_eth_dev_rx_queue_start(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_start(dev, rx_queue_id);
}

int
rte_eth_dev_rx_queue_stop(uint8_t port_id, uint16_t rx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);

	if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			rx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->rx_queue_stop(dev, rx_queue_id);
}

int
rte_eth_dev_tx_queue_start(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already started\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_start(dev, tx_queue_id);
}

int
rte_eth_dev_tx_queue_stop(uint8_t port_id, uint16_t tx_queue_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);

	if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
		RTE_PMD_DEBUG_TRACE("Queue %" PRIu16" of device with port_id=%" PRIu8
			" already stopped\n",
			tx_queue_id, port_id);
		return 0;
	}

	return dev->dev_ops->tx_queue_stop(dev, tx_queue_id);
}

static int
rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
{
	uint16_t old_nb_queues = dev->data->nb_tx_queues;
	void **txq;
	unsigned i;

	if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
		dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
						   sizeof(dev->data->tx_queues[0]) * nb_queues,
						   RTE_CACHE_LINE_SIZE);
		if (dev->data->tx_queues == NULL) {
			dev->data->nb_tx_queues = 0;
			return -(ENOMEM);
		}
	} else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
		txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
				  RTE_CACHE_LINE_SIZE);
		if (txq == NULL)
			return -ENOMEM;
		if (nb_queues > old_nb_queues) {
			uint16_t new_qs = nb_queues - old_nb_queues;

			memset(txq + old_nb_queues, 0,
			       sizeof(txq[0]) * new_qs);
		}

		dev->data->tx_queues = txq;

	} else if (dev->data->tx_queues != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);

		txq = dev->data->tx_queues;

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->tx_queue_release)(txq[i]);
	}
	dev->data->nb_tx_queues = nb_queues;
	return 0;
}

uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
	switch (speed) {
	case ETH_SPEED_NUM_10M:
		return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
	case ETH_SPEED_NUM_100M:
		return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
	case ETH_SPEED_NUM_1G:
		return ETH_LINK_SPEED_1G;
	case ETH_SPEED_NUM_2_5G:
		return ETH_LINK_SPEED_2_5G;
	case ETH_SPEED_NUM_5G:
		return ETH_LINK_SPEED_5G;
	case ETH_SPEED_NUM_10G:
		return ETH_LINK_SPEED_10G;
	case ETH_SPEED_NUM_20G:
		return ETH_LINK_SPEED_20G;
	case ETH_SPEED_NUM_25G:
		return ETH_LINK_SPEED_25G;
	case ETH_SPEED_NUM_40G:
		return ETH_LINK_SPEED_40G;
	case ETH_SPEED_NUM_50G:
		return ETH_LINK_SPEED_50G;
	case ETH_SPEED_NUM_56G:
		return ETH_LINK_SPEED_56G;
	case ETH_SPEED_NUM_100G:
		return ETH_LINK_SPEED_100G;
	default:
		return 0;
	}
}

int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
		      const struct rte_eth_conf *dev_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of RX queues requested (%u) is greater than max supported(%d)\n",
			nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
		RTE_PMD_DEBUG_TRACE(
			"Number of TX queues requested (%u) is greater than max supported(%d)\n",
			nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/*
	 * Check that the numbers of RX and TX queues are not greater
	 * than the maximum number of RX and TX queues supported by the
	 * configured device.
	 */
	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);

	if (nb_rx_q == 0 && nb_tx_q == 0) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d: number of RX and TX queues cannot both be 0\n", port_id);
		return -EINVAL;
	}

	if (nb_rx_q > dev_info.max_rx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d\n",
				port_id, nb_rx_q, dev_info.max_rx_queues);
		return -EINVAL;
	}

	if (nb_tx_q > dev_info.max_tx_queues) {
		RTE_PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d\n",
				port_id, nb_tx_q, dev_info.max_tx_queues);
		return -EINVAL;
	}

	/*
	 * If link state interrupt is enabled, check that the
	 * device supports it.
	 */
	if ((dev_conf->intr_conf.lsc == 1) &&
		(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
			RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
					dev->data->drv_name);
			return -EINVAL;
	}

	/*
	 * If jumbo frames are enabled, check that the maximum RX packet
	 * length is supported by the configured device.
	 */
	if (dev_conf->rxmode.jumbo_frame == 1) {
		if (dev_conf->rxmode.max_rx_pkt_len >
		    dev_info.max_rx_pktlen) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" > max valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)dev_info.max_rx_pktlen);
			return -EINVAL;
		} else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
			RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
				" < min valid value %u\n",
				port_id,
				(unsigned)dev_conf->rxmode.max_rx_pkt_len,
				(unsigned)ETHER_MIN_LEN);
			return -EINVAL;
		}
	} else {
		if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
			dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
			/* Use default value */
			dev->data->dev_conf.rxmode.max_rx_pkt_len =
							ETHER_MAX_LEN;
	}

	/*
	 * Setup new number of RX/TX queues and reconfigure device.
	 */
	diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
				port_id, diag);
		return diag;
	}

	diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		return diag;
	}

	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
				port_id, diag);
		rte_eth_dev_rx_queue_config(dev, 0);
		rte_eth_dev_tx_queue_config(dev, 0);
		return diag;
	}

	return 0;
}
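/*
 * Editor's note: a minimal sketch of the usual bring-up sequence built on
 * the call above (not part of the original source). Error handling is
 * elided, and the queue/descriptor counts and the mbuf_pool variable are
 * illustrative assumptions.
 *
 * @code
 * struct rte_eth_conf port_conf;
 *
 * memset(&port_conf, 0, sizeof(port_conf));
 * rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 * rte_eth_rx_queue_setup(port_id, 0, 128, rte_eth_dev_socket_id(port_id),
 *                        NULL, mbuf_pool);
 * rte_eth_tx_queue_setup(port_id, 0, 512, rte_eth_dev_socket_id(port_id),
 *                        NULL);
 * rte_eth_dev_start(port_id);
 * @endcode
 */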

static void
rte_eth_dev_config_restore(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;
	struct ether_addr addr;
	uint16_t i;
	uint32_t pool = 0;

	dev = &rte_eth_devices[port_id];

	rte_eth_dev_info_get(port_id, &dev_info);

	if (RTE_ETH_DEV_SRIOV(dev).active)
		pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;

	/* replay MAC address configuration */
	for (i = 0; i < dev_info.max_mac_addrs; i++) {
		addr = dev->data->mac_addrs[i];

		/* skip zero address */
		if (is_zero_ether_addr(&addr))
			continue;

		/* add address to the hardware */
		if (*dev->dev_ops->mac_addr_add &&
			(dev->data->mac_pool_sel[i] & (1ULL << pool)))
			(*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
		else {
			RTE_PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
					port_id);
			/* exit the loop but do not return an error */
			break;
		}
	}

	/* replay promiscuous configuration */
	if (rte_eth_promiscuous_get(port_id) == 1)
		rte_eth_promiscuous_enable(port_id);
	else if (rte_eth_promiscuous_get(port_id) == 0)
		rte_eth_promiscuous_disable(port_id);

	/* replay all multicast configuration */
	if (rte_eth_allmulticast_get(port_id) == 1)
		rte_eth_allmulticast_enable(port_id);
	else if (rte_eth_allmulticast_get(port_id) == 0)
		rte_eth_allmulticast_disable(port_id);
}

int
rte_eth_dev_start(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int diag;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already started\n",
			port_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	rte_eth_dev_config_restore(port_id);

	if (dev->data->dev_conf.intr_conf.lsc == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
		(*dev->dev_ops->link_update)(dev, 0);
	}
	return 0;
}

void
rte_eth_dev_stop(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_PMD_DEBUG_TRACE("Device with port_id=%" PRIu8
			" already stopped\n",
			port_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_eth_dev_set_link_up(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_up, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_up)(dev);
}

int
rte_eth_dev_set_link_down(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_set_link_down, -ENOTSUP);
	return (*dev->dev_ops->dev_set_link_down)(dev);
}

void
rte_eth_dev_close(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_close);
	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_close)(dev);

	rte_free(dev->data->rx_queues);
	dev->data->rx_queues = NULL;
	rte_free(dev->data->tx_queues);
	dev->data->tx_queues = NULL;
}

int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
		       uint16_t nb_rx_desc, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	int ret;
	uint32_t mbp_buf_size;
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

	/*
	 * Check the size of the mbuf data buffer.
	 * This value must be provided in the private data of the memory pool.
	 * First check that the memory pool has valid private data.
	 */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
		RTE_PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
				mp->name, (int) mp->private_data_size,
				(int) sizeof(struct rte_pktmbuf_pool_private));
		return -ENOSPC;
	}
	mbp_buf_size = rte_pktmbuf_data_room_size(mp);

	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
		RTE_PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
				"(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
				"=%d)\n",
				mp->name,
				(int)mbp_buf_size,
				(int)(RTE_PKTMBUF_HEADROOM +
				      dev_info.min_rx_bufsize),
				(int)RTE_PKTMBUF_HEADROOM,
				(int)dev_info.min_rx_bufsize);
		return -EINVAL;
	}

	if (nb_rx_desc > dev_info.rx_desc_lim.nb_max ||
			nb_rx_desc < dev_info.rx_desc_lim.nb_min ||
			nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {

		RTE_PMD_DEBUG_TRACE("Invalid value for nb_rx_desc(=%hu), "
			"should be: <= %hu, >= %hu, and a multiple of %hu\n",
			nb_rx_desc,
			dev_info.rx_desc_lim.nb_max,
			dev_info.rx_desc_lim.nb_min,
			dev_info.rx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (rx_conf == NULL)
		rx_conf = &dev_info.default_rxconf;

	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
					      socket_id, rx_conf, mp);
	if (!ret) {
		if (!dev->data->min_rx_buf_size ||
		    dev->data->min_rx_buf_size > mbp_buf_size)
			dev->data->min_rx_buf_size = mbp_buf_size;
	}

	return ret;
}
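/*
 * Editor's note: a sketch of creating a mempool that satisfies the checks
 * above (not part of the original source). The element count and cache
 * size are illustrative assumptions; the data room must cover
 * RTE_PKTMBUF_HEADROOM plus the device's min_rx_bufsize.
 *
 * @code
 * struct rte_mempool *mbuf_pool = rte_pktmbuf_pool_create("rx_pool",
 *         8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 * @endcode
 */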

int
rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
		       uint16_t nb_tx_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	struct rte_eth_dev *dev;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	if (tx_queue_id >= dev->data->nb_tx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_PMD_DEBUG_TRACE(
		    "port %d must be stopped to allow configuration\n", port_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

	rte_eth_dev_info_get(port_id, &dev_info);

	if (nb_tx_desc > dev_info.tx_desc_lim.nb_max ||
	    nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
	    nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
		RTE_PMD_DEBUG_TRACE("Invalid value for nb_tx_desc(=%hu), "
				"should be: <= %hu, >= %hu, and a multiple of %hu\n",
				nb_tx_desc,
				dev_info.tx_desc_lim.nb_max,
				dev_info.tx_desc_lim.nb_min,
				dev_info.tx_desc_lim.nb_align);
		return -EINVAL;
	}

	if (tx_conf == NULL)
		tx_conf = &dev_info.default_txconf;

	return (*dev->dev_ops->tx_queue_setup)(dev, tx_queue_id, nb_tx_desc,
					       socket_id, tx_conf);
}

void
rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata __rte_unused)
{
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);
}

void
rte_eth_tx_buffer_count_callback(struct rte_mbuf **pkts, uint16_t unsent,
		void *userdata)
{
	uint64_t *count = userdata;
	unsigned i;

	for (i = 0; i < unsent; i++)
		rte_pktmbuf_free(pkts[i]);

	*count += unsent;
}

int
rte_eth_tx_buffer_set_err_callback(struct rte_eth_dev_tx_buffer *buffer,
		buffer_tx_error_fn cbfn, void *userdata)
{
	buffer->error_callback = cbfn;
	buffer->error_userdata = userdata;
	return 0;
}

int
rte_eth_tx_buffer_init(struct rte_eth_dev_tx_buffer *buffer, uint16_t size)
{
	int ret = 0;

	if (buffer == NULL)
		return -EINVAL;

	buffer->size = size;
	if (buffer->error_callback == NULL) {
		ret = rte_eth_tx_buffer_set_err_callback(
			buffer, rte_eth_tx_buffer_drop_callback, NULL);
	}

	return ret;
}
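/*
 * Editor's note: a usage sketch for the TX buffering helpers above (not
 * part of the original source); BURST_SIZE is an illustrative assumption.
 *
 * @code
 * struct rte_eth_dev_tx_buffer *buffer = rte_zmalloc_socket("tx_buffer",
 *         RTE_ETH_TX_BUFFER_SIZE(BURST_SIZE), 0, rte_socket_id());
 *
 * rte_eth_tx_buffer_init(buffer, BURST_SIZE);
 * ...
 * rte_eth_tx_buffer(port_id, 0, buffer, pkt);
 * rte_eth_tx_buffer_flush(port_id, 0, buffer);
 * @endcode
 */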

void
rte_eth_promiscuous_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
	(*dev->dev_ops->promiscuous_enable)(dev);
	dev->data->promiscuous = 1;
}

void
rte_eth_promiscuous_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
	dev->data->promiscuous = 0;
	(*dev->dev_ops->promiscuous_disable)(dev);
}

int
rte_eth_promiscuous_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->promiscuous;
}

void
rte_eth_allmulticast_enable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
	(*dev->dev_ops->allmulticast_enable)(dev);
	dev->data->all_multicast = 1;
}

void
rte_eth_allmulticast_disable(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
	dev->data->all_multicast = 0;
	(*dev->dev_ops->allmulticast_disable)(dev);
}

int
rte_eth_allmulticast_get(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	return dev->data->all_multicast;
}

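/*
 * Copy the device link state into *link as a single atomic 64-bit
 * compare-and-set (struct rte_eth_link is laid out to fit in a uint64_t),
 * so a reader cannot observe a torn update from a concurrent link-status
 * writer; -1 is returned if a concurrent writer won the race.
 */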
static inline int
rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &(dev->data->dev_link);

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
					*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

void
rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 1);
		*eth_link = dev->data->dev_link;
	}
}

void
rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.intr_conf.lsc != 0)
		rte_eth_dev_atomic_read_link_status(dev, eth_link);
	else {
		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
		(*dev->dev_ops->link_update)(dev, 0);
		*eth_link = dev->data->dev_link;
	}
}

int
rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];
	memset(stats, 0, sizeof(*stats));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
	stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
	(*dev->dev_ops->stats_get)(dev, stats);
	return 0;
}

void
rte_eth_stats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
	(*dev->dev_ops->stats_reset)(dev);
	dev->data->rx_mbuf_alloc_failed = 0;
}

static int
get_xstats_count(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int count;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
	dev = &rte_eth_devices[port_id];
	if (dev->dev_ops->xstats_get_names != NULL) {
		count = (*dev->dev_ops->xstats_get_names)(dev, NULL, 0);
		if (count < 0)
			return count;
	} else
		count = 0;
	count += RTE_NB_STATS;
	count += dev->data->nb_rx_queues * RTE_NB_RXQ_STATS;
	count += dev->data->nb_tx_queues * RTE_NB_TXQ_STATS;
	return count;
}

int
rte_eth_xstats_get_names(uint8_t port_id,
	struct rte_eth_xstat_name *xstats_names,
	unsigned size)
{
	struct rte_eth_dev *dev;
	int cnt_used_entries;
	int cnt_expected_entries;
	int cnt_driver_entries;
	uint32_t idx, id_queue;

	cnt_expected_entries = get_xstats_count(port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* port_id checked in get_xstats_count() */
	dev = &rte_eth_devices[port_id];
	cnt_used_entries = 0;

	for (idx = 0; idx < RTE_NB_STATS; idx++) {
		snprintf(xstats_names[cnt_used_entries].name,
			sizeof(xstats_names[0].name),
			"%s", rte_stats_strings[idx].name);
		cnt_used_entries++;
	}
	for (id_queue = 0; id_queue < dev->data->nb_rx_queues; id_queue++) {
		for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"rx_q%u%s",
				id_queue, rte_rxq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}
	for (id_queue = 0; id_queue < dev->data->nb_tx_queues; id_queue++) {
		for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
			snprintf(xstats_names[cnt_used_entries].name,
				sizeof(xstats_names[0].name),
				"tx_q%u%s",
				id_queue, rte_txq_stats_strings[idx].name);
			cnt_used_entries++;
		}
	}

	if (dev->dev_ops->xstats_get_names != NULL) {
		/* If there are any driver-specific xstats, append them
		 * to end of list.
		 */
		cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
			dev,
			xstats_names + cnt_used_entries,
			size - cnt_used_entries);
		if (cnt_driver_entries < 0)
			return cnt_driver_entries;
		cnt_used_entries += cnt_driver_entries;
	}

	return cnt_used_entries;
}

/* retrieve ethdev extended statistics */
int
rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
	unsigned n)
{
	struct rte_eth_stats eth_stats;
	struct rte_eth_dev *dev;
	unsigned count = 0, i, q;
	signed xcount = 0;
	uint64_t val, *stats_ptr;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

	dev = &rte_eth_devices[port_id];

	/* Return generic statistics */
	count = RTE_NB_STATS + (dev->data->nb_rx_queues * RTE_NB_RXQ_STATS) +
		(dev->data->nb_tx_queues * RTE_NB_TXQ_STATS);

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL) {
		/* Retrieve the xstats from the driver at the end of the
		 * xstats struct.
		 */
		xcount = (*dev->dev_ops->xstats_get)(dev,
				     xstats ? xstats + count : NULL,
				     (n > count) ? n - count : 0);

		if (xcount < 0)
			return xcount;
	}

	if (n < count + xcount || xstats == NULL)
		return count + xcount;

	/* now fill the xstats structure */
	count = 0;
	rte_eth_stats_get(port_id, &eth_stats);

	/* global stats */
	for (i = 0; i < RTE_NB_STATS; i++) {
		stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_stats_strings[i].offset);
		val = *stats_ptr;
		xstats[count++].value = val;
	}

	/* per-rxq stats */
	for (q = 0; q < dev->data->nb_rx_queues; q++) {
		for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_rxq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	/* per-txq stats */
	for (q = 0; q < dev->data->nb_tx_queues; q++) {
		for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
			stats_ptr = RTE_PTR_ADD(&eth_stats,
					rte_txq_stats_strings[i].offset +
					q * sizeof(uint64_t));
			val = *stats_ptr;
			xstats[count++].value = val;
		}
	}

	for (i = 0; i < count + xcount; i++)
		xstats[i].id = i;

	return count + xcount;
}
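/*
 * Editor's note: the usual two-call pattern for the xstats API above (not
 * part of the original source): call once with a NULL array to learn the
 * required count, then again with a buffer of that size.
 *
 * @code
 * int nstats = rte_eth_xstats_get(port_id, NULL, 0);
 * struct rte_eth_xstat *xs = malloc(nstats * sizeof(*xs));
 *
 * if (xs != NULL && rte_eth_xstats_get(port_id, xs, nstats) == nstats) {
 *         ... names from rte_eth_xstats_get_names() pair with xs[i] ...
 * }
 * free(xs);
 * @endcode
 */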

/* reset ethdev extended statistics */
void
rte_eth_xstats_reset(uint8_t port_id)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_reset != NULL) {
		(*dev->dev_ops->xstats_reset)(dev);
		return;
	}

	/* fallback to default */
	rte_eth_stats_reset(port_id);
}

static int
set_queue_stats_mapping(uint8_t port_id, uint16_t queue_id, uint8_t stat_idx,
		uint8_t is_rx)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_stats_mapping_set, -ENOTSUP);
	return (*dev->dev_ops->queue_stats_mapping_set)
			(dev, queue_id, stat_idx, is_rx);
}

int
rte_eth_dev_set_tx_queue_stats_mapping(uint8_t port_id, uint16_t tx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, tx_queue_id, stat_idx,
			STAT_QMAP_TX);
}

int
rte_eth_dev_set_rx_queue_stats_mapping(uint8_t port_id, uint16_t rx_queue_id,
		uint8_t stat_idx)
{
	return set_queue_stats_mapping(port_id, rx_queue_id, stat_idx,
			STAT_QMAP_RX);
}

void
rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_dev *dev;
	const struct rte_eth_desc_lim lim = {
		.nb_max = UINT16_MAX,
		.nb_min = 0,
		.nb_align = 1,
	};

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
	dev_info->rx_desc_lim = lim;
	dev_info->tx_desc_lim = lim;

	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
	dev_info->pci_dev = dev->pci_dev;
	dev_info->driver_name = dev->data->drv_name;
	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}

int
rte_eth_dev_get_supported_ptypes(uint8_t port_id, uint32_t ptype_mask,
				 uint32_t *ptypes, int num)
{
	int i, j;
	struct rte_eth_dev *dev;
	const uint32_t *all_ptypes;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_supported_ptypes_get, 0);
	all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);

	if (!all_ptypes)
		return 0;

	for (i = 0, j = 0; all_ptypes[i] != RTE_PTYPE_UNKNOWN; ++i)
		if (all_ptypes[i] & ptype_mask) {
			if (j < num)
				ptypes[j] = all_ptypes[i];
			j++;
		}

	return j;
}

void
rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}

int
rte_eth_dev_get_mtu(uint8_t port_id, uint16_t *mtu)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	dev = &rte_eth_devices[port_id];
	*mtu = dev->data->mtu;
	return 0;
}

int
rte_eth_dev_set_mtu(uint8_t port_id, uint16_t mtu)
{
	int ret;
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);

	ret = (*dev->dev_ops->mtu_set)(dev, mtu);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}

int
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
		RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
		return -ENOSYS;
	}

	if (vlan_id > 4095) {
		RTE_PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
				port_id, (unsigned) vlan_id);
		return -EINVAL;
	}
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
}

int
rte_eth_dev_set_vlan_strip_on_queue(uint8_t port_id, uint16_t rx_queue_id, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	if (rx_queue_id >= dev->data->nb_rx_queues) {
		RTE_PMD_DEBUG_TRACE("Invalid rx_queue_id=%d\n", rx_queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
	(*dev->dev_ops->vlan_strip_queue_set)(dev, rx_queue_id, on);

	return 0;
}

int
rte_eth_dev_set_vlan_ether_type(uint8_t port_id,
				enum rte_vlan_type vlan_type,
				uint16_t tpid)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_tpid_set, -ENOTSUP);

	return (*dev->dev_ops->vlan_tpid_set)(dev, vlan_type, tpid);
}

int
rte_eth_dev_set_vlan_offload(uint8_t port_id, int offload_mask)
{
	struct rte_eth_dev *dev;
	int ret = 0;
	int mask = 0;
	int cur, org = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	/* check which options were changed by the application */
	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
		mask |= ETH_VLAN_STRIP_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
		mask |= ETH_VLAN_FILTER_MASK;
	}

	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
	org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
	if (cur != org) {
		dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
		mask |= ETH_VLAN_EXTEND_MASK;
	}

	/* no change */
	if (mask == 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
	(*dev->dev_ops->vlan_offload_set)(dev, mask);

	return ret;
}

int
rte_eth_dev_get_vlan_offload(uint8_t port_id)
{
	struct rte_eth_dev *dev;
	int ret = 0;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];

	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
		ret |= ETH_VLAN_STRIP_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_filter)
		ret |= ETH_VLAN_FILTER_OFFLOAD;

	if (dev->data->dev_conf.rxmode.hw_vlan_extend)
		ret |= ETH_VLAN_EXTEND_OFFLOAD;

	return ret;
}

int
rte_eth_dev_set_vlan_pvid(uint8_t port_id, uint16_t pvid, int on)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_pvid_set, -ENOTSUP);
	(*dev->dev_ops->vlan_pvid_set)(dev, pvid, on);

	return 0;
}

int
rte_eth_dev_flow_ctrl_get(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_get, -ENOTSUP);
	memset(fc_conf, 0, sizeof(*fc_conf));
	return (*dev->dev_ops->flow_ctrl_get)(dev, fc_conf);
}

int
rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
	return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
}

int
rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
{
	struct rte_eth_dev *dev;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
		RTE_PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
		return -EINVAL;
	}

	dev = &rte_eth_devices[port_id];
	/* High water / low water validation is device-specific */
	if (*dev->dev_ops->priority_flow_ctrl_set)
		return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
	return -ENOTSUP;
}

static int
rte_eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	uint16_t i, num;

	if (!reta_conf)
		return -EINVAL;

	if (reta_size != RTE_ALIGN(reta_size, RTE_RETA_GROUP_SIZE)) {
		RTE_PMD_DEBUG_TRACE("Invalid reta size, should be %u aligned\n",
							RTE_RETA_GROUP_SIZE);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;
	for (i = 0; i < num; i++) {
		if (reta_conf[i].mask)
			return 0;
	}

	return -EINVAL;
}

static int
rte_eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size,
			 uint16_t max_rxq)
{
	uint16_t i, idx, shift;

	if (!reta_conf)
		return -EINVAL;

	if (max_rxq == 0) {
		RTE_PMD_DEBUG_TRACE("No receive queue is available\n");
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if ((reta_conf[idx].mask & (1ULL << shift)) &&
			(reta_conf[idx].reta[shift] >= max_rxq)) {
			RTE_PMD_DEBUG_TRACE("reta_conf[%u]->reta[%u]: %u exceeds "
				"the maximum rxq index: %u\n", idx, shift,
				reta_conf[idx].reta[shift], max_rxq);
			return -EINVAL;
		}
	}

	return 0;
}

int
rte_eth_dev_rss_reta_update(uint8_t port_id,
			    struct rte_eth_rss_reta_entry64 *reta_conf,
			    uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];

	/* Check entry value */
	ret = rte_eth_check_reta_entry(reta_conf, reta_size,
				dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
	return (*dev->dev_ops->reta_update)(dev, reta_conf, reta_size);
}
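/*
 * Editor's note: a sketch of filling a redirection table for the update
 * call above (not part of the original source). reta_size would come from
 * rte_eth_dev_info_get(); the round-robin spread over nb_rx_queues is an
 * illustrative choice.
 *
 * @code
 * struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
 *                                           RTE_RETA_GROUP_SIZE];
 * uint16_t i;
 *
 * memset(reta_conf, 0, sizeof(reta_conf));
 * for (i = 0; i < reta_size; i++) {
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
 *                         1ULL << (i % RTE_RETA_GROUP_SIZE);
 *         reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
 *                         i % nb_rx_queues;
 * }
 * rte_eth_dev_rss_reta_update(port_id, reta_conf, reta_size);
 * @endcode
 */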

int
rte_eth_dev_rss_reta_query(uint8_t port_id,
			   struct rte_eth_rss_reta_entry64 *reta_conf,
			   uint16_t reta_size)
{
	struct rte_eth_dev *dev;
	int ret;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	/* Check mask bits */
	ret = rte_eth_check_reta_mask(reta_conf, reta_size);
	if (ret < 0)
		return ret;

	dev = &rte_eth_devices[port_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
	return (*dev->dev_ops->reta_query)(dev, reta_conf, reta_size);
}

int
rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
{
	struct rte_eth_dev *dev;
	uint64_t rss_hash_protos;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	rss_hash_protos = rss_conf->rss_hf;
	if ((rss_hash_protos != 0) &&
	    ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
		RTE_PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%" PRIx64 "\n",
				rss_hash_protos);
1920		return -EINVAL;
1921	}
1922	dev = &rte_eth_devices[port_id];
1923	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
1924	return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
1925}
1926
1927int
1928rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
1929			      struct rte_eth_rss_conf *rss_conf)
1930{
1931	struct rte_eth_dev *dev;
1932
1933	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1934	dev = &rte_eth_devices[port_id];
1935	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
1936	return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
1937}
1938
1939int
1940rte_eth_dev_udp_tunnel_port_add(uint8_t port_id,
1941				struct rte_eth_udp_tunnel *udp_tunnel)
1942{
1943	struct rte_eth_dev *dev;
1944
1945	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1946	if (udp_tunnel == NULL) {
1947		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1948		return -EINVAL;
1949	}
1950
1951	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1952		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1953		return -EINVAL;
1954	}
1955
1956	dev = &rte_eth_devices[port_id];
1957	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_add, -ENOTSUP);
1958	return (*dev->dev_ops->udp_tunnel_port_add)(dev, udp_tunnel);
1959}
1960
1961int
1962rte_eth_dev_udp_tunnel_port_delete(uint8_t port_id,
1963				   struct rte_eth_udp_tunnel *udp_tunnel)
1964{
1965	struct rte_eth_dev *dev;
1966
1967	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1968	dev = &rte_eth_devices[port_id];
1969
1970	if (udp_tunnel == NULL) {
1971		RTE_PMD_DEBUG_TRACE("Invalid udp_tunnel parameter\n");
1972		return -EINVAL;
1973	}
1974
1975	if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
1976		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
1977		return -EINVAL;
1978	}
1979
1980	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->udp_tunnel_port_del, -ENOTSUP);
1981	return (*dev->dev_ops->udp_tunnel_port_del)(dev, udp_tunnel);
1982}
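
/*
 * Usage sketch: assuming "port_id" is valid and the PMD implements the
 * udp_tunnel_port ops, an application could register (and later remove)
 * the IANA-assigned VXLAN port so the NIC parses VXLAN frames:
 *
 *	struct rte_eth_udp_tunnel tunnel = {
 *		.udp_port = 4789,
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *	};
 *
 *	ret = rte_eth_dev_udp_tunnel_port_add(port_id, &tunnel);
 *	...
 *	ret = rte_eth_dev_udp_tunnel_port_delete(port_id, &tunnel);
 */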
1983
1984int
1985rte_eth_led_on(uint8_t port_id)
1986{
1987	struct rte_eth_dev *dev;
1988
1989	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
1990	dev = &rte_eth_devices[port_id];
1991	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
1992	return (*dev->dev_ops->dev_led_on)(dev);
1993}
1994
1995int
1996rte_eth_led_off(uint8_t port_id)
1997{
1998	struct rte_eth_dev *dev;
1999
2000	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2001	dev = &rte_eth_devices[port_id];
2002	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
2003	return (*dev->dev_ops->dev_led_off)(dev);
2004}
2005
2006/*
2007 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2008 * an empty spot.
2009 */
2010static int
2011get_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2012{
2013	struct rte_eth_dev_info dev_info;
2014	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2015	unsigned i;
2016
2017	rte_eth_dev_info_get(port_id, &dev_info);
2018
2019	for (i = 0; i < dev_info.max_mac_addrs; i++)
2020		if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
2021			return i;
2022
2023	return -1;
2024}
2025
2026static const struct ether_addr null_mac_addr;
2027
2028int
2029rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
2030			uint32_t pool)
2031{
2032	struct rte_eth_dev *dev;
2033	int index;
2034	uint64_t pool_mask;
2035
2036	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2037	dev = &rte_eth_devices[port_id];
2038	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
2039
2040	if (is_zero_ether_addr(addr)) {
2041		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2042			port_id);
2043		return -EINVAL;
2044	}
2045	if (pool >= ETH_64_POOLS) {
2046		RTE_PMD_DEBUG_TRACE("pool id must be 0-%d\n", ETH_64_POOLS - 1);
2047		return -EINVAL;
2048	}
2049
2050	index = get_mac_addr_index(port_id, addr);
2051	if (index < 0) {
2052		index = get_mac_addr_index(port_id, &null_mac_addr);
2053		if (index < 0) {
2054			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2055				port_id);
2056			return -ENOSPC;
2057		}
2058	} else {
2059		pool_mask = dev->data->mac_pool_sel[index];
2060
2061		/* If both the MAC address and pool are already set, do nothing */
2062		if (pool_mask & (1ULL << pool))
2063			return 0;
2064	}
2065
2066	/* Update NIC */
2067	(*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
2068
2069	/* Update address in NIC data structure */
2070	ether_addr_copy(addr, &dev->data->mac_addrs[index]);
2071
2072	/* Update pool bitmap in NIC data structure */
2073	dev->data->mac_pool_sel[index] |= (1ULL << pool);
2074
2075	return 0;
2076}
2077
2078int
2079rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
2080{
2081	struct rte_eth_dev *dev;
2082	int index;
2083
2084	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2085	dev = &rte_eth_devices[port_id];
2086	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
2087
2088	index = get_mac_addr_index(port_id, addr);
2089	if (index == 0) {
2090		RTE_PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
2091		return -EADDRINUSE;
2092	} else if (index < 0)
2093		return 0;  /* Do nothing if address wasn't found */
2094
2095	/* Update NIC */
2096	(*dev->dev_ops->mac_addr_remove)(dev, index);
2097
2098	/* Update address in NIC data structure */
2099	ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
2100
2101	/* reset pool bitmap */
2102	dev->data->mac_pool_sel[index] = 0;
2103
2104	return 0;
2105}
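
/*
 * Usage sketch: assuming "port_id" is valid and "ret" an int, a locally
 * administered (02:xx) secondary MAC address could be added to pool 0
 * and removed again; adding the same address/pool pair twice is a no-op:
 *
 *	struct ether_addr mac = {
 *		.addr_bytes = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
 *	};
 *
 *	ret = rte_eth_dev_mac_addr_add(port_id, &mac, 0);
 *	...
 *	ret = rte_eth_dev_mac_addr_remove(port_id, &mac);
 */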
2106
2107int
2108rte_eth_dev_default_mac_addr_set(uint8_t port_id, struct ether_addr *addr)
2109{
2110	struct rte_eth_dev *dev;
2111
2112	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2113
2114	if (!is_valid_assigned_ether_addr(addr))
2115		return -EINVAL;
2116
2117	dev = &rte_eth_devices[port_id];
2118	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_set, -ENOTSUP);
2119
2120	/* Update default address in NIC data structure */
2121	ether_addr_copy(addr, &dev->data->mac_addrs[0]);
2122
2123	(*dev->dev_ops->mac_addr_set)(dev, addr);
2124
2125	return 0;
2126}
2127
2128int
2129rte_eth_dev_set_vf_rxmode(uint8_t port_id,  uint16_t vf,
2130				uint16_t rx_mode, uint8_t on)
2131{
2132	uint16_t num_vfs;
2133	struct rte_eth_dev *dev;
2134	struct rte_eth_dev_info dev_info;
2135
2136	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2137
2138	dev = &rte_eth_devices[port_id];
2139	rte_eth_dev_info_get(port_id, &dev_info);
2140
2141	num_vfs = dev_info.max_vfs;
2142	if (vf >= num_vfs) {
2143		RTE_PMD_DEBUG_TRACE("set VF RX mode:invalid VF id %d\n", vf);
2144		return -EINVAL;
2145	}
2146
2147	if (rx_mode == 0) {
2148		RTE_PMD_DEBUG_TRACE("set VF RX mode:mode mask cannot be zero\n");
2149		return -EINVAL;
2150	}
2151	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx_mode, -ENOTSUP);
2152	return (*dev->dev_ops->set_vf_rx_mode)(dev, vf, rx_mode, on);
2153}
2154
2155/*
2156 * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
2157 * an empty spot.
2158 */
2159static int
2160get_hash_mac_addr_index(uint8_t port_id, const struct ether_addr *addr)
2161{
2162	struct rte_eth_dev_info dev_info;
2163	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2164	unsigned i;
2165
2166	rte_eth_dev_info_get(port_id, &dev_info);
2167	if (!dev->data->hash_mac_addrs)
2168		return -1;
2169
2170	for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
2171		if (memcmp(addr, &dev->data->hash_mac_addrs[i],
2172			ETHER_ADDR_LEN) == 0)
2173			return i;
2174
2175	return -1;
2176}
2177
2178int
2179rte_eth_dev_uc_hash_table_set(uint8_t port_id, struct ether_addr *addr,
2180				uint8_t on)
2181{
2182	int index;
2183	int ret;
2184	struct rte_eth_dev *dev;
2185
2186	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2187
2188	dev = &rte_eth_devices[port_id];
2189	if (is_zero_ether_addr(addr)) {
2190		RTE_PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n",
2191			port_id);
2192		return -EINVAL;
2193	}
2194
2195	index = get_hash_mac_addr_index(port_id, addr);
2196	/* Check if it's already there, and do nothing */
2197	if ((index >= 0) && (on))
2198		return 0;
2199
2200	if (index < 0) {
2201		if (!on) {
2202			RTE_PMD_DEBUG_TRACE("port %d: the MAC address was not "
2203				"set in UTA\n", port_id);
2204			return -EINVAL;
2205		}
2206
2207		index = get_hash_mac_addr_index(port_id, &null_mac_addr);
2208		if (index < 0) {
2209			RTE_PMD_DEBUG_TRACE("port %d: MAC address array full\n",
2210					port_id);
2211			return -ENOSPC;
2212		}
2213	}
2214
2215	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_hash_table_set, -ENOTSUP);
2216	ret = (*dev->dev_ops->uc_hash_table_set)(dev, addr, on);
2217	if (ret == 0) {
2218		/* Update address in NIC data structure */
2219		if (on)
2220			ether_addr_copy(addr,
2221					&dev->data->hash_mac_addrs[index]);
2222		else
2223			ether_addr_copy(&null_mac_addr,
2224					&dev->data->hash_mac_addrs[index]);
2225	}
2226
2227	return ret;
2228}
2229
2230int
2231rte_eth_dev_uc_all_hash_table_set(uint8_t port_id, uint8_t on)
2232{
2233	struct rte_eth_dev *dev;
2234
2235	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2236
2237	dev = &rte_eth_devices[port_id];
2238
2239	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->uc_all_hash_table_set, -ENOTSUP);
2240	return (*dev->dev_ops->uc_all_hash_table_set)(dev, on);
2241}
2242
2243int
2244rte_eth_dev_set_vf_rx(uint8_t port_id, uint16_t vf, uint8_t on)
2245{
2246	uint16_t num_vfs;
2247	struct rte_eth_dev *dev;
2248	struct rte_eth_dev_info dev_info;
2249
2250	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251
2252	dev = &rte_eth_devices[port_id];
2253	rte_eth_dev_info_get(port_id, &dev_info);
2254
2255	num_vfs = dev_info.max_vfs;
2256	if (vf >= num_vfs) {
2257		RTE_PMD_DEBUG_TRACE("port %d: invalid vf id\n", port_id);
2258		return -EINVAL;
2259	}
2260
2261	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rx, -ENOTSUP);
2262	return (*dev->dev_ops->set_vf_rx)(dev, vf, on);
2263}
2264
2265int
2266rte_eth_dev_set_vf_tx(uint8_t port_id, uint16_t vf, uint8_t on)
2267{
2268	uint16_t num_vfs;
2269	struct rte_eth_dev *dev;
2270	struct rte_eth_dev_info dev_info;
2271
2272	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2273
2274	dev = &rte_eth_devices[port_id];
2275	rte_eth_dev_info_get(port_id, &dev_info);
2276
2277	num_vfs = dev_info.max_vfs;
2278	if (vf >= num_vfs) {
2279		RTE_PMD_DEBUG_TRACE("set VF TX:invalid VF id=%d\n", vf);
2280		return -EINVAL;
2281	}
2282
2283	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_tx, -ENOTSUP);
2284	return (*dev->dev_ops->set_vf_tx)(dev, vf, on);
2285}
2286
2287int
2288rte_eth_dev_set_vf_vlan_filter(uint8_t port_id, uint16_t vlan_id,
2289			       uint64_t vf_mask, uint8_t vlan_on)
2290{
2291	struct rte_eth_dev *dev;
2292
2293	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2294
2295	dev = &rte_eth_devices[port_id];
2296
2297	if (vlan_id > ETHER_MAX_VLAN_ID) {
2298		RTE_PMD_DEBUG_TRACE("VF VLAN filter:invalid VLAN id=%d\n",
2299			vlan_id);
2300		return -EINVAL;
2301	}
2302
2303	if (vf_mask == 0) {
2304		RTE_PMD_DEBUG_TRACE("VF VLAN filter:pool_mask can not be 0\n");
2305		return -EINVAL;
2306	}
2307
2308	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_vlan_filter, -ENOTSUP);
2309	return (*dev->dev_ops->set_vf_vlan_filter)(dev, vlan_id,
2310						   vf_mask, vlan_on);
2311}
2312
2313int rte_eth_set_queue_rate_limit(uint8_t port_id, uint16_t queue_idx,
2314					uint16_t tx_rate)
2315{
2316	struct rte_eth_dev *dev;
2317	struct rte_eth_dev_info dev_info;
2318	struct rte_eth_link link;
2319
2320	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2321
2322	dev = &rte_eth_devices[port_id];
2323	rte_eth_dev_info_get(port_id, &dev_info);
2324	link = dev->data->dev_link;
2325
2326	if (queue_idx >= dev_info.max_tx_queues) {
2327		RTE_PMD_DEBUG_TRACE("set queue rate limit:port %d: "
2328				"invalid queue id=%d\n", port_id, queue_idx);
2329		return -EINVAL;
2330	}
2331
2332	if (tx_rate > link.link_speed) {
2333		RTE_PMD_DEBUG_TRACE("set queue rate limit:invalid tx_rate=%d, "
2334				"bigger than link speed %d\n",
2335			tx_rate, link.link_speed);
2336		return -EINVAL;
2337	}
2338
2339	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_queue_rate_limit, -ENOTSUP);
2340	return (*dev->dev_ops->set_queue_rate_limit)(dev, queue_idx, tx_rate);
2341}
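
/*
 * Usage sketch: link.link_speed, and therefore tx_rate, is expressed in
 * Mbps in this API, so capping TX queue 0 of a started port at 100 Mbps
 * would be (assuming "port_id" is valid and "ret" an int):
 *
 *	ret = rte_eth_set_queue_rate_limit(port_id, 0, 100);
 */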
2342
2343int rte_eth_set_vf_rate_limit(uint8_t port_id, uint16_t vf, uint16_t tx_rate,
2344				uint64_t q_msk)
2345{
2346	struct rte_eth_dev *dev;
2347	struct rte_eth_dev_info dev_info;
2348	struct rte_eth_link link;
2349
2350	if (q_msk == 0)
2351		return 0;
2352
2353	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2354
2355	dev = &rte_eth_devices[port_id];
2356	rte_eth_dev_info_get(port_id, &dev_info);
2357	link = dev->data->dev_link;
2358
2359	if (vf >= dev_info.max_vfs) {
2360		RTE_PMD_DEBUG_TRACE("set VF rate limit:port %d: "
2361				"invalid vf id=%d\n", port_id, vf);
2362		return -EINVAL;
2363	}
2364
2365	if (tx_rate > link.link_speed) {
2366		RTE_PMD_DEBUG_TRACE("set VF rate limit:invalid tx_rate=%d, "
2367				"bigger than link speed %d\n",
2368				tx_rate, link.link_speed);
2369		return -EINVAL;
2370	}
2371
2372	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_vf_rate_limit, -ENOTSUP);
2373	return (*dev->dev_ops->set_vf_rate_limit)(dev, vf, tx_rate, q_msk);
2374}
2375
2376int
2377rte_eth_mirror_rule_set(uint8_t port_id,
2378			struct rte_eth_mirror_conf *mirror_conf,
2379			uint8_t rule_id, uint8_t on)
2380{
2381	struct rte_eth_dev *dev;
2382
2383	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2384	if (mirror_conf->rule_type == 0) {
2385		RTE_PMD_DEBUG_TRACE("mirror rule type can not be 0.\n");
2386		return -EINVAL;
2387	}
2388
2389	if (mirror_conf->dst_pool >= ETH_64_POOLS) {
2390		RTE_PMD_DEBUG_TRACE("Invalid dst pool, pool id must be 0-%d\n",
2391				ETH_64_POOLS - 1);
2392		return -EINVAL;
2393	}
2394
2395	if ((mirror_conf->rule_type & (ETH_MIRROR_VIRTUAL_POOL_UP |
2396	     ETH_MIRROR_VIRTUAL_POOL_DOWN)) &&
2397	    (mirror_conf->pool_mask == 0)) {
2398		RTE_PMD_DEBUG_TRACE("Invalid mirror pool, pool mask can not be 0.\n");
2399		return -EINVAL;
2400	}
2401
2402	if ((mirror_conf->rule_type & ETH_MIRROR_VLAN) &&
2403	    mirror_conf->vlan.vlan_mask == 0) {
2404		RTE_PMD_DEBUG_TRACE("Invalid vlan mask, vlan mask can not be 0.\n");
2405		return -EINVAL;
2406	}
2407
2408	dev = &rte_eth_devices[port_id];
2409	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_set, -ENOTSUP);
2410
2411	return (*dev->dev_ops->mirror_rule_set)(dev, mirror_conf, rule_id, on);
2412}
2413
2414int
2415rte_eth_mirror_rule_reset(uint8_t port_id, uint8_t rule_id)
2416{
2417	struct rte_eth_dev *dev;
2418
2419	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2420
2421	dev = &rte_eth_devices[port_id];
2422	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mirror_rule_reset, -ENOTSUP);
2423
2424	return (*dev->dev_ops->mirror_rule_reset)(dev, rule_id);
2425}
2426
2427int
2428rte_eth_dev_callback_register(uint8_t port_id,
2429			enum rte_eth_event_type event,
2430			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2431{
2432	struct rte_eth_dev *dev;
2433	struct rte_eth_dev_callback *user_cb;
2434
2435	if (!cb_fn)
2436		return -EINVAL;
2437
2438	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2439
2440	dev = &rte_eth_devices[port_id];
2441	rte_spinlock_lock(&rte_eth_dev_cb_lock);
2442
2443	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
2444		if (user_cb->cb_fn == cb_fn &&
2445			user_cb->cb_arg == cb_arg &&
2446			user_cb->event == event) {
2447			break;
2448		}
2449	}
2450
2451	/* create a new callback. */
2452	if (user_cb == NULL) {
2453		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
2454					sizeof(struct rte_eth_dev_callback), 0);
2455		if (user_cb != NULL) {
2456			user_cb->cb_fn = cb_fn;
2457			user_cb->cb_arg = cb_arg;
2458			user_cb->event = event;
2459			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
2460		}
2461	}
2462
2463	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2464	return (user_cb == NULL) ? -ENOMEM : 0;
2465}
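
/*
 * Usage sketch: assuming a handler matching the rte_eth_dev_cb_fn
 * signature, an application could be notified of link-state changes;
 * the same cb_fn/cb_arg pair must be passed to unregister it later:
 *
 *	static void
 *	lsc_handler(uint8_t port_id, enum rte_eth_event_type event, void *arg)
 *	{
 *		printf("port %u: link state changed\n", port_id);
 *	}
 *
 *	ret = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 *					    lsc_handler, NULL);
 */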
2466
2467int
2468rte_eth_dev_callback_unregister(uint8_t port_id,
2469			enum rte_eth_event_type event,
2470			rte_eth_dev_cb_fn cb_fn, void *cb_arg)
2471{
2472	int ret;
2473	struct rte_eth_dev *dev;
2474	struct rte_eth_dev_callback *cb, *next;
2475
2476	if (!cb_fn)
2477		return -EINVAL;
2478
2479	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2480
2481	dev = &rte_eth_devices[port_id];
2482	rte_spinlock_lock(&rte_eth_dev_cb_lock);
2483
2484	ret = 0;
2485	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
2486
2487		next = TAILQ_NEXT(cb, next);
2488
2489		if (cb->cb_fn != cb_fn || cb->event != event ||
2490				(cb->cb_arg != (void *)-1 &&
2491				cb->cb_arg != cb_arg))
2492			continue;
2493
2494		/*
2495		 * if this callback is not executing right now,
2496		 * then remove it.
2497		 */
2498		if (cb->active == 0) {
2499			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
2500			rte_free(cb);
2501		} else {
2502			ret = -EAGAIN;
2503		}
2504	}
2505
2506	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2507	return ret;
2508}
2509
2510void
2511_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
2512	enum rte_eth_event_type event, void *cb_arg)
2513{
2514	struct rte_eth_dev_callback *cb_lst;
2515	struct rte_eth_dev_callback dev_cb;
2516
2517	rte_spinlock_lock(&rte_eth_dev_cb_lock);
2518	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
2519		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
2520			continue;
2521		dev_cb = *cb_lst;
2522		cb_lst->active = 1;
2523		if (cb_arg != NULL)
2524			dev_cb.cb_arg = (void *) cb_arg;
2525
2526		rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2527		dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
2528						dev_cb.cb_arg);
2529		rte_spinlock_lock(&rte_eth_dev_cb_lock);
2530		cb_lst->active = 0;
2531	}
2532	rte_spinlock_unlock(&rte_eth_dev_cb_lock);
2533}
2534
2535int
2536rte_eth_dev_rx_intr_ctl(uint8_t port_id, int epfd, int op, void *data)
2537{
2538	uint32_t vec;
2539	struct rte_eth_dev *dev;
2540	struct rte_intr_handle *intr_handle;
2541	uint16_t qid;
2542	int rc;
2543
2544	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2545
2546	dev = &rte_eth_devices[port_id];
2547	intr_handle = &dev->pci_dev->intr_handle;
2548	if (!intr_handle->intr_vec) {
2549		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2550		return -EPERM;
2551	}
2552
2553	for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
2554		vec = intr_handle->intr_vec[qid];
2555		rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2556		if (rc && rc != -EEXIST) {
2557			RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2558					" op %d epfd %d vec %u\n",
2559					port_id, qid, op, epfd, vec);
2560		}
2561	}
2562
2563	return 0;
2564}
2565
2566const struct rte_memzone *
2567rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
2568			 uint16_t queue_id, size_t size, unsigned align,
2569			 int socket_id)
2570{
2571	char z_name[RTE_MEMZONE_NAMESIZE];
2572	const struct rte_memzone *mz;
2573
2574	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
2575		 dev->driver->pci_drv.driver.name, ring_name,
2576		 dev->data->port_id, queue_id);
2577
2578	mz = rte_memzone_lookup(z_name);
2579	if (mz)
2580		return mz;
2581
2582	if (rte_xen_dom0_supported())
2583		return rte_memzone_reserve_bounded(z_name, size, socket_id,
2584						   0, align, RTE_PGSIZE_2M);
2585	else
2586		return rte_memzone_reserve_aligned(z_name, size, socket_id,
2587						   0, align);
2588}
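
/*
 * Usage sketch (PMD-facing): from a driver's tx_queue_setup callback,
 * assuming "dev", "queue_id", "ring_size" and "socket_id" are in scope,
 * a descriptor ring could be allocated, or re-looked-up after a restart
 * since an existing memzone of the same name is returned as-is:
 *
 *	const struct rte_memzone *tz;
 *
 *	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
 *				      ring_size, RTE_CACHE_LINE_SIZE,
 *				      socket_id);
 *	if (tz == NULL)
 *		return -ENOMEM;
 */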
2589
2590int
2591rte_eth_dev_rx_intr_ctl_q(uint8_t port_id, uint16_t queue_id,
2592			  int epfd, int op, void *data)
2593{
2594	uint32_t vec;
2595	struct rte_eth_dev *dev;
2596	struct rte_intr_handle *intr_handle;
2597	int rc;
2598
2599	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2600
2601	dev = &rte_eth_devices[port_id];
2602	if (queue_id >= dev->data->nb_rx_queues) {
2603		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%u\n", queue_id);
2604		return -EINVAL;
2605	}
2606
2607	intr_handle = &dev->pci_dev->intr_handle;
2608	if (!intr_handle->intr_vec) {
2609		RTE_PMD_DEBUG_TRACE("RX Intr vector unset\n");
2610		return -EPERM;
2611	}
2612
2613	vec = intr_handle->intr_vec[queue_id];
2614	rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
2615	if (rc && rc != -EEXIST) {
2616		RTE_PMD_DEBUG_TRACE("p %u q %u rx ctl error"
2617				" op %d epfd %d vec %u\n",
2618				port_id, queue_id, op, epfd, vec);
2619		return rc;
2620	}
2621
2622	return 0;
2623}
2624
2625int
2626rte_eth_dev_rx_intr_enable(uint8_t port_id,
2627			   uint16_t queue_id)
2628{
2629	struct rte_eth_dev *dev;
2630
2631	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2632
2633	dev = &rte_eth_devices[port_id];
2634
2635	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_enable, -ENOTSUP);
2636	return (*dev->dev_ops->rx_queue_intr_enable)(dev, queue_id);
2637}
2638
2639int
2640rte_eth_dev_rx_intr_disable(uint8_t port_id,
2641			    uint16_t queue_id)
2642{
2643	struct rte_eth_dev *dev;
2644
2645	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2646
2647	dev = &rte_eth_devices[port_id];
2648
2649	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_intr_disable, -ENOTSUP);
2650	return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
2651}
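
/*
 * Usage sketch: assuming a PMD with RX interrupt support and at least
 * one RX queue, a polling thread could register queue 0 with its
 * per-thread epoll instance, arm the interrupt and sleep until traffic
 * arrives (a timeout of -1 waits forever):
 *
 *	struct rte_epoll_event ev;
 *
 *	ret = rte_eth_dev_rx_intr_ctl_q(port_id, 0, RTE_EPOLL_PER_THREAD,
 *					RTE_INTR_EVENT_ADD, NULL);
 *	ret = rte_eth_dev_rx_intr_enable(port_id, 0);
 *	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
 *	ret = rte_eth_dev_rx_intr_disable(port_id, 0);
 */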
2652
2653#ifdef RTE_NIC_BYPASS
2654int rte_eth_dev_bypass_init(uint8_t port_id)
2655{
2656	struct rte_eth_dev *dev;
2657
2658	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2659
2660	dev = &rte_eth_devices[port_id];
2661	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
2662	(*dev->dev_ops->bypass_init)(dev);
2663	return 0;
2664}
2665
2666int
2667rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
2668{
2669	struct rte_eth_dev *dev;
2670
2671	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2672
2673	dev = &rte_eth_devices[port_id];
2674	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
2675	(*dev->dev_ops->bypass_state_show)(dev, state);
2676	return 0;
2677}
2678
2679int
2680rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
2681{
2682	struct rte_eth_dev *dev;
2683
2684	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2685
2686	dev = &rte_eth_devices[port_id];
2687	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
2688	(*dev->dev_ops->bypass_state_set)(dev, new_state);
2689	return 0;
2690}
2691
2692int
2693rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
2694{
2695	struct rte_eth_dev *dev;
2696
2697	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2698
2699	dev = &rte_eth_devices[port_id];
2700	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_show, -ENOTSUP);
2701	(*dev->dev_ops->bypass_event_show)(dev, event, state);
2702	return 0;
2703}
2704
2705int
2706rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
2707{
2708	struct rte_eth_dev *dev;
2709
2710	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2711
2712	dev = &rte_eth_devices[port_id];
2713
2714	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
2715	(*dev->dev_ops->bypass_event_set)(dev, event, state);
2716	return 0;
2717}
2718
2719int
2720rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
2721{
2722	struct rte_eth_dev *dev;
2723
2724	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2725
2726	dev = &rte_eth_devices[port_id];
2727
2728	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
2729	(*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
2730	return 0;
2731}
2732
2733int
2734rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
2735{
2736	struct rte_eth_dev *dev;
2737
2738	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2739
2740	dev = &rte_eth_devices[port_id];
2741
2742	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
2743	(*dev->dev_ops->bypass_ver_show)(dev, ver);
2744	return 0;
2745}
2746
2747int
2748rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
2749{
2750	struct rte_eth_dev *dev;
2751
2752	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2753
2754	dev = &rte_eth_devices[port_id];
2755
2756	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
2757	(*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
2758	return 0;
2759}
2760
2761int
2762rte_eth_dev_bypass_wd_reset(uint8_t port_id)
2763{
2764	struct rte_eth_dev *dev;
2765
2766	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2767
2768	dev = &rte_eth_devices[port_id];
2769
2770	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
2771	(*dev->dev_ops->bypass_wd_reset)(dev);
2772	return 0;
2773}
2774#endif
2775
2776int
2777rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
2778{
2779	struct rte_eth_dev *dev;
2780
2781	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2782
2783	dev = &rte_eth_devices[port_id];
2784	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2785	return (*dev->dev_ops->filter_ctrl)(dev, filter_type,
2786				RTE_ETH_FILTER_NOP, NULL);
2787}
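
/*
 * Usage sketch: a return of 0 means the PMD accepts that filter family
 * through rte_eth_dev_filter_ctrl(); e.g. probing for 5-tuple support:
 *
 *	if (rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_NTUPLE) == 0)
 *		printf("port %u supports ntuple filters\n", port_id);
 */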
2788
2789int
2790rte_eth_dev_filter_ctrl(uint8_t port_id, enum rte_filter_type filter_type,
2791		       enum rte_filter_op filter_op, void *arg)
2792{
2793	struct rte_eth_dev *dev;
2794
2795	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2796
2797	dev = &rte_eth_devices[port_id];
2798	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->filter_ctrl, -ENOTSUP);
2799	return (*dev->dev_ops->filter_ctrl)(dev, filter_type, filter_op, arg);
2800}
2801
2802void *
2803rte_eth_add_rx_callback(uint8_t port_id, uint16_t queue_id,
2804		rte_rx_callback_fn fn, void *user_param)
2805{
2806#ifndef RTE_ETHDEV_RXTX_CALLBACKS
2807	rte_errno = ENOTSUP;
2808	return NULL;
2809#endif
2810	/* check input parameters */
2811	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2812		    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2813		rte_errno = EINVAL;
2814		return NULL;
2815	}
2816	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2817
2818	if (cb == NULL) {
2819		rte_errno = ENOMEM;
2820		return NULL;
2821	}
2822
2823	cb->fn.rx = fn;
2824	cb->param = user_param;
2825
2826	rte_spinlock_lock(&rte_eth_rx_cb_lock);
2827	/* Add the callbacks in FIFO order. */
2828	struct rte_eth_rxtx_callback *tail =
2829		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2830
2831	if (!tail) {
2832		rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2833
2834	} else {
2835		while (tail->next)
2836			tail = tail->next;
2837		tail->next = cb;
2838	}
2839	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2840
2841	return cb;
2842}
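
/*
 * Usage sketch: assuming queue 0 exists, a callback matching the
 * rte_rx_callback_fn signature can be chained behind rx_burst to count
 * received packets; the returned handle is what
 * rte_eth_remove_rx_callback() later expects:
 *
 *	static uint16_t
 *	count_rx(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
 *		 uint16_t nb_pkts, uint16_t max_pkts, void *arg)
 *	{
 *		*(uint64_t *)arg += nb_pkts;
 *		return nb_pkts;
 *	}
 *
 *	static uint64_t rx_count;
 *	void *cb = rte_eth_add_rx_callback(port_id, 0, count_rx, &rx_count);
 */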
2843
2844void *
2845rte_eth_add_first_rx_callback(uint8_t port_id, uint16_t queue_id,
2846		rte_rx_callback_fn fn, void *user_param)
2847{
2848#ifndef RTE_ETHDEV_RXTX_CALLBACKS
2849	rte_errno = ENOTSUP;
2850	return NULL;
2851#endif
2852	/* check input parameters */
2853	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2854		queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
2855		rte_errno = EINVAL;
2856		return NULL;
2857	}
2858
2859	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2860
2861	if (cb == NULL) {
2862		rte_errno = ENOMEM;
2863		return NULL;
2864	}
2865
2866	cb->fn.rx = fn;
2867	cb->param = user_param;
2868
2869	rte_spinlock_lock(&rte_eth_rx_cb_lock);
2870	/* Add the callback at the first position. */
2871	cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
2872	rte_smp_wmb();
2873	rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
2874	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2875
2876	return cb;
2877}
2878
2879void *
2880rte_eth_add_tx_callback(uint8_t port_id, uint16_t queue_id,
2881		rte_tx_callback_fn fn, void *user_param)
2882{
2883#ifndef RTE_ETHDEV_RXTX_CALLBACKS
2884	rte_errno = ENOTSUP;
2885	return NULL;
2886#endif
2887	/* check input parameters */
2888	if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
2889		    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
2890		rte_errno = EINVAL;
2891		return NULL;
2892	}
2893
2894	struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
2895
2896	if (cb == NULL) {
2897		rte_errno = ENOMEM;
2898		return NULL;
2899	}
2900
2901	cb->fn.tx = fn;
2902	cb->param = user_param;
2903
2904	rte_spinlock_lock(&rte_eth_tx_cb_lock);
2905	/* Add the callbacks in FIFO order. */
2906	struct rte_eth_rxtx_callback *tail =
2907		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id];
2908
2909	if (!tail) {
2910		rte_eth_devices[port_id].pre_tx_burst_cbs[queue_id] = cb;
2911
2912	} else {
2913		while (tail->next)
2914			tail = tail->next;
2915		tail->next = cb;
2916	}
2917	rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2918
2919	return cb;
2920}
2921
2922int
2923rte_eth_remove_rx_callback(uint8_t port_id, uint16_t queue_id,
2924		struct rte_eth_rxtx_callback *user_cb)
2925{
2926#ifndef RTE_ETHDEV_RXTX_CALLBACKS
2927	return -ENOTSUP;
2928#endif
2929	/* Check input parameters. */
2930	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2931	if (user_cb == NULL ||
2932			queue_id >= rte_eth_devices[port_id].data->nb_rx_queues)
2933		return -EINVAL;
2934
2935	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2936	struct rte_eth_rxtx_callback *cb;
2937	struct rte_eth_rxtx_callback **prev_cb;
2938	int ret = -EINVAL;
2939
2940	rte_spinlock_lock(&rte_eth_rx_cb_lock);
2941	prev_cb = &dev->post_rx_burst_cbs[queue_id];
2942	for (; *prev_cb != NULL; prev_cb = &cb->next) {
2943		cb = *prev_cb;
2944		if (cb == user_cb) {
2945			/* Remove the user cb from the callback list. */
2946			*prev_cb = cb->next;
2947			ret = 0;
2948			break;
2949		}
2950	}
2951	rte_spinlock_unlock(&rte_eth_rx_cb_lock);
2952
2953	return ret;
2954}
2955
2956int
2957rte_eth_remove_tx_callback(uint8_t port_id, uint16_t queue_id,
2958		struct rte_eth_rxtx_callback *user_cb)
2959{
2960#ifndef RTE_ETHDEV_RXTX_CALLBACKS
2961	return -ENOTSUP;
2962#endif
2963	/* Check input parameters. */
2964	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
2965	if (user_cb == NULL ||
2966			queue_id >= rte_eth_devices[port_id].data->nb_tx_queues)
2967		return -EINVAL;
2968
2969	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2970	int ret = -EINVAL;
2971	struct rte_eth_rxtx_callback *cb;
2972	struct rte_eth_rxtx_callback **prev_cb;
2973
2974	rte_spinlock_lock(&rte_eth_tx_cb_lock);
2975	prev_cb = &dev->pre_tx_burst_cbs[queue_id];
2976	for (; *prev_cb != NULL; prev_cb = &cb->next) {
2977		cb = *prev_cb;
2978		if (cb == user_cb) {
2979			/* Remove the user cb from the callback list. */
2980			*prev_cb = cb->next;
2981			ret = 0;
2982			break;
2983		}
2984	}
2985	rte_spinlock_unlock(&rte_eth_tx_cb_lock);
2986
2987	return ret;
2988}
2989
2990int
2991rte_eth_rx_queue_info_get(uint8_t port_id, uint16_t queue_id,
2992	struct rte_eth_rxq_info *qinfo)
2993{
2994	struct rte_eth_dev *dev;
2995
2996	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2997
2998	if (qinfo == NULL)
2999		return -EINVAL;
3000
3001	dev = &rte_eth_devices[port_id];
3002	if (queue_id >= dev->data->nb_rx_queues) {
3003		RTE_PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
3004		return -EINVAL;
3005	}
3006
3007	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
3008
3009	memset(qinfo, 0, sizeof(*qinfo));
3010	(*dev->dev_ops->rxq_info_get)(dev, queue_id, qinfo);
3011	return 0;
3012}
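
/*
 * Usage sketch: assuming the PMD implements rxq_info_get, the ring size
 * actually programmed for RX queue 0 can be read back like this:
 *
 *	struct rte_eth_rxq_info qinfo;
 *
 *	if (rte_eth_rx_queue_info_get(port_id, 0, &qinfo) == 0)
 *		printf("RX queue 0 has %u descriptors\n", qinfo.nb_desc);
 */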
3013
3014int
3015rte_eth_tx_queue_info_get(uint8_t port_id, uint16_t queue_id,
3016	struct rte_eth_txq_info *qinfo)
3017{
3018	struct rte_eth_dev *dev;
3019
3020	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3021
3022	if (qinfo == NULL)
3023		return -EINVAL;
3024
3025	dev = &rte_eth_devices[port_id];
3026	if (queue_id >= dev->data->nb_tx_queues) {
3027		RTE_PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
3028		return -EINVAL;
3029	}
3030
3031	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
3032
3033	memset(qinfo, 0, sizeof(*qinfo));
3034	(*dev->dev_ops->txq_info_get)(dev, queue_id, qinfo);
3035	return 0;
3036}
3037
3038int
3039rte_eth_dev_set_mc_addr_list(uint8_t port_id,
3040			     struct ether_addr *mc_addr_set,
3041			     uint32_t nb_mc_addr)
3042{
3043	struct rte_eth_dev *dev;
3044
3045	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3046
3047	dev = &rte_eth_devices[port_id];
3048	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_mc_addr_list, -ENOTSUP);
3049	return (*dev->dev_ops->set_mc_addr_list)(dev, mc_addr_set, nb_mc_addr);
3050}
3051
3052int
3053rte_eth_timesync_enable(uint8_t port_id)
3054{
3055	struct rte_eth_dev *dev;
3056
3057	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3058	dev = &rte_eth_devices[port_id];
3059
3060	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_enable, -ENOTSUP);
3061	return (*dev->dev_ops->timesync_enable)(dev);
3062}
3063
3064int
3065rte_eth_timesync_disable(uint8_t port_id)
3066{
3067	struct rte_eth_dev *dev;
3068
3069	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3070	dev = &rte_eth_devices[port_id];
3071
3072	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_disable, -ENOTSUP);
3073	return (*dev->dev_ops->timesync_disable)(dev);
3074}
3075
3076int
3077rte_eth_timesync_read_rx_timestamp(uint8_t port_id, struct timespec *timestamp,
3078				   uint32_t flags)
3079{
3080	struct rte_eth_dev *dev;
3081
3082	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3083	dev = &rte_eth_devices[port_id];
3084
3085	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_rx_timestamp, -ENOTSUP);
3086	return (*dev->dev_ops->timesync_read_rx_timestamp)(dev, timestamp, flags);
3087}
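
/*
 * Usage sketch: assuming a PMD with IEEE1588 support and a PTP frame
 * already received on queue 0, its hardware RX timestamp can be fetched
 * as below; the "flags" argument is device-specific and left 0 here:
 *
 *	struct timespec ts;
 *
 *	ret = rte_eth_timesync_enable(port_id);
 *	...
 *	ret = rte_eth_timesync_read_rx_timestamp(port_id, &ts, 0);
 */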
3088
3089int
3090rte_eth_timesync_read_tx_timestamp(uint8_t port_id, struct timespec *timestamp)
3091{
3092	struct rte_eth_dev *dev;
3093
3094	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3095	dev = &rte_eth_devices[port_id];
3096
3097	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_tx_timestamp, -ENOTSUP);
3098	return (*dev->dev_ops->timesync_read_tx_timestamp)(dev, timestamp);
3099}
3100
3101int
3102rte_eth_timesync_adjust_time(uint8_t port_id, int64_t delta)
3103{
3104	struct rte_eth_dev *dev;
3105
3106	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3107	dev = &rte_eth_devices[port_id];
3108
3109	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_adjust_time, -ENOTSUP);
3110	return (*dev->dev_ops->timesync_adjust_time)(dev, delta);
3111}
3112
3113int
3114rte_eth_timesync_read_time(uint8_t port_id, struct timespec *timestamp)
3115{
3116	struct rte_eth_dev *dev;
3117
3118	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3119	dev = &rte_eth_devices[port_id];
3120
3121	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_read_time, -ENOTSUP);
3122	return (*dev->dev_ops->timesync_read_time)(dev, timestamp);
3123}
3124
3125int
3126rte_eth_timesync_write_time(uint8_t port_id, const struct timespec *timestamp)
3127{
3128	struct rte_eth_dev *dev;
3129
3130	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3131	dev = &rte_eth_devices[port_id];
3132
3133	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timesync_write_time, -ENOTSUP);
3134	return (*dev->dev_ops->timesync_write_time)(dev, timestamp);
3135}
3136
3137int
3138rte_eth_dev_get_reg_info(uint8_t port_id, struct rte_dev_reg_info *info)
3139{
3140	struct rte_eth_dev *dev;
3141
3142	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3143
3144	dev = &rte_eth_devices[port_id];
3145	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_reg, -ENOTSUP);
3146	return (*dev->dev_ops->get_reg)(dev, info);
3147}
3148
3149int
3150rte_eth_dev_get_eeprom_length(uint8_t port_id)
3151{
3152	struct rte_eth_dev *dev;
3153
3154	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3155
3156	dev = &rte_eth_devices[port_id];
3157	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom_length, -ENOTSUP);
3158	return (*dev->dev_ops->get_eeprom_length)(dev);
3159}
3160
3161int
3162rte_eth_dev_get_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3163{
3164	struct rte_eth_dev *dev;
3165
3166	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3167
3168	dev = &rte_eth_devices[port_id];
3169	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_eeprom, -ENOTSUP);
3170	return (*dev->dev_ops->get_eeprom)(dev, info);
3171}
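
/*
 * Usage sketch: assuming "buf" is a caller-owned buffer at least as
 * large as the device EEPROM, a full dump could be obtained like this:
 *
 *	int len = rte_eth_dev_get_eeprom_length(port_id);
 *
 *	if (len > 0) {
 *		struct rte_dev_eeprom_info info = {
 *			.data = buf,
 *			.offset = 0,
 *			.length = (uint32_t)len,
 *		};
 *		ret = rte_eth_dev_get_eeprom(port_id, &info);
 *	}
 */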
3172
3173int
3174rte_eth_dev_set_eeprom(uint8_t port_id, struct rte_dev_eeprom_info *info)
3175{
3176	struct rte_eth_dev *dev;
3177
3178	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3179
3180	dev = &rte_eth_devices[port_id];
3181	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->set_eeprom, -ENOTSUP);
3182	return (*dev->dev_ops->set_eeprom)(dev, info);
3183}
3184
3185int
3186rte_eth_dev_get_dcb_info(uint8_t port_id,
3187			     struct rte_eth_dcb_info *dcb_info)
3188{
3189	struct rte_eth_dev *dev;
3190
3191	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3192
3193	dev = &rte_eth_devices[port_id];
3194	memset(dcb_info, 0, sizeof(struct rte_eth_dcb_info));
3195
3196	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->get_dcb_info, -ENOTSUP);
3197	return (*dev->dev_ops->get_dcb_info)(dev, dcb_info);
3198}
3199
3200void
3201rte_eth_copy_pci_info(struct rte_eth_dev *eth_dev, struct rte_pci_device *pci_dev)
3202{
3203	if ((eth_dev == NULL) || (pci_dev == NULL)) {
3204		RTE_PMD_DEBUG_TRACE("NULL pointer eth_dev=%p pci_dev=%p\n",
3205				eth_dev, pci_dev);
3206		return;
3207	}
3208
3209	eth_dev->data->dev_flags = 0;
3210	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)
3211		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
3212	if (pci_dev->driver->drv_flags & RTE_PCI_DRV_DETACHABLE)
3213		eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
3214
3215	eth_dev->data->kdrv = pci_dev->kdrv;
3216	eth_dev->data->numa_node = pci_dev->device.numa_node;
3217	eth_dev->data->drv_name = pci_dev->driver->driver.name;
3218}
3219
3220int
3221rte_eth_dev_l2_tunnel_eth_type_conf(uint8_t port_id,
3222				    struct rte_eth_l2_tunnel_conf *l2_tunnel)
3223{
3224	struct rte_eth_dev *dev;
3225
3226	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3227	if (l2_tunnel == NULL) {
3228		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3229		return -EINVAL;
3230	}
3231
3232	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3233		RTE_PMD_DEBUG_TRACE("Invalid tunnel type\n");
3234		return -EINVAL;
3235	}
3236
3237	dev = &rte_eth_devices[port_id];
3238	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_eth_type_conf,
3239				-ENOTSUP);
3240	return (*dev->dev_ops->l2_tunnel_eth_type_conf)(dev, l2_tunnel);
3241}
3242
3243int
3244rte_eth_dev_l2_tunnel_offload_set(uint8_t port_id,
3245				  struct rte_eth_l2_tunnel_conf *l2_tunnel,
3246				  uint32_t mask,
3247				  uint8_t en)
3248{
3249	struct rte_eth_dev *dev;
3250
3251	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
3252
3253	if (l2_tunnel == NULL) {
3254		RTE_PMD_DEBUG_TRACE("Invalid l2_tunnel parameter\n");
3255		return -EINVAL;
3256	}
3257
3258	if (l2_tunnel->l2_tunnel_type >= RTE_TUNNEL_TYPE_MAX) {
3259		RTE_PMD_DEBUG_TRACE("Invalid tunnel type.\n");
3260		return -EINVAL;
3261	}
3262
3263	if (mask == 0) {
3264		RTE_PMD_DEBUG_TRACE("Mask should have a value.\n");
3265		return -EINVAL;
3266	}
3267
3268	dev = &rte_eth_devices[port_id];
3269	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->l2_tunnel_offload_set,
3270				-ENOTSUP);
3271	return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
3272}
3273