mlx5_ethdev.c revision be13fd96
/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <stdalign.h>
#include <unistd.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <dirent.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <fcntl.h>

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_atomic.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_malloc.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

/**
 * Return private structure associated with an Ethernet device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to private structure.
 */
struct priv *
mlx5_get_priv(struct rte_eth_dev *dev)
{
	struct mlx5_secondary_data *sd;

	if (!mlx5_is_secondary())
		return dev->data->dev_private;
	sd = &mlx5_secondary_data[dev->data->port_id];
	return sd->data.dev_private;
}

/**
 * Check if running as a secondary process.
 *
 * @return
 *   Nonzero if running as a secondary process.
 */
inline int
mlx5_is_secondary(void)
{
	return rte_eal_process_type() != RTE_PROC_PRIMARY;
}

/**
 * Get interface name from private structure.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] ifname
 *   Interface name output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_ifname(const struct priv *priv, char (*ifname)[IF_NAMESIZE])
{
	DIR *dir;
	struct dirent *dent;
	unsigned int dev_type = 0;
	unsigned int dev_port_prev = ~0u;
	char match[IF_NAMESIZE] = "";

	{
		MKSTR(path, "%s/device/net", priv->ctx->device->ibdev_path);

		dir = opendir(path);
		if (dir == NULL)
			return -1;
	}
	while ((dent = readdir(dir)) != NULL) {
		char *name = dent->d_name;
		FILE *file;
		unsigned int dev_port;
		int r;

		if ((name[0] == '.') &&
		    ((name[1] == '\0') ||
		     ((name[1] == '.') && (name[2] == '\0'))))
			continue;

		MKSTR(path, "%s/device/net/%s/%s",
		      priv->ctx->device->ibdev_path, name,
		      (dev_type ? "dev_id" : "dev_port"));

		file = fopen(path, "rb");
		if (file == NULL) {
			if (errno != ENOENT)
				continue;
			/*
			 * Switch to dev_id when dev_port does not exist as
			 * is the case with Linux kernel versions < 3.15.
			 */
try_dev_id:
			match[0] = '\0';
			if (dev_type)
				break;
			dev_type = 1;
			dev_port_prev = ~0u;
			rewinddir(dir);
			continue;
		}
		r = fscanf(file, (dev_type ? "%x" : "%u"), &dev_port);
		fclose(file);
		if (r != 1)
			continue;
		/*
		 * Switch to dev_id when dev_port returns the same value for
		 * all ports. May happen when using a MOFED release older than
		 * 3.0 with a Linux kernel >= 3.15.
		 */
		if (dev_port == dev_port_prev)
			goto try_dev_id;
		dev_port_prev = dev_port;
		if (dev_port == (priv->port - 1u))
			snprintf(match, sizeof(match), "%s", name);
	}
	closedir(dir);
	if (match[0] == '\0')
		return -1;
	strncpy(*ifname, match, sizeof(*ifname));
	return 0;
}
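
/*
 * Usage sketch (illustrative only): resolving the kernel netdev name behind
 * a port and turning it into an interface index, as mlx5_dev_infos_get()
 * does below.
 *
 *	char ifname[IF_NAMESIZE];
 *
 *	if (priv_get_ifname(priv, &ifname) == 0)
 *		DEBUG("port %u is netdev %s (ifindex %u)",
 *		      priv->port, ifname, if_nametoindex(ifname));
 */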

/**
 * Read from sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[out] buf
 *   Data output buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   Number of bytes read on success, -1 on failure and errno is set.
 */
static int
priv_sysfs_read(const struct priv *priv, const char *entry,
		char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
	      ifname, entry);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	ret = fread(buf, 1, size, file);
	err = errno;
	/* Short reads (EOF) are fine, read errors are not. */
	if (((size_t)ret < size) && ferror(file))
		ret = -1;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Write to sysfs entry.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[in] entry
 *   Entry name relative to sysfs path.
 * @param[in] buf
 *   Data buffer.
 * @param size
 *   Buffer size.
 *
 * @return
 *   Number of bytes written on success, -1 on failure and errno is set.
 */
static int
priv_sysfs_write(const struct priv *priv, const char *entry,
		 char *buf, size_t size)
{
	char ifname[IF_NAMESIZE];
	FILE *file;
	int ret;
	int err;

	if (priv_get_ifname(priv, &ifname))
		return -1;

	MKSTR(path, "%s/device/net/%s/%s", priv->ctx->device->ibdev_path,
	      ifname, entry);

	file = fopen(path, "wb");
	if (file == NULL)
		return -1;
	ret = fwrite(buf, 1, size, file);
	err = errno;
	/* Partial writes are considered failures. */
	if (((size_t)ret < size) || ferror(file))
		ret = -1;
	else
		ret = size;
	fclose(file);
	errno = err;
	return ret;
}

/**
 * Get unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param[out] value
 *   Value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_get_sysfs_ulong(struct priv *priv, const char *name, unsigned long *value)
{
	int ret;
	unsigned long value_ret;
	char value_str[32];

	ret = priv_sysfs_read(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot read %s value from sysfs: %s",
		      name, strerror(errno));
		return -1;
	}
	value_str[ret] = '\0';
	errno = 0;
	value_ret = strtoul(value_str, NULL, 0);
	if (errno) {
		DEBUG("invalid %s value `%s': %s", name, value_str,
		      strerror(errno));
		return -1;
	}
	*value = value_ret;
	return 0;
}

/**
 * Set unsigned long sysfs property.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] name
 *   Entry name relative to sysfs path.
 * @param value
 *   Value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_sysfs_ulong(struct priv *priv, const char *name, unsigned long value)
{
	int ret;
	MKSTR(value_str, "%lu", value);

	ret = priv_sysfs_write(priv, name, value_str, (sizeof(value_str) - 1));
	if (ret == -1) {
		DEBUG("cannot write %s `%s' (%lu) to sysfs: %s",
		      name, value_str, value, strerror(errno));
		return -1;
	}
	return 0;
}
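
/*
 * Example (hypothetical attribute name): together, the helpers above allow a
 * read-modify-write on any numeric sysfs attribute of the netdev.
 *
 *	unsigned long v;
 *
 *	if (!priv_get_sysfs_ulong(priv, "tx_queue_len", &v))
 *		(void)priv_set_sysfs_ulong(priv, "tx_queue_len", v * 2);
 */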

/**
 * Perform ifreq ioctl() on associated Ethernet device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param req
 *   Request number to pass to ioctl().
 * @param[out] ifr
 *   Interface request structure, used for both input and output depending
 *   on the request.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr)
{
	int sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_IP);
	int ret = -1;

	if (sock == -1)
		return ret;
	if (priv_get_ifname(priv, &ifr->ifr_name) == 0)
		ret = ioctl(sock, req, ifr);
	close(sock);
	return ret;
}
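
/*
 * Usage sketch: any SIOC* request can go through priv_ifreq(), e.g. fetching
 * the MTU with SIOCGIFMTU (the driver normally reads it through sysfs
 * instead, see priv_get_mtu() below).
 *
 *	struct ifreq ifr;
 *
 *	if (priv_ifreq(priv, SIOCGIFMTU, &ifr) == 0)
 *		DEBUG("MTU: %d", ifr.ifr_mtu);
 */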

/**
 * Return the number of active VFs for the current device.
 *
 * @param[in] priv
 *   Pointer to private structure.
 * @param[out] num_vfs
 *   Number of active VFs.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs)
{
	/* The sysfs entry name depends on the operating system:
	 * "sriov_numvfs" on standard Linux, "mlx5_num_vfs" with MOFED. */
	const char **name = (const char *[]){
		"device/sriov_numvfs",
		"device/mlx5_num_vfs",
		NULL,
	};
	int ret;

	do {
		unsigned long ulong_num_vfs;

		ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs);
		if (!ret)
			*num_vfs = ulong_num_vfs;
	} while (*(++name) && ret);
	return ret;
}

/**
 * Get device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] mtu
 *   MTU value output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_get_mtu(struct priv *priv, uint16_t *mtu)
{
	unsigned long ulong_mtu;

	if (priv_get_sysfs_ulong(priv, "mtu", &ulong_mtu) == -1)
		return -1;
	*mtu = ulong_mtu;
	return 0;
}

/**
 * Set device MTU.
 *
 * @param priv
 *   Pointer to private structure.
 * @param mtu
 *   MTU value to set.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
static int
priv_set_mtu(struct priv *priv, uint16_t mtu)
{
	uint16_t new_mtu;

	if (priv_set_sysfs_ulong(priv, "mtu", mtu) ||
	    priv_get_mtu(priv, &new_mtu))
		return -1;
	if (new_mtu == mtu)
		return 0;
	errno = EINVAL;
	return -1;
}

/**
 * Set device flags.
 *
 * @param priv
 *   Pointer to private structure.
 * @param keep
 *   Bitmask for flags that must remain untouched.
 * @param flags
 *   Bitmask for flags to modify.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
priv_set_flags(struct priv *priv, unsigned int keep, unsigned int flags)
{
	unsigned long tmp;

	if (priv_get_sysfs_ulong(priv, "flags", &tmp) == -1)
		return -1;
	tmp &= keep;
	tmp |= (flags & (~keep));
	return priv_set_sysfs_ulong(priv, "flags", tmp);
}
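
/*
 * Example: the keep/flags convention above preserves all bits set in "keep"
 * and takes the remaining bits from "flags", so
 *
 *	priv_set_flags(priv, ~IFF_UP, IFF_UP);
 *
 * brings the interface up, while
 *
 *	priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
 *
 * brings it down (see priv_set_link() below).
 */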

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	unsigned int rxqs_n = dev->data->nb_rx_queues;
	unsigned int txqs_n = dev->data->nb_tx_queues;
	unsigned int i;
	unsigned int j;
	unsigned int reta_idx_n;

	priv->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	priv->rxqs = (void *)dev->data->rx_queues;
	priv->txqs = (void *)dev->data->tx_queues;
	if (txqs_n != priv->txqs_n) {
		INFO("%p: TX queues number update: %u -> %u",
		     (void *)dev, priv->txqs_n, txqs_n);
		priv->txqs_n = txqs_n;
	}
	if (rxqs_n > priv->ind_table_max_size) {
		ERROR("cannot handle this many RX queues (%u)", rxqs_n);
		return EINVAL;
	}
	if (rxqs_n == priv->rxqs_n)
		return 0;
	INFO("%p: RX queues number update: %u -> %u",
	     (void *)dev, priv->rxqs_n, rxqs_n);
	priv->rxqs_n = rxqs_n;
	/* If the requested number of RX queues is not a power of two, use the
	 * maximum indirection table size for better balancing.
	 * The result is always rounded to the next power of two. */
	reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
				     priv->ind_table_max_size :
				     rxqs_n));
	if (priv_rss_reta_index_resize(priv, reta_idx_n))
		return ENOMEM;
	/* When the number of RX queues is not a power of two, the remaining
	 * table entries are padded with reused WQs and hashes are not spread
	 * uniformly. */
	for (i = 0, j = 0; (i != reta_idx_n); ++i) {
		(*priv->reta_idx)[i] = j;
		if (++j == rxqs_n)
			j = 0;
	}
	return 0;
}
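
/*
 * Worked example for the RETA fill above: with rxqs_n = 6 (not a power of
 * two) and ind_table_max_size = 512, reta_idx_n = 512 and the table holds
 * the repeating pattern 0 1 2 3 4 5 0 1 ..., so queues 0 and 1 end up with
 * 86 entries each while queues 2-5 get 85, hence the "not spread uniformly"
 * caveat.
 */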

/**
 * DPDK callback for Ethernet device configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_configure(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	ret = dev_configure(dev);
	assert(ret >= 0);
	priv_unlock(priv);
	return -ret;
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] info
 *   Info structure output buffer.
 */
void
mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
{
	struct priv *priv = mlx5_get_priv(dev);
	unsigned int max;
	char ifname[IF_NAMESIZE];

	priv_lock(priv);
	/* FIXME: we should ask the device for these values. */
	info->min_rx_bufsize = 32;
	info->max_rx_pktlen = 65536;
	/*
	 * Since we need one CQ per QP, the limit is the smaller of the
	 * two values.
	 */
	max = ((priv->device_attr.max_cq > priv->device_attr.max_qp) ?
	       priv->device_attr.max_qp : priv->device_attr.max_cq);
	/* max_rx_queues and max_tx_queues are uint16_t, cap the value so
	 * the assignments below cannot wrap around. */
	if (max >= 65535)
		max = 65535;
	info->max_rx_queues = max;
	info->max_tx_queues = max;
	info->max_mac_addrs = RTE_DIM(priv->mac);
	info->rx_offload_capa =
		(priv->hw_csum ?
		 (DEV_RX_OFFLOAD_IPV4_CKSUM |
		  DEV_RX_OFFLOAD_UDP_CKSUM |
		  DEV_RX_OFFLOAD_TCP_CKSUM) :
		 0) |
		(priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0);
	if (!priv->mps)
		info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
	if (priv->hw_csum)
		info->tx_offload_capa |=
			(DEV_TX_OFFLOAD_IPV4_CKSUM |
			 DEV_TX_OFFLOAD_UDP_CKSUM |
			 DEV_TX_OFFLOAD_TCP_CKSUM);
	if (priv_get_ifname(priv, &ifname) == 0)
		info->if_index = if_nametoindex(ifname);
	/* FIXME: RETA update/query API expects the callee to know the size of
	 * the indirection table, for this PMD the size varies depending on
	 * the number of RX queues, it becomes impossible to find the correct
	 * size if it is not fixed.
	 * The API should be updated to solve this problem. */
	info->reta_size = priv->ind_table_max_size;
	info->hash_key_size = ((*priv->rss_conf) ?
			       (*priv->rss_conf)[0]->rss_key_len :
			       0);
	info->speed_capa = priv->link_speed_capa;
	priv_unlock(priv);
}

/**
 * Get supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   Pointer to a static array of supported packet types terminated by
 *   RTE_PTYPE_UNKNOWN, or NULL if the current RX burst function does not
 *   report packet types.
 */
const uint32_t *
mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rxq_cq_to_pkt_type() */
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == mlx5_rx_burst)
		return ptypes;
	return NULL;
}

/**
 * Retrieve physical link information (unlocked version using legacy ioctl).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise (including failures).
 */
static int
mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_cmd edata = {
		.cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. */
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	int link_speed = 0;

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	ifr.ifr_data = (void *)&edata;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s",
		     strerror(errno));
		return -1;
	}
	link_speed = ethtool_cmd_speed(&edata);
	if (link_speed == -1)
		dev_link.link_speed = 0;
	else
		dev_link.link_speed = link_speed;
	priv->link_speed_capa = 0;
	if (edata.supported & SUPPORTED_Autoneg)
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	if (edata.supported & (SUPPORTED_1000baseT_Full |
			       SUPPORTED_1000baseKX_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (edata.supported & SUPPORTED_10000baseKR_Full)
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (edata.supported & (SUPPORTED_40000baseKR4_Full |
			       SUPPORTED_40000baseCR4_Full |
			       SUPPORTED_40000baseSR4_Full |
			       SUPPORTED_40000baseLR4_Full))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	dev_link.link_duplex = ((edata.duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
	/* Link status is still the same. */
	return -1;
}

/* Generate a bitmask from a bit number (the ETHTOOL_LINK_MODE_* constants
 * are bit indices, not masks). */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/**
 * Retrieve physical link information (unlocked version using the new ioctl
 * from Linux 4.5).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise (including failures and
 *   kernels without ETHTOOL_GLINKSETTINGS support).
 */
static int
mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
{
#ifdef ETHTOOL_GLINKSETTINGS
	struct priv *priv = mlx5_get_priv(dev);
	struct ethtool_link_settings gcmd = {
		.cmd = ETHTOOL_GLINKSETTINGS,
	};
	struct ifreq ifr;
	struct rte_eth_link dev_link;
	uint64_t sc;

	(void)wait_to_complete;
	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
		return -1;
	}
	memset(&dev_link, 0, sizeof(dev_link));
	dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
				(ifr.ifr_flags & IFF_RUNNING));
	/* First pass of the handshake: the kernel replies with the number of
	 * 32-bit words required for each link mode bitmap (negated). */
	ifr.ifr_data = (void *)&gcmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;

	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	/* Second pass: provide room for the supported, advertising and peer
	 * advertising bitmaps. */
	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
		      strerror(errno));
		return -1;
	}
	dev_link.link_speed = ecmd->speed;
	sc = ecmd->link_mode_masks[0] |
		((uint64_t)ecmd->link_mode_masks[1] << 32);
	priv->link_speed_capa = 0;
	if (sc & MLX5_BITSHIFT(ETHTOOL_LINK_MODE_Autoneg_BIT))
		priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
	/* Link speeds available in kernel v4.5. */
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseT_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_1G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_10G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_20G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_40G;
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_56G;
	/* Link speeds available in kernel v4.6. */
#ifdef HAVE_ETHTOOL_LINK_MODE_25G
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_25G;
#endif
#ifdef HAVE_ETHTOOL_LINK_MODE_50G
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_50G;
#endif
#ifdef HAVE_ETHTOOL_LINK_MODE_100G
	if (sc & (MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT) |
		  MLX5_BITSHIFT(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT)))
		priv->link_speed_capa |= ETH_LINK_SPEED_100G;
#endif
	dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
				ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
	dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				  ETH_LINK_SPEED_FIXED);
	if (memcmp(&dev_link, &dev->data->dev_link, sizeof(dev_link))) {
		/* Link status changed. */
		dev->data->dev_link = dev_link;
		return 0;
	}
#else
	(void)dev;
	(void)wait_to_complete;
#endif
	/* Link status is still the same. */
	return -1;
}

/**
 * Retrieve physical link information (unlocked version).
 *
 * ETHTOOL_GLINKSETTINGS is tried first, the legacy ETHTOOL_GSET ioctl is
 * only used as a fallback when it is unavailable (kernels older than
 * Linux 4.5) or fails.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise.
 */
int
mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete)
{
	int ret;

	ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
	if (ret < 0)
		ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
	return ret;
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 if the link status changed, -1 otherwise.
 */
int
mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct priv *priv = mlx5_get_priv(dev);
	int ret;

	priv_lock(priv);
	ret = mlx5_link_update_unlocked(dev, wait_to_complete);
	priv_unlock(priv);
	return ret;
}

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
 * received). Use this as a hint to enable/disable scattered packets support
 * and improve performance when not needed.
 * Since failure is not an option, reconfiguring queues on the fly is not
 * recommended.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct priv *priv = dev->data->dev_private;
	int ret = 0;
	unsigned int i;
	uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) =
		mlx5_rx_burst;
	unsigned int max_frame_len;
	int rehash;
	int restart = priv->started;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	/* Set kernel interface MTU first. */
	if (priv_set_mtu(priv, mtu)) {
		ret = errno;
		WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
		     strerror(ret));
		goto out;
	} else
		DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
	/* Temporarily replace RX handler with a fake one, assuming it has not
	 * been copied elsewhere. */
	dev->rx_pkt_burst = removed_rx_burst;
	/* Make sure everyone has left mlx5_rx_burst() and uses
	 * removed_rx_burst() instead. */
	rte_wmb();
	usleep(1000);
	/* MTU does not include header and CRC. */
	max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
	/* Check if at least one queue is going to need a SGE update. */
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		unsigned int mb_len;
		unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
		unsigned int sges_n;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/*
		 * Determine the number of SGEs needed for a full packet
		 * and round it to the next power of two.
		 */
		sges_n = log2above((size / mb_len) + !!(size % mb_len));
		if (sges_n != rxq->sges_n)
			break;
	}
	/*
	 * If all queues have the right number of SGEs, a simple rehash
	 * of their buffers is enough, otherwise SGE information can only
	 * be updated in a queue by recreating it. All resources that depend
	 * on queues (flows, indirection tables) must be recreated as well in
	 * that case.
	 */
	rehash = (i == priv->rxqs_n);
	if (!rehash) {
		/* Clean up everything as with mlx5_dev_stop(). */
		priv_special_flow_disable_all(priv);
		priv_mac_addrs_disable(priv);
		priv_destroy_hash_rxqs(priv);
		priv_fdir_disable(priv);
		priv_dev_interrupt_handler_uninstall(priv, dev);
	}
recover:
	/* Reconfigure each RX queue. */
	for (i = 0; (i != priv->rxqs_n); ++i) {
		struct rxq *rxq = (*priv->rxqs)[i];
		struct rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct rxq_ctrl, rxq);
		int sp;
		unsigned int mb_len;
		unsigned int tmp;

		if (rxq == NULL)
			continue;
		mb_len = rte_pktmbuf_data_room_size(rxq->mp);
		assert(mb_len >= RTE_PKTMBUF_HEADROOM);
		/* Toggle scattered support (sp) if necessary. */
		sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM));
		/* Provide new values to rxq_setup(). */
		dev->data->dev_conf.rxmode.jumbo_frame = sp;
		dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
		if (rehash)
			ret = rxq_rehash(dev, rxq_ctrl);
		else
			ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
					     rxq_ctrl->socket, NULL, rxq->mp);
		if (!ret)
			continue;
		/* Attempt to roll back in case of error. */
		tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
		if (max_frame_len != tmp) {
			max_frame_len = tmp;
			goto recover;
		}
		/* Double fault, disable RX. */
		break;
	}
	/*
	 * Use a safe RX burst function in case of error, otherwise mimic
	 * mlx5_dev_start().
	 */
	if (ret) {
		ERROR("unable to reconfigure RX queues, RX disabled");
		rx_func = removed_rx_burst;
	} else if (restart &&
		 !rehash &&
		 !priv_create_hash_rxqs(priv) &&
		 !priv_rehash_flows(priv)) {
		if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
			priv_fdir_enable(priv);
		priv_dev_interrupt_handler_install(priv, dev);
	}
	priv->mtu = mtu;
	/* Burst functions can now be called again. */
	rte_wmb();
	dev->rx_pkt_burst = rx_func;
out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}
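
/*
 * Worked example for the SGE computation above, assuming default mbuf sizes
 * (RTE_MBUF_DEFAULT_BUF_SIZE, i.e. 2048 bytes of data room plus a 128 byte
 * headroom, so mb_len = 2176): a 9000 byte MTU gives
 * max_frame_len = 14 + 9000 + 4 = 9018 and size = 128 + 9018 = 9146, which
 * spans 9146 / 2176 = 4 full mbufs plus a remainder, so
 * sges_n = log2above(5) = 3, i.e. 8 SGEs per packet.
 */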

/**
 * DPDK callback to get flow control status.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[out] fc_conf
 *   Flow control output buffer.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_GPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_GPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}

	fc_conf->autoneg = ethpause.autoneg;
	if (ethpause.rx_pause && ethpause.tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (ethpause.rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (ethpause.tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * DPDK callback to modify flow control parameters.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param[in] fc_conf
 *   Flow control parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct priv *priv = dev->data->dev_private;
	struct ifreq ifr;
	struct ethtool_pauseparam ethpause = {
		.cmd = ETHTOOL_SPAUSEPARAM
	};
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	ifr.ifr_data = (void *)&ethpause;
	ethpause.autoneg = fc_conf->autoneg;
	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_RX_PAUSE))
		ethpause.rx_pause = 1;
	else
		ethpause.rx_pause = 0;

	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
	    (fc_conf->mode & RTE_FC_TX_PAUSE))
		ethpause.tx_pause = 1;
	else
		ethpause.tx_pause = 0;

	priv_lock(priv);
	if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
		ret = errno;
		WARN("ioctl(SIOCETHTOOL, ETHTOOL_SPAUSEPARAM)"
		     " failed: %s",
		     strerror(ret));
		goto out;
	}
	ret = 0;

out:
	priv_unlock(priv);
	assert(ret >= 0);
	return -ret;
}

/**
 * Get PCI information from struct ibv_device.
 *
 * @param device
 *   Pointer to the IB device structure.
 * @param[out] pci_addr
 *   PCI bus address output buffer.
 *
 * @return
 *   0 on success, -1 on failure and errno is set.
 */
int
mlx5_ibv_device_to_pci_addr(const struct ibv_device *device,
			    struct rte_pci_addr *pci_addr)
{
	FILE *file;
	char line[32];
	int found = 0;
	MKSTR(path, "%s/device/uevent", device->ibdev_path);

	file = fopen(path, "rb");
	if (file == NULL)
		return -1;
	while (fgets(line, sizeof(line), file) == line) {
		size_t len = strlen(line);
		int ret;

		/* Truncate long lines. */
		if (len == (sizeof(line) - 1))
			while (line[(len - 1)] != '\n') {
				ret = fgetc(file);
				if (ret == EOF)
					break;
				line[(len - 1)] = ret;
			}
		/* Extract information. */
		if (sscanf(line,
			   "PCI_SLOT_NAME="
			   "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
			   &pci_addr->domain,
			   &pci_addr->bus,
			   &pci_addr->devid,
			   &pci_addr->function) == 4) {
			found = 1;
			break;
		}
	}
	fclose(file);
	if (!found) {
		/* The uevent file lacks a PCI_SLOT_NAME entry. */
		errno = ENODEV;
		return -1;
	}
	return 0;
}
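
/*
 * Example uevent line parsed above (illustrative address):
 *
 *	PCI_SLOT_NAME=0000:04:00.0
 *
 * yields domain 0x0000, bus 0x04, devid 0x00 and function 0x0.
 */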

/**
 * Link status handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 *
 * @return
 *   Nonzero if the callback process can be called immediately.
 */
static int
priv_dev_link_status_handler(struct priv *priv, struct rte_eth_dev *dev)
{
	struct ibv_async_event event;
	int port_change = 0;
	int ret = 0;

	/* Read all messages and acknowledge them. */
	for (;;) {
		if (ibv_get_async_event(priv->ctx, &event))
			break;

		if (event.event_type == IBV_EVENT_PORT_ACTIVE ||
		    event.event_type == IBV_EVENT_PORT_ERR)
			port_change = 1;
		else
			DEBUG("event type %d on port %d not handled",
			      event.event_type, event.element.port_num);
		ibv_ack_async_event(&event);
	}

	if (port_change ^ priv->pending_alarm) {
		struct rte_eth_link *link = &dev->data->dev_link;

		priv->pending_alarm = 0;
		mlx5_link_update_unlocked(dev, 0);
		if (((link->link_speed == 0) && link->link_status) ||
		    ((link->link_speed != 0) && !link->link_status)) {
			/* Inconsistent status, check again later. */
			priv->pending_alarm = 1;
			rte_eal_alarm_set(MLX5_ALARM_TIMEOUT_US,
					  mlx5_dev_link_status_handler,
					  dev);
		} else
			ret = 1;
	}
	return ret;
}
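
/*
 * Note on the (port_change ^ priv->pending_alarm) test above: the link is
 * re-evaluated either when a port event arrives while no alarm is pending,
 * or when the delayed alarm fires without a new event. A port event that
 * arrives while an alarm is already pending is left for that alarm to
 * handle.
 */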

/**
 * Handle delayed link status event.
 *
 * @param arg
 *   Registered argument.
 */
void
mlx5_dev_link_status_handler(void *arg)
{
	struct rte_eth_dev *dev = arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	priv_lock(priv);
	assert(priv->pending_alarm == 1);
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

/**
 * Handle interrupts from the NIC.
 *
 * @param[in] intr_handle
 *   Interrupt handler.
 * @param cb_arg
 *   Callback argument.
 */
void
mlx5_dev_interrupt_handler(struct rte_intr_handle *intr_handle, void *cb_arg)
{
	struct rte_eth_dev *dev = cb_arg;
	struct priv *priv = dev->data->dev_private;
	int ret;

	(void)intr_handle;
	priv_lock(priv);
	ret = priv_dev_link_status_handler(priv, dev);
	priv_unlock(priv);
	if (ret)
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}

/**
 * Uninstall interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev)
{
	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	rte_intr_callback_unregister(&priv->intr_handle,
				     mlx5_dev_interrupt_handler,
				     dev);
	if (priv->pending_alarm)
		rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev);
	priv->pending_alarm = 0;
	priv->intr_handle.fd = 0;
	priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
}

/**
 * Install interrupt handler.
 *
 * @param priv
 *   Pointer to private structure.
 * @param dev
 *   Pointer to the rte_eth_dev structure.
 */
void
priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev)
{
	int rc, flags;

	if (!dev->data->dev_conf.intr_conf.lsc)
		return;
	assert(priv->ctx->async_fd > 0);
	flags = fcntl(priv->ctx->async_fd, F_GETFL);
	rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
	if (rc < 0) {
		INFO("failed to make the async event queue non-blocking,"
		     " link status events disabled");
		dev->data->dev_conf.intr_conf.lsc = 0;
	} else {
		priv->intr_handle.fd = priv->ctx->async_fd;
		priv->intr_handle.type = RTE_INTR_HANDLE_EXT;
		rte_intr_callback_register(&priv->intr_handle,
					   mlx5_dev_interrupt_handler,
					   dev);
	}
}

/**
 * Change the link state (UP / DOWN).
 *
 * @param priv
 *   Pointer to private structure.
 * @param up
 *   Nonzero for link up, otherwise link down.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static int
priv_set_link(struct priv *priv, int up)
{
	struct rte_eth_dev *dev = priv->dev;
	int err;

	if (up) {
		err = priv_set_flags(priv, ~IFF_UP, IFF_UP);
		if (err)
			return err;
		priv_select_tx_function(priv);
		priv_select_rx_function(priv);
	} else {
		err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP);
		if (err)
			return err;
		dev->rx_pkt_burst = removed_rx_burst;
		dev->tx_pkt_burst = removed_tx_burst;
	}
	return 0;
}

/**
 * DPDK callback to bring the link DOWN.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_down(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 0);
	priv_unlock(priv);
	return err;
}

/**
 * DPDK callback to bring the link UP.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
mlx5_set_link_up(struct rte_eth_dev *dev)
{
	struct priv *priv = dev->data->dev_private;
	int err;

	priv_lock(priv);
	err = priv_set_link(priv, 1);
	priv_unlock(priv);
	return err;
}

/**
 * Configure secondary process queues from a private data pointer (primary
 * or secondary) and update burst callbacks. Can take place only once.
 *
 * All queues must have been previously created by the primary process to
 * avoid undefined behavior.
 *
 * @param priv
 *   Private data pointer from either primary or secondary process.
 *
 * @return
 *   Private data pointer from secondary process, NULL in case of error.
 */
struct priv *
mlx5_secondary_data_setup(struct priv *priv)
{
	unsigned int port_id = 0;
	struct mlx5_secondary_data *sd;
	void **tx_queues;
	void **rx_queues;
	unsigned int nb_tx_queues;
	unsigned int nb_rx_queues;
	unsigned int i;

	/* priv must be valid at this point. */
	assert(priv != NULL);
	/* priv->dev must also be valid but may point to local memory from
	 * another process, possibly with the same address and must not
	 * be dereferenced yet. */
	assert(priv->dev != NULL);
	/* Determine port ID by finding out where priv comes from. */
	while (1) {
		sd = &mlx5_secondary_data[port_id];
		rte_spinlock_lock(&sd->lock);
		/* Primary process? */
		if (sd->primary_priv == priv)
			break;
		/* Secondary process? */
		if (sd->data.dev_private == priv)
			break;
		rte_spinlock_unlock(&sd->lock);
		if (++port_id == RTE_DIM(mlx5_secondary_data))
			port_id = 0;
	}
	/* Switch to secondary private structure. If private data has already
	 * been updated by another thread, there is nothing else to do. */
	priv = sd->data.dev_private;
	if (priv->dev->data == &sd->data)
		goto end;
	/* Sanity checks. Secondary private structure is supposed to point
	 * to local eth_dev, itself still pointing to the shared device data
	 * structure allocated by the primary process. */
	assert(sd->shared_dev_data != &sd->data);
	assert(sd->data.nb_tx_queues == 0);
	assert(sd->data.tx_queues == NULL);
	assert(sd->data.nb_rx_queues == 0);
	assert(sd->data.rx_queues == NULL);
	assert(priv != sd->primary_priv);
	assert(priv->dev->data == sd->shared_dev_data);
	assert(priv->txqs_n == 0);
	assert(priv->txqs == NULL);
	assert(priv->rxqs_n == 0);
	assert(priv->rxqs == NULL);
	nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
	nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
	/* Allocate local storage for queues. */
	tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
				sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
				RTE_CACHE_LINE_SIZE);
	rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
				sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
				RTE_CACHE_LINE_SIZE);
	if (tx_queues == NULL || rx_queues == NULL)
		goto error;
	/* Lock to prevent control operations during setup. */
	priv_lock(priv);
	/* TX queues. */
	for (i = 0; i != nb_tx_queues; ++i) {
		struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
		struct txq_ctrl *primary_txq_ctrl;
		struct txq_ctrl *txq_ctrl;

		if (primary_txq == NULL)
			continue;
		primary_txq_ctrl = container_of(primary_txq,
						struct txq_ctrl, txq);
		txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl) +
					     (1 << primary_txq->elts_n) *
					     sizeof(struct rte_mbuf *), 0,
					     primary_txq_ctrl->socket);
		if (txq_ctrl != NULL) {
			if (txq_ctrl_setup(priv->dev,
					   txq_ctrl,
					   1 << primary_txq->elts_n,
					   primary_txq_ctrl->socket,
					   NULL) == 0) {
				txq_ctrl->txq.stats.idx =
					primary_txq->stats.idx;
				tx_queues[i] = &txq_ctrl->txq;
				continue;
			}
			rte_free(txq_ctrl);
		}
		while (i) {
			txq_ctrl = tx_queues[--i];
			txq_cleanup(txq_ctrl);
			rte_free(txq_ctrl);
		}
		goto error;
	}
	/* RX queues. */
	for (i = 0; i != nb_rx_queues; ++i) {
		struct rxq_ctrl *primary_rxq =
			container_of((*sd->primary_priv->rxqs)[i],
				     struct rxq_ctrl, rxq);

		if (primary_rxq == NULL)
			continue;
		/* Not supported yet. */
		rx_queues[i] = NULL;
	}
	/* Update everything. */
	priv->txqs = (void *)tx_queues;
	priv->txqs_n = nb_tx_queues;
	priv->rxqs = (void *)rx_queues;
	priv->rxqs_n = nb_rx_queues;
	sd->data.rx_queues = rx_queues;
	sd->data.tx_queues = tx_queues;
	sd->data.nb_rx_queues = nb_rx_queues;
	sd->data.nb_tx_queues = nb_tx_queues;
	sd->data.dev_link = sd->shared_dev_data->dev_link;
	sd->data.mtu = sd->shared_dev_data->mtu;
	memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
	       sizeof(sd->data.rx_queue_state));
	memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
	       sizeof(sd->data.tx_queue_state));
	sd->data.dev_flags = sd->shared_dev_data->dev_flags;
	/* Use local data from now on. */
	rte_mb();
	priv->dev->data = &sd->data;
	rte_mb();
	priv_select_tx_function(priv);
	priv_select_rx_function(priv);
	priv_unlock(priv);
end:
	/* More sanity checks. */
	assert(priv->dev->data == &sd->data);
	rte_spinlock_unlock(&sd->lock);
	return priv;
error:
	priv_unlock(priv);
	rte_free(tx_queues);
	rte_free(rx_queues);
	rte_spinlock_unlock(&sd->lock);
	return NULL;
}

/**
 * Configure the TX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_tx_function(struct priv *priv)
{
	priv->dev->tx_pkt_burst = mlx5_tx_burst;
	/* Display warning for unsupported configurations. */
	if (priv->sriov && priv->mps)
		WARN("multi-packet send WQE cannot be used on a SR-IOV setup");
	/* Select appropriate TX function. */
	if ((priv->sriov == 0) && priv->mps && priv->txq_inline) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline;
		DEBUG("selected MPW inline TX function");
	} else if ((priv->sriov == 0) && priv->mps) {
		priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw;
		DEBUG("selected MPW TX function");
	}
}
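
/*
 * Summary of the selection above (MPS = multi-packet send):
 *
 *	SR-IOV	MPS	txq_inline	tx_pkt_burst
 *	yes	any	any		mlx5_tx_burst
 *	no	no	any		mlx5_tx_burst
 *	no	yes	no		mlx5_tx_burst_mpw
 *	no	yes	yes		mlx5_tx_burst_mpw_inline
 */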

/**
 * Configure the RX function to use.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
priv_select_rx_function(struct priv *priv)
{
	priv->dev->rx_pkt_burst = mlx5_rx_burst;
}