/*-
 *   BSD LICENSE
 *
 *   Copyright 2015 6WIND S.A.
 *   Copyright 2015 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stddef.h>
#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

/* DPDK headers don't like -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif
#include "mlx5_utils.h"
#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"

/**
 * Allocate TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param elts_n
 *   Number of elements to allocate.
 */
static void
txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n)
{
	unsigned int i;

	for (i = 0; (i != elts_n); ++i)
		(*txq_ctrl->txq.elts)[i] = NULL;
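	/* Clear every send WQE so the hardware ring starts from a known,
	 * empty state. */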
	for (i = 0; (i != (1u << txq_ctrl->txq.wqe_n)); ++i) {
		volatile struct mlx5_wqe64 *wqe = &(*txq_ctrl->txq.wqes)[i];

		memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe));
	}
	DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;
}

/**
 * Free TX queue elements.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
static void
txq_free_elts(struct txq_ctrl *txq_ctrl)
{
	unsigned int elts_n = 1 << txq_ctrl->txq.elts_n;
	unsigned int elts_head = txq_ctrl->txq.elts_head;
	unsigned int elts_tail = txq_ctrl->txq.elts_tail;
	struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts;

	DEBUG("%p: freeing WRs", (void *)txq_ctrl);
	txq_ctrl->txq.elts_head = 0;
	txq_ctrl->txq.elts_tail = 0;
	txq_ctrl->txq.elts_comp = 0;

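	/* Only the entries between tail and head still reference mbufs owned
	 * by the queue; walk that range and release them one by one. */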
	while (elts_tail != elts_head) {
		struct rte_mbuf *elt = (*elts)[elts_tail];

		assert(elt != NULL);
		rte_pktmbuf_free_seg(elt);
#ifndef NDEBUG
		/* Poisoning. */
		memset(&(*elts)[elts_tail],
		       0x77,
		       sizeof((*elts)[elts_tail]));
#endif
		if (++elts_tail == elts_n)
			elts_tail = 0;
	}
}

/**
 * Clean up a TX queue.
 *
 * Destroy objects, free allocated memory and reset the structure for reuse.
 *
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 */
void
txq_cleanup(struct txq_ctrl *txq_ctrl)
{
	struct ibv_exp_release_intf_params params;
	size_t i;

	DEBUG("cleaning up %p", (void *)txq_ctrl);
	txq_free_elts(txq_ctrl);
	if (txq_ctrl->if_qp != NULL) {
		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		assert(txq_ctrl->qp != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
						txq_ctrl->if_qp,
						&params));
	}
	if (txq_ctrl->if_cq != NULL) {
		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		assert(txq_ctrl->cq != NULL);
		params = (struct ibv_exp_release_intf_params){
			.comp_mask = 0,
		};
		claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx,
						txq_ctrl->if_cq,
						&params));
	}
	if (txq_ctrl->qp != NULL)
		claim_zero(ibv_destroy_qp(txq_ctrl->qp));
	if (txq_ctrl->cq != NULL)
		claim_zero(ibv_destroy_cq(txq_ctrl->cq));
	if (txq_ctrl->rd != NULL) {
		struct ibv_exp_destroy_res_domain_attr attr = {
			.comp_mask = 0,
		};

		assert(txq_ctrl->priv != NULL);
		assert(txq_ctrl->priv->ctx != NULL);
		claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx,
						      txq_ctrl->rd,
						      &attr));
	}
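	/* mp2mr[] is filled in order, so the first NULL mempool entry marks
	 * the end of the registered memory regions. */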
	for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) {
		if (txq_ctrl->txq.mp2mr[i].mp == NULL)
			break;
		assert(txq_ctrl->txq.mp2mr[i].mr != NULL);
		claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr));
	}
	memset(txq_ctrl, 0, sizeof(*txq_ctrl));
}

/**
 * Initialize TX queue.
 *
 * @param tmpl
 *   Pointer to TX queue control template.
 * @param txq_ctrl
 *   Pointer to TX queue control.
 *
 * @return
 *   0 on success, errno value on failure.
 */
static inline int
txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl)
{
	struct mlx5_qp *qp = to_mqp(tmpl->qp);
	struct ibv_cq *ibcq = tmpl->cq;
	struct ibv_mlx5_cq_info cq_info;

	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
		ERROR("Unable to query CQ info, check your OFED.");
		return ENOTSUP;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
		return EINVAL;
	}
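	/* Extract the data path resources (WQE ring, CQE ring, doorbell
	 * records and BlueFlame register) from the low-level QP and CQ
	 * structures so the burst functions can access them directly. */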
	tmpl->txq.cqe_n = log2above(cq_info.cqe_cnt);
	tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8;
	tmpl->txq.wqes =
		(volatile struct mlx5_wqe64 (*)[])
		(uintptr_t)qp->gen_data.sqstart;
	tmpl->txq.wqe_n = log2above(qp->sq.wqe_cnt);
	tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR];
	tmpl->txq.bf_reg = qp->gen_data.bf->reg;
	tmpl->txq.cq_db = cq_info.dbrec;
	tmpl->txq.cqes =
		(volatile struct mlx5_cqe (*)[])
		(uintptr_t)cq_info.buf;
	tmpl->txq.elts =
		(struct rte_mbuf *(*)[1 << tmpl->txq.elts_n])
		((uintptr_t)txq_ctrl + sizeof(*txq_ctrl));
	return 0;
}

/**
 * Configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param txq_ctrl
 *   Pointer to TX queue structure.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
	       uint16_t desc, unsigned int socket,
	       const struct rte_eth_txconf *conf)
{
	struct priv *priv = mlx5_get_priv(dev);
	struct txq_ctrl tmpl = {
		.priv = priv,
		.socket = socket,
	};
	union {
		struct ibv_exp_query_intf_params params;
		struct ibv_exp_qp_init_attr init;
		struct ibv_exp_res_domain_init_attr rd;
		struct ibv_exp_cq_init_attr cq;
		struct ibv_exp_qp_attr mod;
		struct ibv_exp_cq_attr cq_attr;
	} attr;
	enum ibv_exp_query_intf_status status;
	int ret = 0;

	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		ret = ENOTSUP;
		ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set");
		goto error;
	}
	(void)conf; /* Thresholds configuration (ignored). */
	assert(desc > MLX5_TX_COMP_THRESH);
	tmpl.txq.elts_n = log2above(desc);
	/* MRs will be registered in mp2mr[] later. */
	attr.rd = (struct ibv_exp_res_domain_init_attr){
		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
		.thread_model = IBV_EXP_THREAD_SINGLE,
		.msg_model = IBV_EXP_MSG_HIGH_BW,
	};
	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
	if (tmpl.rd == NULL) {
		ret = ENOMEM;
		ERROR("%p: RD creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.cq = (struct ibv_exp_cq_init_attr){
		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
		.res_domain = tmpl.rd,
	};
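	/* Completions are requested roughly once every MLX5_TX_COMP_THRESH
	 * descriptors, so the CQ is sized accordingly (at least one CQE). */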
	tmpl.cq = ibv_exp_create_cq(priv->ctx,
				    (((desc / MLX5_TX_COMP_THRESH) - 1) ?
				     ((desc / MLX5_TX_COMP_THRESH) - 1) : 1),
				    NULL, NULL, 0, &attr.cq);
	if (tmpl.cq == NULL) {
		ret = ENOMEM;
		ERROR("%p: CQ creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("priv->device_attr.max_qp_wr is %d",
	      priv->device_attr.max_qp_wr);
	DEBUG("priv->device_attr.max_sge is %d",
	      priv->device_attr.max_sge);
	attr.init = (struct ibv_exp_qp_init_attr){
		/* CQ to be associated with the send queue. */
		.send_cq = tmpl.cq,
		/* CQ to be associated with the receive queue. */
		.recv_cq = tmpl.cq,
		.cap = {
			/* Max number of outstanding WRs. */
			.max_send_wr = ((priv->device_attr.max_qp_wr < desc) ?
					priv->device_attr.max_qp_wr :
					desc),
			/*
			 * Max number of scatter/gather elements in a WR,
			 * must be 1 to prevent libmlx5 from trying to
			 * allocate too much memory. TX gather is not impacted
			 * by the priv->device_attr.max_sge limit and will
			 * still work properly.
			 */
			.max_send_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
		/* Do *NOT* enable this, completion events are managed per
		 * TX burst. */
		.sq_sig_all = 0,
		.pd = priv->pd,
		.res_domain = tmpl.rd,
		.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
			      IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
	};
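	/* Inline data is accounted in units of RTE_CACHE_LINE_SIZE: round
	 * the requested txq_inline bytes up to a whole number of cache lines
	 * before passing it to the QP attributes. */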
	if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) {
		tmpl.txq.max_inline =
			((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
			 RTE_CACHE_LINE_SIZE);
		attr.init.cap.max_inline_data =
			tmpl.txq.max_inline * RTE_CACHE_LINE_SIZE;
	}
	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
	if (tmpl.qp == NULL) {
		ret = (errno ? errno : EINVAL);
		ERROR("%p: QP creation failure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u,"
	      " max_inline_data=%u",
	      attr.init.cap.max_send_wr,
	      attr.init.cap.max_send_sge,
	      attr.init.cap.max_inline_data);
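	/* A freshly created QP is in the RESET state; it must be moved
	 * through INIT, RTR and RTS before it can transmit. */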
	attr.mod = (struct ibv_exp_qp_attr){
		/* Move the QP to this state. */
		.qp_state = IBV_QPS_INIT,
		/* Primary port number. */
		.port_num = priv->port
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod,
				(IBV_EXP_QP_STATE | IBV_EXP_QP_PORT));
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_INIT failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	ret = txq_setup(&tmpl, txq_ctrl);
	if (ret) {
		ERROR("%p: cannot initialize TX queue structure: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	txq_alloc_elts(&tmpl, desc);
	attr.mod = (struct ibv_exp_qp_attr){
		.qp_state = IBV_QPS_RTR
	};
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTR failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
	attr.mod.qp_state = IBV_QPS_RTS;
	ret = ibv_exp_modify_qp(tmpl.qp, &attr.mod, IBV_EXP_QP_STATE);
	if (ret) {
		ERROR("%p: QP state to IBV_QPS_RTS failed: %s",
		      (void *)dev, strerror(ret));
		goto error;
	}
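	/* Query the verbs interface families for the CQ and QP; the QP query
	 * is also where multi-packet send gets enabled when supported. */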
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_CQ,
		.obj = tmpl.cq,
	};
	tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_cq == NULL) {
		ret = EINVAL;
		ERROR("%p: CQ interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	attr.params = (struct ibv_exp_query_intf_params){
		.intf_scope = IBV_EXP_INTF_GLOBAL,
		.intf = IBV_EXP_INTF_QP_BURST,
		.intf_version = 1,
		.obj = tmpl.qp,
		/* Enable multi-packet send if supported. */
		.family_flags =
			((priv->mps && !priv->sriov) ?
			 IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR :
			 0),
	};
	tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
	if (tmpl.if_qp == NULL) {
		ret = EINVAL;
		ERROR("%p: QP interface family query failed with status %d",
		      (void *)dev, status);
		goto error;
	}
	/* Clean up txq in case we're reinitializing it. */
	DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl);
	txq_cleanup(txq_ctrl);
	*txq_ctrl = tmpl;
	DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl);
	/* Pre-register known mempools. */
	rte_mempool_walk(txq_mp2mr_iter, txq_ctrl);
	assert(ret == 0);
	return 0;
error:
	txq_cleanup(&tmpl);
	assert(ret > 0);
	return ret;
}

/**
 * DPDK callback to configure a TX queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   TX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param[in] conf
 *   Thresholds parameters.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket, const struct rte_eth_txconf *conf)
{
	struct priv *priv = dev->data->dev_private;
	struct txq *txq = (*priv->txqs)[idx];
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	int ret;

	if (mlx5_is_secondary())
		return -E_RTE_SECONDARY;

	priv_lock(priv);
	if (desc <= MLX5_TX_COMP_THRESH) {
		WARN("%p: number of descriptors requested for TX queue %u"
		     " must be higher than MLX5_TX_COMP_THRESH, using"
		     " %u instead of %u",
		     (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc);
		desc = MLX5_TX_COMP_THRESH + 1;
	}
	if (!rte_is_power_of_2(desc)) {
		desc = 1 << log2above(desc);
		WARN("%p: increased number of descriptors in TX queue %u"
		     " to the next power of two (%d)",
		     (void *)dev, idx, desc);
	}
	DEBUG("%p: configuring queue %u for %u descriptors",
	      (void *)dev, idx, desc);
	if (idx >= priv->txqs_n) {
		ERROR("%p: queue index out of range (%u >= %u)",
		      (void *)dev, idx, priv->txqs_n);
		priv_unlock(priv);
		return -EOVERFLOW;
	}
	if (txq != NULL) {
		DEBUG("%p: reusing already allocated queue index %u (%p)",
		      (void *)dev, idx, (void *)txq);
		if (priv->started) {
			priv_unlock(priv);
			return -EEXIST;
		}
		(*priv->txqs)[idx] = NULL;
		txq_cleanup(txq_ctrl);
		/* Resize if the txq size has changed. */
		if (txq_ctrl->txq.elts_n != log2above(desc)) {
			txq_ctrl = rte_realloc(txq_ctrl,
					       sizeof(*txq_ctrl) +
					       desc * sizeof(struct rte_mbuf *),
					       RTE_CACHE_LINE_SIZE);
			if (!txq_ctrl) {
				ERROR("%p: unable to reallocate queue index %u",
					(void *)dev, idx);
				priv_unlock(priv);
				return -ENOMEM;
			}
		}
	} else {
		txq_ctrl =
			rte_calloc_socket("TXQ", 1,
					  sizeof(*txq_ctrl) +
					  desc * sizeof(struct rte_mbuf *),
					  0, socket);
		if (txq_ctrl == NULL) {
			ERROR("%p: unable to allocate queue index %u",
			      (void *)dev, idx);
			priv_unlock(priv);
			return -ENOMEM;
		}
	}
	ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf);
	if (ret)
		rte_free(txq_ctrl);
	else {
		txq_ctrl->txq.stats.idx = idx;
		DEBUG("%p: adding TX queue %p to list",
		      (void *)dev, (void *)txq_ctrl);
		(*priv->txqs)[idx] = &txq_ctrl->txq;
		/* Update send callback. */
		priv_select_tx_function(priv);
	}
	priv_unlock(priv);
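	/* txq_ctrl_setup() returns a positive errno value; this DPDK
	 * callback must return its negative. */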
	return -ret;
}

/**
 * DPDK callback to release a TX queue.
 *
 * @param dpdk_txq
 *   Generic TX queue pointer.
 */
void
mlx5_tx_queue_release(void *dpdk_txq)
{
	struct txq *txq = (struct txq *)dpdk_txq;
	struct txq_ctrl *txq_ctrl;
	struct priv *priv;
	unsigned int i;

	if (mlx5_is_secondary())
		return;

	if (txq == NULL)
		return;
	txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	priv = txq_ctrl->priv;
	priv_lock(priv);
	for (i = 0; (i != priv->txqs_n); ++i)
		if ((*priv->txqs)[i] == txq) {
			DEBUG("%p: removing TX queue %p from list",
			      (void *)priv->dev, (void *)txq_ctrl);
			(*priv->txqs)[i] = NULL;
			break;
		}
	txq_cleanup(txq_ctrl);
	rte_free(txq_ctrl);
	priv_unlock(priv);
}

/**
 * DPDK callback for TX in secondary processes.
 *
 * This function configures all queues from primary process information
 * if necessary before reverting to the normal TX burst callback.
 *
 * @param dpdk_txq
 *   Generic pointer to TX queue structure.
 * @param[in] pkts
 *   Packets to transmit.
 * @param pkts_n
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= pkts_n).
 */
uint16_t
mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts,
			      uint16_t pkts_n)
{
	struct txq *txq = dpdk_txq;
	struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq);
	struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv);
	struct priv *primary_priv;
	unsigned int index;

	if (priv == NULL)
		return 0;
	primary_priv =
		mlx5_secondary_data[priv->dev->data->port_id].primary_priv;
	/* Look for queue index in both private structures. */
	for (index = 0; index != priv->txqs_n; ++index)
		if (((*primary_priv->txqs)[index] == txq) ||
		    ((*priv->txqs)[index] == txq))
			break;
	if (index == priv->txqs_n)
		return 0;
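	/* From here on, use the queue instance that belongs to this
	 * (secondary) process before handing over to the real burst
	 * function. */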
	txq = (*priv->txqs)[index];
	return priv->dev->tx_pkt_burst(txq, pkts, pkts_n);
}