197f17497SC.J. Collier/*-
297f17497SC.J. Collier *   BSD LICENSE
397f17497SC.J. Collier *
497f17497SC.J. Collier *   Copyright 2015 6WIND S.A.
597f17497SC.J. Collier *   Copyright 2015 Mellanox.
697f17497SC.J. Collier *
797f17497SC.J. Collier *   Redistribution and use in source and binary forms, with or without
897f17497SC.J. Collier *   modification, are permitted provided that the following conditions
997f17497SC.J. Collier *   are met:
1097f17497SC.J. Collier *
1197f17497SC.J. Collier *     * Redistributions of source code must retain the above copyright
1297f17497SC.J. Collier *       notice, this list of conditions and the following disclaimer.
1397f17497SC.J. Collier *     * Redistributions in binary form must reproduce the above copyright
1497f17497SC.J. Collier *       notice, this list of conditions and the following disclaimer in
1597f17497SC.J. Collier *       the documentation and/or other materials provided with the
1697f17497SC.J. Collier *       distribution.
1797f17497SC.J. Collier *     * Neither the name of 6WIND S.A. nor the names of its
1897f17497SC.J. Collier *       contributors may be used to endorse or promote products derived
1997f17497SC.J. Collier *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
3397f17497SC.J. Collier
3497f17497SC.J. Collier#include <stddef.h>
3597f17497SC.J. Collier#include <assert.h>
3697f17497SC.J. Collier#include <errno.h>
3797f17497SC.J. Collier#include <string.h>
3897f17497SC.J. Collier#include <stdint.h>
3997f17497SC.J. Collier
4097f17497SC.J. Collier/* Verbs header. */
4197f17497SC.J. Collier/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
4297f17497SC.J. Collier#ifdef PEDANTIC
4332e04ea0SChristian Ehrhardt#pragma GCC diagnostic ignored "-Wpedantic"
4497f17497SC.J. Collier#endif
4597f17497SC.J. Collier#include <infiniband/verbs.h>
468b25d1adSChristian Ehrhardt#include <infiniband/arch.h>
478b25d1adSChristian Ehrhardt#include <infiniband/mlx5_hw.h>
4897f17497SC.J. Collier#ifdef PEDANTIC
4932e04ea0SChristian Ehrhardt#pragma GCC diagnostic error "-Wpedantic"
5097f17497SC.J. Collier#endif
5197f17497SC.J. Collier
5297f17497SC.J. Collier/* DPDK headers don't like -pedantic. */
5397f17497SC.J. Collier#ifdef PEDANTIC
5432e04ea0SChristian Ehrhardt#pragma GCC diagnostic ignored "-Wpedantic"
5597f17497SC.J. Collier#endif
5697f17497SC.J. Collier#include <rte_mbuf.h>
5797f17497SC.J. Collier#include <rte_malloc.h>
5897f17497SC.J. Collier#include <rte_ethdev.h>
5997f17497SC.J. Collier#include <rte_common.h>
6097f17497SC.J. Collier#ifdef PEDANTIC
6132e04ea0SChristian Ehrhardt#pragma GCC diagnostic error "-Wpedantic"
6297f17497SC.J. Collier#endif
6397f17497SC.J. Collier
6497f17497SC.J. Collier#include "mlx5.h"
6597f17497SC.J. Collier#include "mlx5_rxtx.h"
6697f17497SC.J. Collier#include "mlx5_utils.h"
6797f17497SC.J. Collier#include "mlx5_autoconf.h"
6897f17497SC.J. Collier#include "mlx5_defs.h"
6997f17497SC.J. Collier
/* Initialization data for hash RX queues.
 *
 * Each entry describes one hash RX queue type: the Verbs hash fields it
 * uses, the equivalent DPDK RSS hash flags, the flow rule priority and
 * flow specification header, plus a pointer to the less specific
 * ("underlayer") entry it is stacked upon when building a complete flow
 * attribute (see priv_flow_attr()). */
const struct hash_rxq_init hash_rxq_init[] = {
	/* TCPv4: hash on IPv4 addresses and TCP ports, layered on IPv4. */
	[HASH_RXQ_TCPV4] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
				IBV_EXP_RX_HASH_DST_IPV4 |
				IBV_EXP_RX_HASH_SRC_PORT_TCP |
				IBV_EXP_RX_HASH_DST_PORT_TCP),
		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
		.flow_priority = 0,
		.flow_spec.tcp_udp = {
			.type = IBV_EXP_FLOW_SPEC_TCP,
			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
	},
	/* UDPv4: hash on IPv4 addresses and UDP ports, layered on IPv4. */
	[HASH_RXQ_UDPV4] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
				IBV_EXP_RX_HASH_DST_IPV4 |
				IBV_EXP_RX_HASH_SRC_PORT_UDP |
				IBV_EXP_RX_HASH_DST_PORT_UDP),
		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
		.flow_priority = 0,
		.flow_spec.tcp_udp = {
			.type = IBV_EXP_FLOW_SPEC_UDP,
			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
	},
	/* IPv4: hash on addresses only, layered on Ethernet. */
	[HASH_RXQ_IPV4] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
				IBV_EXP_RX_HASH_DST_IPV4),
		.dpdk_rss_hf = (ETH_RSS_IPV4 |
				ETH_RSS_FRAG_IPV4),
		.flow_priority = 1,
		.flow_spec.ipv4 = {
			.type = IBV_EXP_FLOW_SPEC_IPV4,
			.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
	},
	/* TCPv6: hash on IPv6 addresses and TCP ports, layered on IPv6. */
	[HASH_RXQ_TCPV6] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
				IBV_EXP_RX_HASH_DST_IPV6 |
				IBV_EXP_RX_HASH_SRC_PORT_TCP |
				IBV_EXP_RX_HASH_DST_PORT_TCP),
		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
		.flow_priority = 0,
		.flow_spec.tcp_udp = {
			.type = IBV_EXP_FLOW_SPEC_TCP,
			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
	},
	/* UDPv6: hash on IPv6 addresses and UDP ports, layered on IPv6. */
	[HASH_RXQ_UDPV6] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
				IBV_EXP_RX_HASH_DST_IPV6 |
				IBV_EXP_RX_HASH_SRC_PORT_UDP |
				IBV_EXP_RX_HASH_DST_PORT_UDP),
		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
		.flow_priority = 0,
		.flow_spec.tcp_udp = {
			.type = IBV_EXP_FLOW_SPEC_UDP,
			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
	},
	/* IPv6: hash on addresses only, layered on Ethernet. */
	[HASH_RXQ_IPV6] = {
		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
				IBV_EXP_RX_HASH_DST_IPV6),
		.dpdk_rss_hf = (ETH_RSS_IPV6 |
				ETH_RSS_FRAG_IPV6),
		.flow_priority = 1,
		.flow_spec.ipv6 = {
			.type = IBV_EXP_FLOW_SPEC_IPV6,
			.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
		},
		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
	},
	/* Ethernet: catch-all, no hashing, lowest priority, no underlayer. */
	[HASH_RXQ_ETH] = {
		.hash_fields = 0,
		.dpdk_rss_hf = 0,
		.flow_priority = 2,
		.flow_spec.eth = {
			.type = IBV_EXP_FLOW_SPEC_ETH,
			.size = sizeof(hash_rxq_init[0].flow_spec.eth),
		},
		.underlayer = NULL,
	},
};

/* Number of entries in hash_rxq_init[]. */
const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
16297f17497SC.J. Collier
/* Initialization data for hash RX queue indirection tables.
 *
 * Each entry groups a set of hash RX queue types (bit mask indexed by
 * enum hash_rxq_type) sharing one indirection table, together with the
 * maximum table size. Entries are later filtered against the enabled
 * RSS configuration by priv_make_ind_table_init(). */
static const struct ind_table_init ind_table_init[] = {
	{
		.max_size = -1u, /* Superseded by HW limitations. */
		.hash_types =
			1 << HASH_RXQ_TCPV4 |
			1 << HASH_RXQ_UDPV4 |
			1 << HASH_RXQ_IPV4 |
			1 << HASH_RXQ_TCPV6 |
			1 << HASH_RXQ_UDPV6 |
			1 << HASH_RXQ_IPV6 |
			0,
		.hash_types_n = 6,
	},
	{
		/* Single-WQ table for the Ethernet catch-all queue. */
		.max_size = 1,
		.hash_types = 1 << HASH_RXQ_ETH,
		.hash_types_n = 1,
	},
};

/* Number of entries in ind_table_init[]. */
#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
18597f17497SC.J. Collier
/* Default RSS hash key also used for ConnectX-3.
 * Used when the application does not provide its own Toeplitz key
 * (see hash_conf setup in priv_create_hash_rxqs()). */
uint8_t rss_hash_default_key[] = {
	0x2c, 0xc6, 0x81, 0xd1,
	0x5b, 0xdb, 0xf4, 0xf7,
	0xfc, 0xa2, 0x83, 0x19,
	0xdb, 0x1a, 0x3e, 0x94,
	0x6b, 0x9e, 0x38, 0xd9,
	0x2c, 0x9c, 0x03, 0xd1,
	0xad, 0x99, 0x44, 0xa7,
	0xd9, 0x56, 0x3d, 0x59,
	0x06, 0x3c, 0x25, 0xf3,
	0xfc, 0x1f, 0xdc, 0x2a,
};

/* Length of the default RSS hash key. */
const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
20297f17497SC.J. Collier
20397f17497SC.J. Collier/**
20497f17497SC.J. Collier * Populate flow steering rule for a given hash RX queue type using
20597f17497SC.J. Collier * information from hash_rxq_init[]. Nothing is written to flow_attr when
20697f17497SC.J. Collier * flow_attr_size is not large enough, but the required size is still returned.
20797f17497SC.J. Collier *
20897f17497SC.J. Collier * @param priv
20997f17497SC.J. Collier *   Pointer to private structure.
21097f17497SC.J. Collier * @param[out] flow_attr
21197f17497SC.J. Collier *   Pointer to flow attribute structure to fill. Note that the allocated
21297f17497SC.J. Collier *   area must be larger and large enough to hold all flow specifications.
21397f17497SC.J. Collier * @param flow_attr_size
21497f17497SC.J. Collier *   Entire size of flow_attr and trailing room for flow specifications.
21597f17497SC.J. Collier * @param type
21697f17497SC.J. Collier *   Hash RX queue type to use for flow steering rule.
21797f17497SC.J. Collier *
21897f17497SC.J. Collier * @return
21997f17497SC.J. Collier *   Total size of the flow attribute buffer. No errors are defined.
22097f17497SC.J. Collier */
22197f17497SC.J. Colliersize_t
22297f17497SC.J. Collierpriv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr,
22397f17497SC.J. Collier	       size_t flow_attr_size, enum hash_rxq_type type)
22497f17497SC.J. Collier{
22597f17497SC.J. Collier	size_t offset = sizeof(*flow_attr);
22697f17497SC.J. Collier	const struct hash_rxq_init *init = &hash_rxq_init[type];
22797f17497SC.J. Collier
22897f17497SC.J. Collier	assert(priv != NULL);
22997f17497SC.J. Collier	assert((size_t)type < RTE_DIM(hash_rxq_init));
23097f17497SC.J. Collier	do {
23197f17497SC.J. Collier		offset += init->flow_spec.hdr.size;
23297f17497SC.J. Collier		init = init->underlayer;
23397f17497SC.J. Collier	} while (init != NULL);
23497f17497SC.J. Collier	if (offset > flow_attr_size)
23597f17497SC.J. Collier		return offset;
23697f17497SC.J. Collier	flow_attr_size = offset;
23797f17497SC.J. Collier	init = &hash_rxq_init[type];
23897f17497SC.J. Collier	*flow_attr = (struct ibv_exp_flow_attr){
23997f17497SC.J. Collier		.type = IBV_EXP_FLOW_ATTR_NORMAL,
24097f17497SC.J. Collier		/* Priorities < 3 are reserved for flow director. */
24197f17497SC.J. Collier		.priority = init->flow_priority + 3,
24297f17497SC.J. Collier		.num_of_specs = 0,
24397f17497SC.J. Collier		.port = priv->port,
24497f17497SC.J. Collier		.flags = 0,
24597f17497SC.J. Collier	};
24697f17497SC.J. Collier	do {
24797f17497SC.J. Collier		offset -= init->flow_spec.hdr.size;
24897f17497SC.J. Collier		memcpy((void *)((uintptr_t)flow_attr + offset),
24997f17497SC.J. Collier		       &init->flow_spec,
25097f17497SC.J. Collier		       init->flow_spec.hdr.size);
25197f17497SC.J. Collier		++flow_attr->num_of_specs;
25297f17497SC.J. Collier		init = init->underlayer;
25397f17497SC.J. Collier	} while (init != NULL);
25497f17497SC.J. Collier	return flow_attr_size;
25597f17497SC.J. Collier}
25697f17497SC.J. Collier
25797f17497SC.J. Collier/**
25897f17497SC.J. Collier * Convert hash type position in indirection table initializer to
25997f17497SC.J. Collier * hash RX queue type.
26097f17497SC.J. Collier *
26197f17497SC.J. Collier * @param table
26297f17497SC.J. Collier *   Indirection table initializer.
26397f17497SC.J. Collier * @param pos
26497f17497SC.J. Collier *   Hash type position.
26597f17497SC.J. Collier *
26697f17497SC.J. Collier * @return
26797f17497SC.J. Collier *   Hash RX queue type.
26897f17497SC.J. Collier */
26997f17497SC.J. Collierstatic enum hash_rxq_type
27097f17497SC.J. Collierhash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
27197f17497SC.J. Collier{
2728b25d1adSChristian Ehrhardt	enum hash_rxq_type type = HASH_RXQ_TCPV4;
27397f17497SC.J. Collier
27497f17497SC.J. Collier	assert(pos < table->hash_types_n);
27597f17497SC.J. Collier	do {
27697f17497SC.J. Collier		if ((table->hash_types & (1 << type)) && (pos-- == 0))
27797f17497SC.J. Collier			break;
27897f17497SC.J. Collier		++type;
27997f17497SC.J. Collier	} while (1);
28097f17497SC.J. Collier	return type;
28197f17497SC.J. Collier}
28297f17497SC.J. Collier
28397f17497SC.J. Collier/**
28497f17497SC.J. Collier * Filter out disabled hash RX queue types from ind_table_init[].
28597f17497SC.J. Collier *
28697f17497SC.J. Collier * @param priv
28797f17497SC.J. Collier *   Pointer to private structure.
28897f17497SC.J. Collier * @param[out] table
28997f17497SC.J. Collier *   Output table.
29097f17497SC.J. Collier *
29197f17497SC.J. Collier * @return
29297f17497SC.J. Collier *   Number of table entries.
29397f17497SC.J. Collier */
29497f17497SC.J. Collierstatic unsigned int
29597f17497SC.J. Collierpriv_make_ind_table_init(struct priv *priv,
29697f17497SC.J. Collier			 struct ind_table_init (*table)[IND_TABLE_INIT_N])
29797f17497SC.J. Collier{
29897f17497SC.J. Collier	uint64_t rss_hf;
29997f17497SC.J. Collier	unsigned int i;
30097f17497SC.J. Collier	unsigned int j;
30197f17497SC.J. Collier	unsigned int table_n = 0;
30297f17497SC.J. Collier	/* Mandatory to receive frames not handled by normal hash RX queues. */
30397f17497SC.J. Collier	unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
30497f17497SC.J. Collier
30597f17497SC.J. Collier	rss_hf = priv->rss_hf;
30697f17497SC.J. Collier	/* Process other protocols only if more than one queue. */
30797f17497SC.J. Collier	if (priv->rxqs_n > 1)
30897f17497SC.J. Collier		for (i = 0; (i != hash_rxq_init_n); ++i)
30997f17497SC.J. Collier			if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
31097f17497SC.J. Collier				hash_types_sup |= (1 << i);
31197f17497SC.J. Collier
31297f17497SC.J. Collier	/* Filter out entries whose protocols are not in the set. */
31397f17497SC.J. Collier	for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
31497f17497SC.J. Collier		unsigned int nb;
31597f17497SC.J. Collier		unsigned int h;
31697f17497SC.J. Collier
31797f17497SC.J. Collier		/* j is increased only if the table has valid protocols. */
31897f17497SC.J. Collier		assert(j <= i);
31997f17497SC.J. Collier		(*table)[j] = ind_table_init[i];
32097f17497SC.J. Collier		(*table)[j].hash_types &= hash_types_sup;
32197f17497SC.J. Collier		for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
32297f17497SC.J. Collier			if (((*table)[j].hash_types >> h) & 0x1)
32397f17497SC.J. Collier				++nb;
32497f17497SC.J. Collier		(*table)[i].hash_types_n = nb;
32597f17497SC.J. Collier		if (nb) {
32697f17497SC.J. Collier			++table_n;
32797f17497SC.J. Collier			++j;
32897f17497SC.J. Collier		}
32997f17497SC.J. Collier	}
33097f17497SC.J. Collier	return table_n;
33197f17497SC.J. Collier}
33297f17497SC.J. Collier
/**
 * Initialize hash RX queues and indirection table.
 *
 * Builds the list of work queues from the RETA configuration, creates
 * one RX indirection table per (filtered) ind_table_init[] entry, then
 * one hash RX queue (QP) per hash type of each table. On failure, all
 * resources created so far are released.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   0 on success, errno value on failure.
 */
int
priv_create_hash_rxqs(struct priv *priv)
{
	/* One WQ per RETA entry, gathered from the RX queues below. */
	struct ibv_exp_wq *wqs[priv->reta_idx_n];
	struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
	unsigned int ind_tables_n =
		priv_make_ind_table_init(priv, &ind_table_init);
	unsigned int hash_rxqs_n = 0;
	struct hash_rxq (*hash_rxqs)[] = NULL;
	struct ibv_exp_rwq_ind_table *(*ind_tables)[] = NULL;
	unsigned int i;
	unsigned int j;
	unsigned int k;
	int err = 0;

	/* Must not be called twice without priv_destroy_hash_rxqs(). */
	assert(priv->ind_tables == NULL);
	assert(priv->ind_tables_n == 0);
	assert(priv->hash_rxqs == NULL);
	assert(priv->hash_rxqs_n == 0);
	assert(priv->pd != NULL);
	assert(priv->ctx != NULL);
	if (priv->rxqs_n == 0)
		return EINVAL;
	assert(priv->rxqs != NULL);
	if (ind_tables_n == 0) {
		ERROR("all hash RX queue types have been filtered out,"
		      " indirection table cannot be created");
		return EINVAL;
	}
	if (priv->rxqs_n & (priv->rxqs_n - 1)) {
		INFO("%u RX queues are configured, consider rounding this"
		     " number to the next power of two for better balancing",
		     priv->rxqs_n);
		DEBUG("indirection table extended to assume %u WQs",
		      priv->reta_idx_n);
	}
	/* Collect the WQ of every RX queue referenced by the RETA. */
	for (i = 0; (i != priv->reta_idx_n); ++i) {
		struct rxq_ctrl *rxq_ctrl;

		rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
					struct rxq_ctrl, rxq);
		wqs[i] = rxq_ctrl->wq;
	}
	/* Get number of hash RX queues to configure. */
	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
		hash_rxqs_n += ind_table_init[i].hash_types_n;
	DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
	      hash_rxqs_n, priv->rxqs_n, ind_tables_n);
	/* Create indirection tables. */
	ind_tables = rte_calloc(__func__, ind_tables_n,
				sizeof((*ind_tables)[0]), 0);
	if (ind_tables == NULL) {
		err = ENOMEM;
		ERROR("cannot allocate indirection tables container: %s",
		      strerror(err));
		goto error;
	}
	for (i = 0; (i != ind_tables_n); ++i) {
		struct ibv_exp_rwq_ind_table_init_attr ind_init_attr = {
			.pd = priv->pd,
			.log_ind_tbl_size = 0, /* Set below. */
			.ind_tbl = wqs,
			.comp_mask = 0,
		};
		unsigned int ind_tbl_size = ind_table_init[i].max_size;
		struct ibv_exp_rwq_ind_table *ind_table;

		/* Table size is bounded by the number of RETA entries. */
		if (priv->reta_idx_n < ind_tbl_size)
			ind_tbl_size = priv->reta_idx_n;
		ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
		errno = 0;
		ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
							 &ind_init_attr);
		if (ind_table != NULL) {
			(*ind_tables)[i] = ind_table;
			continue;
		}
		/* Not clear whether errno is set. */
		err = (errno ? errno : EINVAL);
		ERROR("RX indirection table creation failed with error %d: %s",
		      err, strerror(err));
		goto error;
	}
	/* Allocate array that holds hash RX queues and related data. */
	hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
			       sizeof((*hash_rxqs)[0]), 0);
	if (hash_rxqs == NULL) {
		err = ENOMEM;
		ERROR("cannot allocate hash RX queues container: %s",
		      strerror(err));
		goto error;
	}
	/* i indexes hash RX queues, j indirection tables, k the hash type
	 * position within the current indirection table. */
	for (i = 0, j = 0, k = 0;
	     ((i != hash_rxqs_n) && (j != ind_tables_n));
	     ++i) {
		struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
		enum hash_rxq_type type =
			hash_rxq_type_from_pos(&ind_table_init[j], k);
		struct rte_eth_rss_conf *priv_rss_conf =
			(*priv->rss_conf)[type];
		/* Fall back to the default Toeplitz key when no RSS
		 * configuration exists for this hash type. */
		struct ibv_exp_rx_hash_conf hash_conf = {
			.rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
			.rx_hash_key_len = (priv_rss_conf ?
					    priv_rss_conf->rss_key_len :
					    rss_hash_default_key_len),
			.rx_hash_key = (priv_rss_conf ?
					priv_rss_conf->rss_key :
					rss_hash_default_key),
			.rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
			.rwq_ind_tbl = (*ind_tables)[j],
		};
		struct ibv_exp_qp_init_attr qp_init_attr = {
			.max_inl_recv = 0, /* Currently not supported. */
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
				      IBV_EXP_QP_INIT_ATTR_RX_HASH),
			.pd = priv->pd,
			.rx_hash_conf = &hash_conf,
			.port_num = priv->port,
		};

		DEBUG("using indirection table %u for hash RX queue %u type %d",
		      j, i, type);
		*hash_rxq = (struct hash_rxq){
			.priv = priv,
			.qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
			.type = type,
		};
		if (hash_rxq->qp == NULL) {
			err = (errno ? errno : EINVAL);
			ERROR("Hash RX QP creation failure: %s",
			      strerror(err));
			goto error;
		}
		if (++k < ind_table_init[j].hash_types_n)
			continue;
		/* Switch to the next indirection table and reset hash RX
		 * queue type array index. */
		++j;
		k = 0;
	}
	/* Publish everything in the private structure only on success. */
	priv->ind_tables = ind_tables;
	priv->ind_tables_n = ind_tables_n;
	priv->hash_rxqs = hash_rxqs;
	priv->hash_rxqs_n = hash_rxqs_n;
	assert(err == 0);
	return 0;
error:
	/* Release everything created so far; entries never reached are
	 * NULL thanks to rte_calloc() and are skipped. */
	if (hash_rxqs != NULL) {
		for (i = 0; (i != hash_rxqs_n); ++i) {
			struct ibv_qp *qp = (*hash_rxqs)[i].qp;

			if (qp == NULL)
				continue;
			claim_zero(ibv_destroy_qp(qp));
		}
		rte_free(hash_rxqs);
	}
	if (ind_tables != NULL) {
		for (j = 0; (j != ind_tables_n); ++j) {
			struct ibv_exp_rwq_ind_table *ind_table =
				(*ind_tables)[j];

			if (ind_table == NULL)
				continue;
			claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
		}
		rte_free(ind_tables);
	}
	return err;
}
51397f17497SC.J. Collier
51497f17497SC.J. Collier/**
51597f17497SC.J. Collier * Clean up hash RX queues and indirection table.
51697f17497SC.J. Collier *
51797f17497SC.J. Collier * @param priv
51897f17497SC.J. Collier *   Pointer to private structure.
51997f17497SC.J. Collier */
52097f17497SC.J. Colliervoid
52197f17497SC.J. Collierpriv_destroy_hash_rxqs(struct priv *priv)
52297f17497SC.J. Collier{
52397f17497SC.J. Collier	unsigned int i;
52497f17497SC.J. Collier
52597f17497SC.J. Collier	DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
52697f17497SC.J. Collier	if (priv->hash_rxqs_n == 0) {
52797f17497SC.J. Collier		assert(priv->hash_rxqs == NULL);
52897f17497SC.J. Collier		assert(priv->ind_tables == NULL);
52997f17497SC.J. Collier		return;
53097f17497SC.J. Collier	}
53197f17497SC.J. Collier	for (i = 0; (i != priv->hash_rxqs_n); ++i) {
53297f17497SC.J. Collier		struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
53397f17497SC.J. Collier		unsigned int j, k;
53497f17497SC.J. Collier
53597f17497SC.J. Collier		assert(hash_rxq->priv == priv);
53697f17497SC.J. Collier		assert(hash_rxq->qp != NULL);
53797f17497SC.J. Collier		/* Also check that there are no remaining flows. */
53897f17497SC.J. Collier		for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
53997f17497SC.J. Collier			for (k = 0;
54097f17497SC.J. Collier			     (k != RTE_DIM(hash_rxq->special_flow[j]));
54197f17497SC.J. Collier			     ++k)
54297f17497SC.J. Collier				assert(hash_rxq->special_flow[j][k] == NULL);
54397f17497SC.J. Collier		for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
54497f17497SC.J. Collier			for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
54597f17497SC.J. Collier				assert(hash_rxq->mac_flow[j][k] == NULL);
54697f17497SC.J. Collier		claim_zero(ibv_destroy_qp(hash_rxq->qp));
54797f17497SC.J. Collier	}
54897f17497SC.J. Collier	priv->hash_rxqs_n = 0;
54997f17497SC.J. Collier	rte_free(priv->hash_rxqs);
55097f17497SC.J. Collier	priv->hash_rxqs = NULL;
55197f17497SC.J. Collier	for (i = 0; (i != priv->ind_tables_n); ++i) {
55297f17497SC.J. Collier		struct ibv_exp_rwq_ind_table *ind_table =
55397f17497SC.J. Collier			(*priv->ind_tables)[i];
55497f17497SC.J. Collier
55597f17497SC.J. Collier		assert(ind_table != NULL);
55697f17497SC.J. Collier		claim_zero(ibv_exp_destroy_rwq_ind_table(ind_table));
55797f17497SC.J. Collier	}
55897f17497SC.J. Collier	priv->ind_tables_n = 0;
55997f17497SC.J. Collier	rte_free(priv->ind_tables);
56097f17497SC.J. Collier	priv->ind_tables = NULL;
56197f17497SC.J. Collier}
56297f17497SC.J. Collier
56397f17497SC.J. Collier/**
56497f17497SC.J. Collier * Check whether a given flow type is allowed.
56597f17497SC.J. Collier *
56697f17497SC.J. Collier * @param priv
56797f17497SC.J. Collier *   Pointer to private structure.
56897f17497SC.J. Collier * @param type
56997f17497SC.J. Collier *   Flow type to check.
57097f17497SC.J. Collier *
57197f17497SC.J. Collier * @return
57297f17497SC.J. Collier *   Nonzero if the given flow type is allowed.
57397f17497SC.J. Collier */
57497f17497SC.J. Collierint
57597f17497SC.J. Collierpriv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
57697f17497SC.J. Collier{
57797f17497SC.J. Collier	/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
57897f17497SC.J. Collier	 * has been requested. */
57997f17497SC.J. Collier	if (priv->promisc_req)
58097f17497SC.J. Collier		return type == HASH_RXQ_FLOW_TYPE_PROMISC;
58197f17497SC.J. Collier	switch (type) {
58297f17497SC.J. Collier	case HASH_RXQ_FLOW_TYPE_PROMISC:
58397f17497SC.J. Collier		return !!priv->promisc_req;
58497f17497SC.J. Collier	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
58597f17497SC.J. Collier		return !!priv->allmulti_req;
58697f17497SC.J. Collier	case HASH_RXQ_FLOW_TYPE_BROADCAST:
58797f17497SC.J. Collier	case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
58897f17497SC.J. Collier		/* If allmulti is enabled, broadcast and ipv6multi
58997f17497SC.J. Collier		 * are unnecessary. */
59097f17497SC.J. Collier		return !priv->allmulti_req;
59197f17497SC.J. Collier	case HASH_RXQ_FLOW_TYPE_MAC:
59297f17497SC.J. Collier		return 1;
59397f17497SC.J. Collier	default:
59497f17497SC.J. Collier		/* Unsupported flow type is not allowed. */
59597f17497SC.J. Collier		return 0;
59697f17497SC.J. Collier	}
59797f17497SC.J. Collier	return 0;
59897f17497SC.J. Collier}
59997f17497SC.J. Collier
60097f17497SC.J. Collier/**
60197f17497SC.J. Collier * Automatically enable/disable flows according to configuration.
60297f17497SC.J. Collier *
60397f17497SC.J. Collier * @param priv
60497f17497SC.J. Collier *   Private structure.
60597f17497SC.J. Collier *
60697f17497SC.J. Collier * @return
60797f17497SC.J. Collier *   0 on success, errno value on failure.
60897f17497SC.J. Collier */
60997f17497SC.J. Collierint
61097f17497SC.J. Collierpriv_rehash_flows(struct priv *priv)
61197f17497SC.J. Collier{
61247d9763aSLuca Boccassi	size_t i;
61397f17497SC.J. Collier
61447d9763aSLuca Boccassi	for (i = 0; i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); ++i)
61597f17497SC.J. Collier		if (!priv_allow_flow_type(priv, i)) {
61697f17497SC.J. Collier			priv_special_flow_disable(priv, i);
61797f17497SC.J. Collier		} else {
61897f17497SC.J. Collier			int ret = priv_special_flow_enable(priv, i);
61997f17497SC.J. Collier
62097f17497SC.J. Collier			if (ret)
62197f17497SC.J. Collier				return ret;
62297f17497SC.J. Collier		}
62397f17497SC.J. Collier	if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
62497f17497SC.J. Collier		return priv_mac_addrs_enable(priv);
62597f17497SC.J. Collier	priv_mac_addrs_disable(priv);
62697f17497SC.J. Collier	return 0;
62797f17497SC.J. Collier}
62897f17497SC.J. Collier
62997f17497SC.J. Collier/**
63097f17497SC.J. Collier * Allocate RX queue elements.
63197f17497SC.J. Collier *
6328b25d1adSChristian Ehrhardt * @param rxq_ctrl
63397f17497SC.J. Collier *   Pointer to RX queue structure.
63497f17497SC.J. Collier * @param elts_n
63597f17497SC.J. Collier *   Number of elements to allocate.
63697f17497SC.J. Collier * @param[in] pool
63797f17497SC.J. Collier *   If not NULL, fetch buffers from this array instead of allocating them
63897f17497SC.J. Collier *   with rte_pktmbuf_alloc().
63997f17497SC.J. Collier *
64097f17497SC.J. Collier * @return
64197f17497SC.J. Collier *   0 on success, errno value on failure.
64297f17497SC.J. Collier */
64397f17497SC.J. Collierstatic int
6448b25d1adSChristian Ehrhardtrxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n,
6458b25d1adSChristian Ehrhardt	       struct rte_mbuf *(*pool)[])
64697f17497SC.J. Collier{
6478b25d1adSChristian Ehrhardt	const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
64897f17497SC.J. Collier	unsigned int i;
64997f17497SC.J. Collier	int ret = 0;
65097f17497SC.J. Collier
6518b25d1adSChristian Ehrhardt	/* Iterate on segments. */
65297f17497SC.J. Collier	for (i = 0; (i != elts_n); ++i) {
65397f17497SC.J. Collier		struct rte_mbuf *buf;
6548b25d1adSChristian Ehrhardt		volatile struct mlx5_wqe_data_seg *scat =
6558b25d1adSChristian Ehrhardt			&(*rxq_ctrl->rxq.wqes)[i];
65697f17497SC.J. Collier
65797f17497SC.J. Collier		if (pool != NULL) {
6588b25d1adSChristian Ehrhardt			buf = (*pool)[i];
65997f17497SC.J. Collier			assert(buf != NULL);
66097f17497SC.J. Collier			rte_pktmbuf_reset(buf);
6618b25d1adSChristian Ehrhardt			rte_pktmbuf_refcnt_update(buf, 1);
66297f17497SC.J. Collier		} else
6638b25d1adSChristian Ehrhardt			buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp);
66497f17497SC.J. Collier		if (buf == NULL) {
66597f17497SC.J. Collier			assert(pool == NULL);
6668b25d1adSChristian Ehrhardt			ERROR("%p: empty mbuf pool", (void *)rxq_ctrl);
66797f17497SC.J. Collier			ret = ENOMEM;
66897f17497SC.J. Collier			goto error;
66997f17497SC.J. Collier		}
67097f17497SC.J. Collier		/* Headroom is reserved by rte_pktmbuf_alloc(). */
67197f17497SC.J. Collier		assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
67297f17497SC.J. Collier		/* Buffer is supposed to be empty. */
67397f17497SC.J. Collier		assert(rte_pktmbuf_data_len(buf) == 0);
67497f17497SC.J. Collier		assert(rte_pktmbuf_pkt_len(buf) == 0);
6758b25d1adSChristian Ehrhardt		assert(!buf->next);
6768b25d1adSChristian Ehrhardt		/* Only the first segment keeps headroom. */
6778b25d1adSChristian Ehrhardt		if (i % sges_n)
6788b25d1adSChristian Ehrhardt			SET_DATA_OFF(buf, 0);
6798b25d1adSChristian Ehrhardt		PORT(buf) = rxq_ctrl->rxq.port_id;
6808b25d1adSChristian Ehrhardt		DATA_LEN(buf) = rte_pktmbuf_tailroom(buf);
6818b25d1adSChristian Ehrhardt		PKT_LEN(buf) = DATA_LEN(buf);
6828b25d1adSChristian Ehrhardt		NB_SEGS(buf) = 1;
6838b25d1adSChristian Ehrhardt		/* scat->addr must be able to store a pointer. */
6848b25d1adSChristian Ehrhardt		assert(sizeof(scat->addr) >= sizeof(uintptr_t));
6858b25d1adSChristian Ehrhardt		*scat = (struct mlx5_wqe_data_seg){
6868b25d1adSChristian Ehrhardt			.addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)),
6878b25d1adSChristian Ehrhardt			.byte_count = htonl(DATA_LEN(buf)),
6888b25d1adSChristian Ehrhardt			.lkey = htonl(rxq_ctrl->mr->lkey),
6898b25d1adSChristian Ehrhardt		};
6908b25d1adSChristian Ehrhardt		(*rxq_ctrl->rxq.elts)[i] = buf;
69197f17497SC.J. Collier	}
6928b25d1adSChristian Ehrhardt	DEBUG("%p: allocated and configured %u segments (max %u packets)",
6938b25d1adSChristian Ehrhardt	      (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n));
69497f17497SC.J. Collier	assert(ret == 0);
69597f17497SC.J. Collier	return 0;
69697f17497SC.J. Colliererror:
6978b25d1adSChristian Ehrhardt	assert(pool == NULL);
6988b25d1adSChristian Ehrhardt	elts_n = i;
6998b25d1adSChristian Ehrhardt	for (i = 0; (i != elts_n); ++i) {
7008b25d1adSChristian Ehrhardt		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
7018b25d1adSChristian Ehrhardt			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
7028b25d1adSChristian Ehrhardt		(*rxq_ctrl->rxq.elts)[i] = NULL;
70397f17497SC.J. Collier	}
7048b25d1adSChristian Ehrhardt	DEBUG("%p: failed, freed everything", (void *)rxq_ctrl);
70597f17497SC.J. Collier	assert(ret > 0);
70697f17497SC.J. Collier	return ret;
70797f17497SC.J. Collier}
70897f17497SC.J. Collier
70997f17497SC.J. Collier/**
71097f17497SC.J. Collier * Free RX queue elements.
71197f17497SC.J. Collier *
7128b25d1adSChristian Ehrhardt * @param rxq_ctrl
71397f17497SC.J. Collier *   Pointer to RX queue structure.
71497f17497SC.J. Collier */
71597f17497SC.J. Collierstatic void
7168b25d1adSChristian Ehrhardtrxq_free_elts(struct rxq_ctrl *rxq_ctrl)
71797f17497SC.J. Collier{
71897f17497SC.J. Collier	unsigned int i;
71997f17497SC.J. Collier
7208b25d1adSChristian Ehrhardt	DEBUG("%p: freeing WRs", (void *)rxq_ctrl);
7218b25d1adSChristian Ehrhardt	if (rxq_ctrl->rxq.elts == NULL)
72297f17497SC.J. Collier		return;
72397f17497SC.J. Collier
7246b3e017eSChristian Ehrhardt	for (i = 0; (i != (1u << rxq_ctrl->rxq.elts_n)); ++i) {
7258b25d1adSChristian Ehrhardt		if ((*rxq_ctrl->rxq.elts)[i] != NULL)
7268b25d1adSChristian Ehrhardt			rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
7278b25d1adSChristian Ehrhardt		(*rxq_ctrl->rxq.elts)[i] = NULL;
72897f17497SC.J. Collier	}
72997f17497SC.J. Collier}
73097f17497SC.J. Collier
73197f17497SC.J. Collier/**
73297f17497SC.J. Collier * Clean up a RX queue.
73397f17497SC.J. Collier *
73497f17497SC.J. Collier * Destroy objects, free allocated memory and reset the structure for reuse.
73597f17497SC.J. Collier *
7368b25d1adSChristian Ehrhardt * @param rxq_ctrl
73797f17497SC.J. Collier *   Pointer to RX queue structure.
73897f17497SC.J. Collier */
73997f17497SC.J. Colliervoid
7408b25d1adSChristian Ehrhardtrxq_cleanup(struct rxq_ctrl *rxq_ctrl)
74197f17497SC.J. Collier{
74297f17497SC.J. Collier	struct ibv_exp_release_intf_params params;
74397f17497SC.J. Collier
7448b25d1adSChristian Ehrhardt	DEBUG("cleaning up %p", (void *)rxq_ctrl);
7458b25d1adSChristian Ehrhardt	rxq_free_elts(rxq_ctrl);
74632e04ea0SChristian Ehrhardt	if (rxq_ctrl->fdir_queue != NULL)
74732e04ea0SChristian Ehrhardt		priv_fdir_queue_destroy(rxq_ctrl->priv, rxq_ctrl->fdir_queue);
7488b25d1adSChristian Ehrhardt	if (rxq_ctrl->if_wq != NULL) {
7498b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv != NULL);
7508b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv->ctx != NULL);
7518b25d1adSChristian Ehrhardt		assert(rxq_ctrl->wq != NULL);
75297f17497SC.J. Collier		params = (struct ibv_exp_release_intf_params){
75397f17497SC.J. Collier			.comp_mask = 0,
75497f17497SC.J. Collier		};
7558b25d1adSChristian Ehrhardt		claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
7568b25d1adSChristian Ehrhardt						rxq_ctrl->if_wq,
75797f17497SC.J. Collier						&params));
75897f17497SC.J. Collier	}
7598b25d1adSChristian Ehrhardt	if (rxq_ctrl->if_cq != NULL) {
7608b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv != NULL);
7618b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv->ctx != NULL);
7628b25d1adSChristian Ehrhardt		assert(rxq_ctrl->cq != NULL);
76397f17497SC.J. Collier		params = (struct ibv_exp_release_intf_params){
76497f17497SC.J. Collier			.comp_mask = 0,
76597f17497SC.J. Collier		};
7668b25d1adSChristian Ehrhardt		claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx,
7678b25d1adSChristian Ehrhardt						rxq_ctrl->if_cq,
76897f17497SC.J. Collier						&params));
76997f17497SC.J. Collier	}
7708b25d1adSChristian Ehrhardt	if (rxq_ctrl->wq != NULL)
7718b25d1adSChristian Ehrhardt		claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq));
7728b25d1adSChristian Ehrhardt	if (rxq_ctrl->cq != NULL)
7738b25d1adSChristian Ehrhardt		claim_zero(ibv_destroy_cq(rxq_ctrl->cq));
7748b25d1adSChristian Ehrhardt	if (rxq_ctrl->rd != NULL) {
77597f17497SC.J. Collier		struct ibv_exp_destroy_res_domain_attr attr = {
77697f17497SC.J. Collier			.comp_mask = 0,
77797f17497SC.J. Collier		};
77897f17497SC.J. Collier
7798b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv != NULL);
7808b25d1adSChristian Ehrhardt		assert(rxq_ctrl->priv->ctx != NULL);
7818b25d1adSChristian Ehrhardt		claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx,
7828b25d1adSChristian Ehrhardt						      rxq_ctrl->rd,
78397f17497SC.J. Collier						      &attr));
78497f17497SC.J. Collier	}
7858b25d1adSChristian Ehrhardt	if (rxq_ctrl->mr != NULL)
7868b25d1adSChristian Ehrhardt		claim_zero(ibv_dereg_mr(rxq_ctrl->mr));
7878b25d1adSChristian Ehrhardt	memset(rxq_ctrl, 0, sizeof(*rxq_ctrl));
78897f17497SC.J. Collier}
78997f17497SC.J. Collier
79097f17497SC.J. Collier/**
7918b25d1adSChristian Ehrhardt * Reconfigure RX queue buffers.
79297f17497SC.J. Collier *
79397f17497SC.J. Collier * rxq_rehash() does not allocate mbufs, which, if not done from the right
79497f17497SC.J. Collier * thread (such as a control thread), may corrupt the pool.
79597f17497SC.J. Collier * In case of failure, the queue is left untouched.
79697f17497SC.J. Collier *
79797f17497SC.J. Collier * @param dev
79897f17497SC.J. Collier *   Pointer to Ethernet device structure.
7998b25d1adSChristian Ehrhardt * @param rxq_ctrl
80097f17497SC.J. Collier *   RX queue pointer.
80197f17497SC.J. Collier *
80297f17497SC.J. Collier * @return
80397f17497SC.J. Collier *   0 on success, errno value on failure.
80497f17497SC.J. Collier */
80597f17497SC.J. Collierint
8068b25d1adSChristian Ehrhardtrxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl)
80797f17497SC.J. Collier{
8086b3e017eSChristian Ehrhardt	unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n;
8098b25d1adSChristian Ehrhardt	unsigned int i;
81097f17497SC.J. Collier	struct ibv_exp_wq_attr mod;
81197f17497SC.J. Collier	int err;
81297f17497SC.J. Collier
8138b25d1adSChristian Ehrhardt	DEBUG("%p: rehashing queue %p with %u SGE(s) per packet",
8148b25d1adSChristian Ehrhardt	      (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n);
8158b25d1adSChristian Ehrhardt	assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n)));
81697f17497SC.J. Collier	/* From now on, any failure will render the queue unusable.
81797f17497SC.J. Collier	 * Reinitialize WQ. */
81897f17497SC.J. Collier	mod = (struct ibv_exp_wq_attr){
81997f17497SC.J. Collier		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
82097f17497SC.J. Collier		.wq_state = IBV_EXP_WQS_RESET,
82197f17497SC.J. Collier	};
8228b25d1adSChristian Ehrhardt	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
82397f17497SC.J. Collier	if (err) {
82497f17497SC.J. Collier		ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err));
82597f17497SC.J. Collier		assert(err > 0);
82697f17497SC.J. Collier		return err;
82797f17497SC.J. Collier	}
82897f17497SC.J. Collier	/* Snatch mbufs from original queue. */
8298b25d1adSChristian Ehrhardt	claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts));
8308b25d1adSChristian Ehrhardt	for (i = 0; i != elts_n; ++i) {
8318b25d1adSChristian Ehrhardt		struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i];
83297f17497SC.J. Collier
8338b25d1adSChristian Ehrhardt		assert(rte_mbuf_refcnt_read(buf) == 2);
8348b25d1adSChristian Ehrhardt		rte_pktmbuf_free_seg(buf);
83597f17497SC.J. Collier	}
83697f17497SC.J. Collier	/* Change queue state to ready. */
83797f17497SC.J. Collier	mod = (struct ibv_exp_wq_attr){
83897f17497SC.J. Collier		.attr_mask = IBV_EXP_WQ_ATTR_STATE,
83997f17497SC.J. Collier		.wq_state = IBV_EXP_WQS_RDY,
84097f17497SC.J. Collier	};
8418b25d1adSChristian Ehrhardt	err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod);
84297f17497SC.J. Collier	if (err) {
84397f17497SC.J. Collier		ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s",
84497f17497SC.J. Collier		      (void *)dev, strerror(err));
84597f17497SC.J. Collier		goto error;
84697f17497SC.J. Collier	}
8478b25d1adSChristian Ehrhardt	/* Update doorbell counter. */
8488b25d1adSChristian Ehrhardt	rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n;
8498b25d1adSChristian Ehrhardt	rte_wmb();
8508b25d1adSChristian Ehrhardt	*rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci);
85197f17497SC.J. Colliererror:
85297f17497SC.J. Collier	assert(err >= 0);
85397f17497SC.J. Collier	return err;
85497f17497SC.J. Collier}
85597f17497SC.J. Collier
8568b25d1adSChristian Ehrhardt/**
8578b25d1adSChristian Ehrhardt * Initialize RX queue.
8588b25d1adSChristian Ehrhardt *
8598b25d1adSChristian Ehrhardt * @param tmpl
8608b25d1adSChristian Ehrhardt *   Pointer to RX queue control template.
8618b25d1adSChristian Ehrhardt *
8628b25d1adSChristian Ehrhardt * @return
8638b25d1adSChristian Ehrhardt *   0 on success, errno value on failure.
8648b25d1adSChristian Ehrhardt */
8658b25d1adSChristian Ehrhardtstatic inline int
8668b25d1adSChristian Ehrhardtrxq_setup(struct rxq_ctrl *tmpl)
8678b25d1adSChristian Ehrhardt{
8688b25d1adSChristian Ehrhardt	struct ibv_cq *ibcq = tmpl->cq;
869fdd2322bSLuca Boccassi	struct ibv_mlx5_cq_info cq_info;
8708b25d1adSChristian Ehrhardt	struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq);
8716b3e017eSChristian Ehrhardt	struct rte_mbuf *(*elts)[1 << tmpl->rxq.elts_n] =
8728b25d1adSChristian Ehrhardt		rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket);
8738b25d1adSChristian Ehrhardt
874fdd2322bSLuca Boccassi	if (ibv_mlx5_exp_get_cq_info(ibcq, &cq_info)) {
875fdd2322bSLuca Boccassi		ERROR("Unable to query CQ info. check your OFED.");
876fdd2322bSLuca Boccassi		return ENOTSUP;
877fdd2322bSLuca Boccassi	}
878fdd2322bSLuca Boccassi	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
8798b25d1adSChristian Ehrhardt		ERROR("Wrong MLX5_CQE_SIZE environment variable value: "
8808b25d1adSChristian Ehrhardt		      "it should be set to %u", RTE_CACHE_LINE_SIZE);
8818b25d1adSChristian Ehrhardt		return EINVAL;
8828b25d1adSChristian Ehrhardt	}
8838b25d1adSChristian Ehrhardt	if (elts == NULL)
8848b25d1adSChristian Ehrhardt		return ENOMEM;
8858b25d1adSChristian Ehrhardt	tmpl->rxq.rq_db = rwq->rq.db;
886fdd2322bSLuca Boccassi	tmpl->rxq.cqe_n = log2above(cq_info.cqe_cnt);
8878b25d1adSChristian Ehrhardt	tmpl->rxq.cq_ci = 0;
8888b25d1adSChristian Ehrhardt	tmpl->rxq.rq_ci = 0;
889fdd2322bSLuca Boccassi	tmpl->rxq.cq_db = cq_info.dbrec;
8908b25d1adSChristian Ehrhardt	tmpl->rxq.wqes =
8918b25d1adSChristian Ehrhardt		(volatile struct mlx5_wqe_data_seg (*)[])
8928b25d1adSChristian Ehrhardt		(uintptr_t)rwq->rq.buff;
8938b25d1adSChristian Ehrhardt	tmpl->rxq.cqes =
8948b25d1adSChristian Ehrhardt		(volatile struct mlx5_cqe (*)[])
895fdd2322bSLuca Boccassi		(uintptr_t)cq_info.buf;
8968b25d1adSChristian Ehrhardt	tmpl->rxq.elts = elts;
8978b25d1adSChristian Ehrhardt	return 0;
8988b25d1adSChristian Ehrhardt}
8998b25d1adSChristian Ehrhardt
90097f17497SC.J. Collier/**
90197f17497SC.J. Collier * Configure a RX queue.
90297f17497SC.J. Collier *
90397f17497SC.J. Collier * @param dev
90497f17497SC.J. Collier *   Pointer to Ethernet device structure.
9058b25d1adSChristian Ehrhardt * @param rxq_ctrl
90697f17497SC.J. Collier *   Pointer to RX queue structure.
90797f17497SC.J. Collier * @param desc
90897f17497SC.J. Collier *   Number of descriptors to configure in queue.
90997f17497SC.J. Collier * @param socket
91097f17497SC.J. Collier *   NUMA socket on which memory must be allocated.
91197f17497SC.J. Collier * @param[in] conf
91297f17497SC.J. Collier *   Thresholds parameters.
91397f17497SC.J. Collier * @param mp
91497f17497SC.J. Collier *   Memory pool for buffer allocations.
91597f17497SC.J. Collier *
91697f17497SC.J. Collier * @return
91797f17497SC.J. Collier *   0 on success, errno value on failure.
91897f17497SC.J. Collier */
91997f17497SC.J. Collierint
9208b25d1adSChristian Ehrhardtrxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl,
9218b25d1adSChristian Ehrhardt	       uint16_t desc, unsigned int socket,
9228b25d1adSChristian Ehrhardt	       const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
92397f17497SC.J. Collier{
92497f17497SC.J. Collier	struct priv *priv = dev->data->dev_private;
9258b25d1adSChristian Ehrhardt	struct rxq_ctrl tmpl = {
92697f17497SC.J. Collier		.priv = priv,
9278b25d1adSChristian Ehrhardt		.socket = socket,
9288b25d1adSChristian Ehrhardt		.rxq = {
9296b3e017eSChristian Ehrhardt			.elts_n = log2above(desc),
9308b25d1adSChristian Ehrhardt			.mp = mp,
9316b3e017eSChristian Ehrhardt			.rss_hash = priv->rxqs_n > 1,
9328b25d1adSChristian Ehrhardt		},
93397f17497SC.J. Collier	};
93497f17497SC.J. Collier	struct ibv_exp_wq_attr mod;
93597f17497SC.J. Collier	union {
93697f17497SC.J. Collier		struct ibv_exp_query_intf_params params;
93797f17497SC.J. Collier		struct ibv_exp_cq_init_attr cq;
93897f17497SC.J. Collier		struct ibv_exp_res_domain_init_attr rd;
93997f17497SC.J. Collier		struct ibv_exp_wq_init_attr wq;
9408b25d1adSChristian Ehrhardt		struct ibv_exp_cq_attr cq_attr;
94197f17497SC.J. Collier	} attr;
94297f17497SC.J. Collier	enum ibv_exp_query_intf_status status;
9438b25d1adSChristian Ehrhardt	unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
9448b25d1adSChristian Ehrhardt	unsigned int cqe_n = desc - 1;
9458b25d1adSChristian Ehrhardt	struct rte_mbuf *(*elts)[desc] = NULL;
94697f17497SC.J. Collier	int ret = 0;
94797f17497SC.J. Collier
94897f17497SC.J. Collier	(void)conf; /* Thresholds configuration (ignored). */
9498b25d1adSChristian Ehrhardt	/* Enable scattered packets support for this queue if necessary. */
9508b25d1adSChristian Ehrhardt	assert(mb_len >= RTE_PKTMBUF_HEADROOM);
95132e04ea0SChristian Ehrhardt	/* If smaller than MRU, multi-segment support must be enabled. */
95232e04ea0SChristian Ehrhardt	if (mb_len < (priv->mtu > dev->data->dev_conf.rxmode.max_rx_pkt_len ?
95332e04ea0SChristian Ehrhardt		     dev->data->dev_conf.rxmode.max_rx_pkt_len :
95432e04ea0SChristian Ehrhardt		     priv->mtu))
95532e04ea0SChristian Ehrhardt		dev->data->dev_conf.rxmode.jumbo_frame = 1;
9568b25d1adSChristian Ehrhardt	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
9578b25d1adSChristian Ehrhardt	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
9588b25d1adSChristian Ehrhardt	     (mb_len - RTE_PKTMBUF_HEADROOM))) {
9598b25d1adSChristian Ehrhardt		unsigned int size =
9608b25d1adSChristian Ehrhardt			RTE_PKTMBUF_HEADROOM +
9618b25d1adSChristian Ehrhardt			dev->data->dev_conf.rxmode.max_rx_pkt_len;
9628b25d1adSChristian Ehrhardt		unsigned int sges_n;
9638b25d1adSChristian Ehrhardt
9648b25d1adSChristian Ehrhardt		/*
9658b25d1adSChristian Ehrhardt		 * Determine the number of SGEs needed for a full packet
9668b25d1adSChristian Ehrhardt		 * and round it to the next power of two.
9678b25d1adSChristian Ehrhardt		 */
9688b25d1adSChristian Ehrhardt		sges_n = log2above((size / mb_len) + !!(size % mb_len));
9698b25d1adSChristian Ehrhardt		tmpl.rxq.sges_n = sges_n;
9708b25d1adSChristian Ehrhardt		/* Make sure rxq.sges_n did not overflow. */
9718b25d1adSChristian Ehrhardt		size = mb_len * (1 << tmpl.rxq.sges_n);
9728b25d1adSChristian Ehrhardt		size -= RTE_PKTMBUF_HEADROOM;
9738b25d1adSChristian Ehrhardt		if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
9748b25d1adSChristian Ehrhardt			ERROR("%p: too many SGEs (%u) needed to handle"
9758b25d1adSChristian Ehrhardt			      " requested maximum packet size %u",
9768b25d1adSChristian Ehrhardt			      (void *)dev,
9778b25d1adSChristian Ehrhardt			      1 << sges_n,
9788b25d1adSChristian Ehrhardt			      dev->data->dev_conf.rxmode.max_rx_pkt_len);
9798b25d1adSChristian Ehrhardt			return EOVERFLOW;
9808b25d1adSChristian Ehrhardt		}
98197f17497SC.J. Collier	}
9828b25d1adSChristian Ehrhardt	DEBUG("%p: maximum number of segments per packet: %u",
9838b25d1adSChristian Ehrhardt	      (void *)dev, 1 << tmpl.rxq.sges_n);
9848b25d1adSChristian Ehrhardt	if (desc % (1 << tmpl.rxq.sges_n)) {
9858b25d1adSChristian Ehrhardt		ERROR("%p: number of RX queue descriptors (%u) is not a"
9868b25d1adSChristian Ehrhardt		      " multiple of SGEs per packet (%u)",
9878b25d1adSChristian Ehrhardt		      (void *)dev,
9888b25d1adSChristian Ehrhardt		      desc,
9898b25d1adSChristian Ehrhardt		      1 << tmpl.rxq.sges_n);
9908b25d1adSChristian Ehrhardt		return EINVAL;
99197f17497SC.J. Collier	}
99297f17497SC.J. Collier	/* Toggle RX checksum offload if hardware supports it. */
99397f17497SC.J. Collier	if (priv->hw_csum)
9948b25d1adSChristian Ehrhardt		tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
99597f17497SC.J. Collier	if (priv->hw_csum_l2tun)
9968b25d1adSChristian Ehrhardt		tmpl.rxq.csum_l2tun =
9978b25d1adSChristian Ehrhardt			!!dev->data->dev_conf.rxmode.hw_ip_checksum;
99897f17497SC.J. Collier	/* Use the entire RX mempool as the memory region. */
99997f17497SC.J. Collier	tmpl.mr = mlx5_mp2mr(priv->pd, mp);
100097f17497SC.J. Collier	if (tmpl.mr == NULL) {
100197f17497SC.J. Collier		ret = EINVAL;
100297f17497SC.J. Collier		ERROR("%p: MR creation failure: %s",
100397f17497SC.J. Collier		      (void *)dev, strerror(ret));
100497f17497SC.J. Collier		goto error;
100597f17497SC.J. Collier	}
100697f17497SC.J. Collier	attr.rd = (struct ibv_exp_res_domain_init_attr){
100797f17497SC.J. Collier		.comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
100897f17497SC.J. Collier			      IBV_EXP_RES_DOMAIN_MSG_MODEL),
100997f17497SC.J. Collier		.thread_model = IBV_EXP_THREAD_SINGLE,
101097f17497SC.J. Collier		.msg_model = IBV_EXP_MSG_HIGH_BW,
101197f17497SC.J. Collier	};
101297f17497SC.J. Collier	tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
101397f17497SC.J. Collier	if (tmpl.rd == NULL) {
101497f17497SC.J. Collier		ret = ENOMEM;
101597f17497SC.J. Collier		ERROR("%p: RD creation failure: %s",
101697f17497SC.J. Collier		      (void *)dev, strerror(ret));
101797f17497SC.J. Collier		goto error;
101897f17497SC.J. Collier	}
101997f17497SC.J. Collier	attr.cq = (struct ibv_exp_cq_init_attr){
102097f17497SC.J. Collier		.comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
102197f17497SC.J. Collier		.res_domain = tmpl.rd,
102297f17497SC.J. Collier	};
10238b25d1adSChristian Ehrhardt	if (priv->cqe_comp) {
10248b25d1adSChristian Ehrhardt		attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS;
10258b25d1adSChristian Ehrhardt		attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE;
10268b25d1adSChristian Ehrhardt		cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */
10278b25d1adSChristian Ehrhardt	}
10288b25d1adSChristian Ehrhardt	tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0,
102997f17497SC.J. Collier				    &attr.cq);
103097f17497SC.J. Collier	if (tmpl.cq == NULL) {
103197f17497SC.J. Collier		ret = ENOMEM;
103297f17497SC.J. Collier		ERROR("%p: CQ creation failure: %s",
103397f17497SC.J. Collier		      (void *)dev, strerror(ret));
103497f17497SC.J. Collier		goto error;
103597f17497SC.J. Collier	}
103697f17497SC.J. Collier	DEBUG("priv->device_attr.max_qp_wr is %d",
103797f17497SC.J. Collier	      priv->device_attr.max_qp_wr);
103897f17497SC.J. Collier	DEBUG("priv->device_attr.max_sge is %d",
103997f17497SC.J. Collier	      priv->device_attr.max_sge);
104097f17497SC.J. Collier	/* Configure VLAN stripping. */
10418b25d1adSChristian Ehrhardt	tmpl.rxq.vlan_strip = (priv->hw_vlan_strip &&
10428b25d1adSChristian Ehrhardt			       !!dev->data->dev_conf.rxmode.hw_vlan_strip);
104397f17497SC.J. Collier	attr.wq = (struct ibv_exp_wq_init_attr){
104497f17497SC.J. Collier		.wq_context = NULL, /* Could be useful in the future. */