bnxt_rxq.c revision 43192222
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>

#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * RX Queues
 */

void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
{
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	/*
	 * The stats block lives in the queue's DMA memzone and is freed
	 * along with the rings; only drop the reference here.
	 */
	cpr->hw_stats = NULL;
}

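/*
 * Partition the port's Rx rings into VNICs and ring groups: a single
 * default VNIC when only one Rx ring is configured, one VNIC per pool
 * in VMDq mode, or one VNIC spanning all rings for plain RSS/DCB.
 * Each VNIC is given an L2 filter; ring group IDs start at 1.
 */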
int bnxt_mq_rx_configure(struct bnxt *bp)
{
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	unsigned int i, j, nb_q_per_grp, ring_idx;
	int start_grp_id, end_grp_id, rc = 0;
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter;
	struct bnxt_rx_queue *rxq;

	bp->nr_vnics = 0;

	/* Single queue mode */
	if (bp->rx_cp_nr_rings < 2) {
		vnic = bnxt_alloc_vnic(bp);
		if (!vnic) {
			RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
		bp->nr_vnics++;

		rxq = bp->eth_dev->data->rx_queues[0];
		rxq->vnic = vnic;

		vnic->func_default = true;
		vnic->ff_pool_idx = 0;
		vnic->start_grp_id = 1;
		vnic->end_grp_id = vnic->start_grp_id +
				   bp->rx_cp_nr_rings - 1;
		filter = bnxt_alloc_filter(bp);
		if (!filter) {
			RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
			rc = -ENOMEM;
			goto err_out;
		}
		STAILQ_INSERT_TAIL(&vnic->filter, filter, next);
		goto out;
	}

	/* Multi-queue mode */
	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) {
		/* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */
		enum rte_eth_nb_pools pools;

		switch (dev_conf->rxmode.mq_mode) {
		case ETH_MQ_RX_VMDQ_RSS:
		case ETH_MQ_RX_VMDQ_ONLY:
			{
				const struct rte_eth_vmdq_rx_conf *conf =
				    &dev_conf->rx_adv_conf.vmdq_rx_conf;

				/* ETH_8/64_POOLs */
				pools = conf->nb_queue_pools;
				break;
			}
		default:
			RTE_LOG(ERR, PMD, "Unsupported mq_mode %d\n",
				dev_conf->rxmode.mq_mode);
			rc = -EINVAL;
			goto err_out;
		}
		/* For each pool, allocate MACVLAN CFA rule & VNIC */
		if (!pools) {
			RTE_LOG(ERR, PMD,
				"VMDq pool count not set, defaulting to 64\n");
			pools = ETH_64_POOLS;
		}
		nb_q_per_grp = bp->rx_cp_nr_rings / pools;
		start_grp_id = 1;
		end_grp_id = start_grp_id + nb_q_per_grp - 1;

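		/*
		 * Example: 16 Rx rings with ETH_8_POOLS gives
		 * nb_q_per_grp = 2, so pool 0 owns ring groups 1-2,
		 * pool 1 owns groups 3-4, and so on.
		 */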
		ring_idx = 0;
		for (i = 0; i < pools; i++) {
			vnic = bnxt_alloc_vnic(bp);
			if (!vnic) {
				RTE_LOG(ERR, PMD,
					"VNIC alloc failed\n");
				rc = -ENOMEM;
				goto err_out;
			}
			STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
			bp->nr_vnics++;

			for (j = 0; j < nb_q_per_grp; j++, ring_idx++) {
				rxq = bp->eth_dev->data->rx_queues[ring_idx];
				rxq->vnic = vnic;
			}
			if (i == 0)
				vnic->func_default = true;
			vnic->ff_pool_idx = i;
			vnic->start_grp_id = start_grp_id;
			vnic->end_grp_id = end_grp_id;

			filter = bnxt_alloc_filter(bp);
			if (!filter) {
				RTE_LOG(ERR, PMD,
					"L2 filter alloc failed\n");
				rc = -ENOMEM;
				goto err_out;
			}
			/*
			 * TODO: Configure & associate CFA rule for
			 * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
			 */
			STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

			start_grp_id = end_grp_id + 1;
			end_grp_id += nb_q_per_grp;
		}
		goto out;
	}

	/* Non-VMDq mode - RSS, DCB, RSS+DCB */
	/* Init default VNIC for RSS or DCB only */
	vnic = bnxt_alloc_vnic(bp);
	if (!vnic) {
		RTE_LOG(ERR, PMD, "VNIC alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	/* Partition the rx queues for the single pool */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rxq = bp->eth_dev->data->rx_queues[i];
		rxq->vnic = vnic;
	}
	STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
	bp->nr_vnics++;

	vnic->func_default = true;
	vnic->ff_pool_idx = 0;
	vnic->start_grp_id = 1;
	vnic->end_grp_id = vnic->start_grp_id +
			   bp->rx_cp_nr_rings - 1;
	filter = bnxt_alloc_filter(bp);
	if (!filter) {
		RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
		rc = -ENOMEM;
		goto err_out;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter, next);

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		vnic->hash_type =
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 |
			HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6;

out:
	return rc;

err_out:
	/* TODO: Free the VNICs/filters allocated so far */

	return rc;
}

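/* Free every mbuf still posted on the queue's Rx software ring. */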
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
{
	struct bnxt_sw_rx_bd *sw_ring;
	uint16_t i;

	if (rxq) {
		sw_ring = rxq->rx_ring->rx_buf_ring;
		if (sw_ring) {
			for (i = 0;
			     i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
				if (sw_ring[i].mbuf) {
					rte_pktmbuf_free_seg(sw_ring[i].mbuf);
					sw_ring[i].mbuf = NULL;
				}
			}
		}
	}
}

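/* Drop the mbufs posted on every Rx ring of the device. */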
void bnxt_free_rx_mbufs(struct bnxt *bp)
{
	struct bnxt_rx_queue *rxq;
	int i;

	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
		rxq = bp->rx_queues[i];
		bnxt_rx_queue_release_mbufs(rxq);
	}
}

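/*
 * .rx_queue_release dev op: free the posted mbufs and the Rx and
 * completion ring descriptors, clear the stats reference, release
 * the backing memzone, and free the queue structure itself.
 */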
void bnxt_rx_queue_release_op(void *rx_queue)
{
	struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue;

	if (rxq) {
		bnxt_rx_queue_release_mbufs(rxq);

		/* Free RX ring hardware descriptors */
		bnxt_free_ring(rxq->rx_ring->rx_ring_struct);

		/* Free RX completion ring hardware descriptors */
		bnxt_free_ring(rxq->cp_ring->cp_ring_struct);

		bnxt_free_rxq_stats(rxq);
		rte_memzone_free(rxq->mz);
		rxq->mz = NULL;

		rte_free(rxq);
	}
}

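/*
 * .rx_queue_setup dev op: validate the descriptor count, replace any
 * queue previously configured at this index, allocate and initialize
 * the bnxt_rx_queue, and reserve the DMA memory for its rings.
 */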
int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
			       uint16_t queue_idx,
			       uint16_t nb_desc,
			       unsigned int socket_id,
			       const struct rte_eth_rxconf *rx_conf,
			       struct rte_mempool *mp)
{
	struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
	struct bnxt_rx_queue *rxq;
	int rc = 0;

	if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
		RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
		rc = -EINVAL;
		goto out;
	}

	if (eth_dev->data->rx_queues) {
		rxq = eth_dev->data->rx_queues[queue_idx];
		if (rxq)
			bnxt_rx_queue_release_op(rxq);
	}
	rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
		rc = -ENOMEM;
		goto out;
	}
	rxq->bp = bp;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	rc = bnxt_init_rx_ring_struct(rxq, socket_id);
	if (rc)
		goto out;

	rxq->queue_id = queue_idx;
	rxq->port_id = eth_dev->data->port_id;
	rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
				0 : ETHER_CRC_LEN);

	eth_dev->data->rx_queues[queue_idx] = rxq;
	/* Allocate RX ring hardware descriptors */
	if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
			"rxr")) {
		RTE_LOG(ERR, PMD,
			"ring_dma_zone_reserve for rx_ring failed!\n");
		bnxt_rx_queue_release_op(rxq);
		rc = -ENOMEM;
		goto out;
	}

out:
	return rc;
}