/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"

#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};

static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
	uint32_t avail = 0;

	if (rxq == NULL)
		return;

	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
		   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
	PMD_RX_LOG(DEBUG,
		   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
		   (unsigned long)rxq->cmd_ring[0].basePA,
		   (unsigned long)rxq->cmd_ring[1].basePA,
		   (unsigned long)rxq->comp_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
	PMD_RX_LOG(DEBUG,
		   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[0].size, avail,
		   rxq->comp_ring.next2proc,
		   rxq->cmd_ring[0].size - avail);

	avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
	PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
		   rxq->cmd_ring[1].size - avail);
}

static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
	uint32_t avail = 0;

	if (txq == NULL)
		return;

	PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
		   txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
	PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
		   (unsigned long)txq->cmd_ring.basePA,
		   (unsigned long)txq->comp_ring.basePA,
		   (unsigned long)txq->data_ring.basePA);

	avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
	PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
		   (uint32_t)txq->cmd_ring.size, avail,
		   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
#endif

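/*
 * Free the mbufs still attached to a Tx command ring, walking from
 * next2comp up to next2fill. Used when a queue is released or reset,
 * after the device has been quiesced.
 */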
static void
vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
	while (ring->next2comp != ring->next2fill) {
		/* No need to worry about desc ownership, device is quiesced by now. */
		vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

		if (buf_info->m) {
			rte_pktmbuf_free(buf_info->m);
			buf_info->m = NULL;
			buf_info->bufPA = 0;
			buf_info->len = 0;
		}
		vmxnet3_cmd_ring_adv_next2comp(ring);
	}
}

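/*
 * Free every mbuf still posted on an Rx command ring. All ring entries
 * are checked and only those with an attached mbuf are freed; like the
 * Tx variant, this runs only once the device is quiesced.
 */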
static void
vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
	uint32_t i;

	for (i = 0; i < ring->size; i++) {
		/* No need to worry about desc ownership, device is quiesced by now. */
		vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];

		if (buf_info->m) {
			rte_pktmbuf_free_seg(buf_info->m);
			buf_info->m = NULL;
			buf_info->bufPA = 0;
			buf_info->len = 0;
		}
		vmxnet3_cmd_ring_adv_next2comp(ring);
	}
}

static void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
	rte_free(ring->buf_info);
	ring->buf_info = NULL;
}

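/*
 * Tear down a Tx queue: free any mbufs still posted on the command ring,
 * release the buf_info array, the descriptor memzone and finally the
 * queue structure itself.
 */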
void
vmxnet3_dev_tx_queue_release(void *txq)
{
	vmxnet3_tx_queue_t *tq = txq;

	if (tq != NULL) {
		/* Release mbufs */
		vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
		/* Release the cmd_ring */
		vmxnet3_cmd_ring_release(&tq->cmd_ring);
		/* Release the memzone */
		rte_memzone_free(tq->mz);
		/* Release the queue */
		rte_free(tq);
	}
}

void
vmxnet3_dev_rx_queue_release(void *rxq)
{
	int i;
	vmxnet3_rx_queue_t *rq = rxq;

	if (rq != NULL) {
		/* Release mbufs */
		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
			vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

		/* Release both the cmd_rings */
		for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
			vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);

		/* Release the memzone */
		rte_memzone_free(rq->mz);

		/* Release the queue */
		rte_free(rq);
	}
}

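/*
 * Return a Tx queue to its initial state: drop any posted mbufs, reset
 * the fill/completion indices and generation bits, and zero the
 * descriptor rings so the device sees a clean queue on the next start.
 */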
static void
vmxnet3_dev_tx_queue_reset(void *txq)
{
	vmxnet3_tx_queue_t *tq = txq;
	struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
	struct vmxnet3_data_ring *data_ring = &tq->data_ring;
	int size;

	if (tq != NULL) {
		/* Release the cmd_ring mbufs */
		vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
	}

	/* Tx vmxnet rings structure initialization */
	ring->next2fill = 0;
	ring->next2comp = 0;
	ring->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
	size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;

	memset(ring->base, 0, size);
}

static void
vmxnet3_dev_rx_queue_reset(void *rxq)
{
	int i;
	vmxnet3_rx_queue_t *rq = rxq;
	struct vmxnet3_cmd_ring *ring0, *ring1;
	struct vmxnet3_comp_ring *comp_ring;
	int size;

	/* Release both the cmd_rings mbufs */
	for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
		vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

	ring0 = &rq->cmd_ring[0];
	ring1 = &rq->cmd_ring[1];
	comp_ring = &rq->comp_ring;

	/* Rx vmxnet rings structure initialization */
	ring0->next2fill = 0;
	ring1->next2fill = 0;
	ring0->next2comp = 0;
	ring1->next2comp = 0;
	ring0->gen = VMXNET3_INIT_GEN;
	ring1->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
	size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;

	memset(ring0->base, 0, size);
}

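/*
 * Mark every Tx and Rx queue of the port as stopped and reset its rings
 * to the initial state, so that a later start begins from empty rings.
 */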
void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
	unsigned i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq != NULL) {
			txq->stopped = TRUE;
			vmxnet3_dev_tx_queue_reset(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq != NULL) {
			rxq->stopped = TRUE;
			vmxnet3_dev_rx_queue_reset(rxq);
		}
	}
}

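/*
 * Reclaim the command-ring descriptors of one transmitted packet.
 * eop_idx is the index reported by the completion descriptor; every
 * descriptor from next2comp up to and including eop_idx belongs to the
 * same packet, whose mbuf chain is freed here. Returns the number of
 * descriptors released.
 */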
static int
vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	struct rte_mbuf *mbuf;

	/* Release cmd_ring descriptor and free mbuf */
	RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);

	mbuf = txq->cmd_ring.buf_info[eop_idx].m;
	if (mbuf == NULL)
		rte_panic("EOP desc does not point to a valid mbuf");
	rte_pktmbuf_free(mbuf);

	txq->cmd_ring.buf_info[eop_idx].m = NULL;

	while (txq->cmd_ring.next2comp != eop_idx) {
		/* no out-of-order completion */
		RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
		vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
		completed++;
	}

	/* Mark the txd for which tcd was generated as completed */
	vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

	return completed + 1;
}

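/*
 * Drain the Tx completion ring: as long as the completion descriptor's
 * generation bit matches the ring's current generation, the referenced
 * packet has been sent and its command-ring descriptors can be reclaimed.
 */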
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
	int completed = 0;
	vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
	struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
		(comp_ring->base + comp_ring->next2proc);

	while (tcd->gen == comp_ring->gen) {
		completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

		vmxnet3_comp_ring_adv_next2proc(comp_ring);
		tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
						    comp_ring->next2proc);
	}

	PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}

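/*
 * Burst transmit routine. Completed descriptors are reclaimed up front;
 * then, for each packet, the routine checks that enough command-ring
 * entries are free, optionally copies small single-segment frames into
 * the data ring, fills one descriptor per segment and finally flips the
 * generation bit of the SOP descriptor to hand the packet to the device.
 * The TXPROD doorbell is written only once the number of deferred
 * packets reaches the txThreshold value in the shared queue control area.
 */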
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		  uint16_t nb_pkts)
{
	uint16_t nb_tx;
	vmxnet3_tx_queue_t *txq = tx_queue;
	struct vmxnet3_hw *hw = txq->hw;
	Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
	uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);

	if (unlikely(txq->stopped)) {
		PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
		return 0;
	}

	/* Free up the comp_descriptors aggressively */
	vmxnet3_tq_tx_complete(txq);

	nb_tx = 0;
	while (nb_tx < nb_pkts) {
		Vmxnet3_GenericDesc *gdesc;
		vmxnet3_buf_info_t *tbi;
		uint32_t first2fill, avail, dw2;
		struct rte_mbuf *txm = tx_pkts[nb_tx];
		struct rte_mbuf *m_seg = txm;
		int copy_size = 0;
		bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
		/* # of descriptors needed for a packet. */
		unsigned count = txm->nb_segs;

		avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
		if (count > avail) {
			/* Is command ring full? */
			if (unlikely(avail == 0)) {
				PMD_TX_LOG(DEBUG, "No free ring descriptors");
				txq->stats.tx_ring_full++;
				txq->stats.drop_total += (nb_pkts - nb_tx);
				break;
			}

			/* Command ring is not full but cannot handle the
			 * multi-segmented packet. Let's try the next packet
			 * in this case.
			 */
			PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
				   "(avail %d needed %d)", avail, count);
			txq->stats.drop_total++;
			if (tso)
				txq->stats.drop_tso++;
			rte_pktmbuf_free(txm);
			nb_tx++;
			continue;
		}

		/* Drop non-TSO packet that is excessively fragmented */
		if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
			PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
				   "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
			txq->stats.drop_too_many_segs++;
			txq->stats.drop_total++;
			rte_pktmbuf_free(txm);
			nb_tx++;
			continue;
		}

		if (txm->nb_segs == 1 &&
		    rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
			struct Vmxnet3_TxDataDesc *tdd;

			tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
			copy_size = rte_pktmbuf_pkt_len(txm);
			rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
		}

		/* use the previous gen bit for the SOP desc */
		dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
		first2fill = txq->cmd_ring.next2fill;
		do {
			/* Remember the transmit buffer for cleanup */
			tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;

			/* NB: the following assumes that VMXNET3 maximum
			 * transmit buffer size (16K) is greater than
			 * maximum size of mbuf segment size.
			 */
			gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
			if (copy_size)
				gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
								   txq->cmd_ring.next2fill *
								   sizeof(struct Vmxnet3_TxDataDesc));
			else
				gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);

			gdesc->dword[2] = dw2 | m_seg->data_len;
			gdesc->dword[3] = 0;

			/* move to the next2fill descriptor */
			vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

			/* use the right gen for non-SOP desc */
			dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
		} while ((m_seg = m_seg->next) != NULL);

		/* set the last buf_info for the pkt */
		tbi->m = txm;
		/* Update the EOP descriptor */
		gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

		/* Add VLAN tag if present */
		gdesc = txq->cmd_ring.base + first2fill;
		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
			gdesc->txd.ti = 1;
			gdesc->txd.tci = txm->vlan_tci;
		}

		if (tso) {
			uint16_t mss = txm->tso_segsz;

			RTE_ASSERT(mss > 0);

			gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
			gdesc->txd.om = VMXNET3_OM_TSO;
			gdesc->txd.msscof = mss;

			deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
		} else if (txm->ol_flags & PKT_TX_L4_MASK) {
			gdesc->txd.om = VMXNET3_OM_CSUM;
			gdesc->txd.hlen = txm->l2_len + txm->l3_len;

			switch (txm->ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
				break;
			case PKT_TX_UDP_CKSUM:
				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
				break;
			default:
				PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
					   txm->ol_flags & PKT_TX_L4_MASK);
				abort();
			}
			deferred++;
		} else {
			gdesc->txd.hlen = 0;
			gdesc->txd.om = VMXNET3_OM_NONE;
			gdesc->txd.msscof = 0;
			deferred++;
		}

		/* flip the GEN bit on the SOP */
		rte_compiler_barrier();
		gdesc->dword[2] ^= VMXNET3_TXD_GEN;

		txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
		nb_tx++;
	}

	PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));

	if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
		txq_ctrl->txNumDeferred = 0;
		/* Notify vSwitch that packets are available. */
		VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
				       txq->cmd_ring.next2fill);
	}

	return nb_tx;
}

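/*
 * Post one replacement mbuf on the given Rx command ring: record it in
 * buf_info, fill the next free Rx descriptor with the buffer address,
 * length, buffer type and current generation bit, then advance next2fill.
 */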
static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
		   struct rte_mbuf *mbuf)
{
	uint32_t val = 0;
	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
	struct Vmxnet3_RxDesc *rxd =
		(struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
	vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

	if (ring_id == 0)
		val = VMXNET3_RXD_BTYPE_HEAD;
	else
		val = VMXNET3_RXD_BTYPE_BODY;

	buf_info->m = mbuf;
	buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
	buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

	rxd->addr = buf_info->bufPA;
	rxd->btype = val;
	rxd->len = buf_info->len;
	rxd->gen = ring->gen;

	vmxnet3_cmd_ring_adv_next2fill(ring);
}

/*
 *  Allocates mbufs and posts Rx descriptors with the buffer details so
 *  that the device can receive packets into those buffers.
 *  Ring layout:
 *      Of the two rings, the 1st ring holds buffers of type 0 (HEAD) and
 *      type 1 (BODY). bufs_per_pkt is set such that for non-LRO cases all
 *      the buffers required by a frame fit in the 1st ring (the first buf
 *      of type 0 and the rest of type 1). The 2nd ring holds type 1
 *      buffers only and is mostly used for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
	int err = 0;
	uint32_t i = 0, val = 0;
	struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

	if (ring_id == 0) {
		/* Usually: One HEAD type buf per packet
		 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
		 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
		 */

		/* We use single packet buffer so all heads here */
		val = VMXNET3_RXD_BTYPE_HEAD;
	} else {
		/* All BODY type buffers for 2nd ring */
		val = VMXNET3_RXD_BTYPE_BODY;
	}

	while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
		struct Vmxnet3_RxDesc *rxd;
		struct rte_mbuf *mbuf;
		vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

		rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);

		/* Allocate blank mbuf for the current Rx Descriptor */
		mbuf = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(mbuf == NULL)) {
			PMD_RX_LOG(ERR, "Error allocating mbuf");
			rxq->stats.rx_buf_alloc_failure++;
			err = ENOMEM;
			break;
		}

		/*
		 * Load the mbuf pointer into buf_info[next2fill].
		 * The buf_info entry plays the same role as the cookie in a
		 * virtio virtqueue.
		 */
		buf_info->m = mbuf;
		buf_info->len = (uint16_t)(mbuf->buf_len -
					   RTE_PKTMBUF_HEADROOM);
		buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

		/* Load Rx Descriptor with the buffer's GPA */
		rxd->addr = buf_info->bufPA;

		/* After this point rxd->addr MUST not be NULL */
		rxd->btype = val;
		rxd->len = buf_info->len;
		/* Flip gen bit at the end to change ownership */
		rxd->gen = ring->gen;

		vmxnet3_cmd_ring_adv_next2fill(ring);
		i++;
	}

	/* Return error only if no buffers are posted at present */
	if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
		return -err;
	else
		return i;
}


/* Receive side checksum and other offloads */
static void
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
{
	/* Check for RSS */
	if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
		rxm->ol_flags |= PKT_RX_RSS_HASH;
		rxm->hash.rss = rcd->rssHash;
	}

	/* Check packet type, checksum errors, etc. Only support IPv4 for now. */
	if (rcd->v4) {
		struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
		struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);

		if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
			rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
		else
			rxm->packet_type = RTE_PTYPE_L3_IPV4;

		if (!rcd->cnc) {
			if (!rcd->ipc)
				rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;

			if ((rcd->tcp || rcd->udp) && !rcd->tuc)
				rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}
}

/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_rx;
	uint32_t nb_rxd, idx;
	uint8_t ring_idx;
	vmxnet3_rx_queue_t *rxq;
	Vmxnet3_RxCompDesc *rcd;
	vmxnet3_buf_info_t *rbi;
	Vmxnet3_RxDesc *rxd;
	struct rte_mbuf *rxm = NULL;
	struct vmxnet3_hw *hw;

	nb_rx = 0;
	ring_idx = 0;
	nb_rxd = 0;
	idx = 0;

	rxq = rx_queue;
	hw = rxq->hw;

	rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

	if (unlikely(rxq->stopped)) {
		PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
		return 0;
	}

	while (rcd->gen == rxq->comp_ring.gen) {
		struct rte_mbuf *newm;

		if (nb_rx >= nb_pkts)
			break;

		newm = rte_mbuf_raw_alloc(rxq->mp);
		if (unlikely(newm == NULL)) {
			PMD_RX_LOG(ERR, "Error allocating mbuf");
			rxq->stats.rx_buf_alloc_failure++;
			break;
		}

		idx = rcd->rxdIdx;
		ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
		rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
		RTE_SET_USED(rxd); /* used only for assert when enabled */
		rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

		PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

		RTE_ASSERT(rcd->len <= rxd->len);
		RTE_ASSERT(rbi->m);

		/* Get the packet buffer pointer from buf_info */
		rxm = rbi->m;

		/* Clear descriptor associated buf_info to be reused */
		rbi->m = NULL;
		rbi->bufPA = 0;

		/* Update the index that we received a packet */
		rxq->cmd_ring[ring_idx].next2comp = idx;

		/* For RCD with EOP set, check if there is frame error */
		if (unlikely(rcd->eop && rcd->err)) {
			rxq->stats.drop_total++;
			rxq->stats.drop_err++;

			if (!rcd->fcs) {
				rxq->stats.drop_fcs++;
				PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
			}
			PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
				   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
					 rxq->comp_ring.base), rcd->rxdIdx);
			rte_pktmbuf_free_seg(rxm);
			if (rxq->start_seg) {
				struct rte_mbuf *start = rxq->start_seg;

				rxq->start_seg = NULL;
				rte_pktmbuf_free(start);
			}
			goto rcd_done;
		}

		/* Initialize newly received packet buffer */
		rxm->port = rxq->port_id;
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = (uint16_t)rcd->len;
		rxm->data_len = (uint16_t)rcd->len;
		rxm->data_off = RTE_PKTMBUF_HEADROOM;
		rxm->ol_flags = 0;
		rxm->vlan_tci = 0;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (rcd->sop) {
			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

			if (unlikely(rcd->len == 0)) {
				RTE_ASSERT(rcd->eop);

				PMD_RX_LOG(DEBUG,
					   "Rx buf was skipped. rxring[%d][%d])",
					   ring_idx, idx);
				rte_pktmbuf_free_seg(rxm);
				goto rcd_done;
			}

			rxq->start_seg = rxm;
			vmxnet3_rx_offload(rcd, rxm);
		} else {
			struct rte_mbuf *start = rxq->start_seg;

			RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);

			start->pkt_len += rxm->data_len;
			start->nb_segs++;

			rxq->last_seg->next = rxm;
		}
		rxq->last_seg = rxm;

		if (rcd->eop) {
			struct rte_mbuf *start = rxq->start_seg;

			/* Check for hardware stripped VLAN tag */
			if (rcd->ts) {
				start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
				start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
			}

			rx_pkts[nb_rx++] = start;
			rxq->start_seg = NULL;
		}

rcd_done:
		rxq->cmd_ring[ring_idx].next2comp = idx;
		VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
					  rxq->cmd_ring[ring_idx].size);

		/* It's time to renew descriptors */
		vmxnet3_renew_desc(rxq, ring_idx, newm);
		if (unlikely(rxq->shared->ctrl.updateRxProd)) {
			VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
					       rxq->cmd_ring[ring_idx].next2fill);
		}

		/* Advance to the next descriptor in comp_ring */
		vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

		rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
		nb_rxd++;
		if (nb_rxd > rxq->cmd_ring[0].size) {
			PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
				   " relinquish control.");
			break;
		}
	}

	return nb_rx;
}

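/*
 * Set up one Tx queue: validate the requested ring size, allocate the
 * queue structure, reserve a single DMA memzone that holds the command,
 * completion and data rings back to back, and allocate the buf_info
 * array used to track mbufs posted on the command ring.
 */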
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	const struct rte_memzone *mz;
	struct vmxnet3_tx_queue *txq;
	struct vmxnet3_cmd_ring *ring;
	struct vmxnet3_comp_ring *comp_ring;
	struct vmxnet3_data_ring *data_ring;
	int size;

	PMD_INIT_FUNC_TRACE();

	if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
	    ETH_TXQ_FLAGS_NOXSUMSCTP) {
		PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
		return -EINVAL;
	}

	txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
		return -ENOMEM;
	}

	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
	txq->hw = hw;
	txq->qid = queue_idx;
	txq->stopped = TRUE;

	ring = &txq->cmd_ring;
	comp_ring = &txq->comp_ring;
	data_ring = &txq->data_ring;

	/* Tx vmxnet ring length should be between 512-4096 */
	if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
			     VMXNET3_DEF_TX_RING_SIZE);
		return -EINVAL;
	} else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
		PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
			     VMXNET3_TX_RING_MAX_SIZE);
		return -EINVAL;
	} else {
		ring->size = nb_desc;
		ring->size &= ~VMXNET3_RING_SIZE_MASK;
	}
	comp_ring->size = data_ring->size = ring->size;

	/* Tx vmxnet rings structure initialization */
	ring->next2fill = 0;
	ring->next2comp = 0;
	ring->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
	size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
	size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;

	mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
				      VMXNET3_RING_BA_ALIGN, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	txq->mz = mz;
	memset(mz->addr, 0, mz->len);

	/* cmd_ring initialization */
	ring->base = mz->addr;
	ring->basePA = mz->phys_addr;

	/* comp_ring initialization */
	comp_ring->base = ring->base + ring->size;
	comp_ring->basePA = ring->basePA +
		(sizeof(struct Vmxnet3_TxDesc) * ring->size);

	/* data_ring initialization */
	data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
	data_ring->basePA = comp_ring->basePA +
			(sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);

	/* cmd_ring0 buf_info allocation */
	ring->buf_info = rte_zmalloc("tx_ring_buf_info",
				     ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
	if (ring->buf_info == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
		return -ENOMEM;
	}

	/* Update the data portion with txq */
	dev->data->tx_queues[queue_idx] = txq;

	return 0;
}

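/*
 * Set up one Rx queue: validate the requested ring size, allocate the
 * queue structure, reserve a single DMA memzone holding both command
 * rings and the completion ring, and allocate a buf_info array per
 * command ring.
 */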
int
vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
			   uint16_t queue_idx,
			   uint16_t nb_desc,
			   unsigned int socket_id,
			   __rte_unused const struct rte_eth_rxconf *rx_conf,
			   struct rte_mempool *mp)
{
	const struct rte_memzone *mz;
	struct vmxnet3_rx_queue *rxq;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
	struct vmxnet3_comp_ring *comp_ring;
	int size;
	uint8_t i;
	char mem_name[32];

	PMD_INIT_FUNC_TRACE();

	rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
		return -ENOMEM;
	}

	rxq->mp = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
	rxq->hw = hw;
	rxq->qid1 = queue_idx;
	rxq->qid2 = queue_idx + hw->num_rx_queues;
	rxq->stopped = TRUE;

	ring0 = &rxq->cmd_ring[0];
	ring1 = &rxq->cmd_ring[1];
	comp_ring = &rxq->comp_ring;

	/* Rx vmxnet rings length should be between 256-4096 */
	if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
		return -EINVAL;
	} else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
		PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
		return -EINVAL;
	} else {
		ring0->size = nb_desc;
		ring0->size &= ~VMXNET3_RING_SIZE_MASK;
		ring1->size = ring0->size;
	}

	comp_ring->size = ring0->size + ring1->size;

	/* Rx vmxnet rings structure initialization */
	ring0->next2fill = 0;
	ring1->next2fill = 0;
	ring0->next2comp = 0;
	ring1->next2comp = 0;
	ring0->gen = VMXNET3_INIT_GEN;
	ring1->gen = VMXNET3_INIT_GEN;
	comp_ring->next2proc = 0;
	comp_ring->gen = VMXNET3_INIT_GEN;

	size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
	size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;

	mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
				      VMXNET3_RING_BA_ALIGN, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	rxq->mz = mz;
	memset(mz->addr, 0, mz->len);

	/* cmd_ring0 initialization */
	ring0->base = mz->addr;
	ring0->basePA = mz->phys_addr;

	/* cmd_ring1 initialization */
	ring1->base = ring0->base + ring0->size;
	ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;

	/* comp_ring initialization */
	comp_ring->base = ring1->base + ring1->size;
	comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
		ring1->size;

	/* cmd_ring0-cmd_ring1 buf_info allocation */
	for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
