197f17497SC.J. Collier/*-
297f17497SC.J. Collier *   BSD LICENSE
397f17497SC.J. Collier *
497f17497SC.J. Collier *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
597f17497SC.J. Collier *   All rights reserved.
697f17497SC.J. Collier *
797f17497SC.J. Collier *   Redistribution and use in source and binary forms, with or without
897f17497SC.J. Collier *   modification, are permitted provided that the following conditions
997f17497SC.J. Collier *   are met:
1097f17497SC.J. Collier *
1197f17497SC.J. Collier *     * Redistributions of source code must retain the above copyright
1297f17497SC.J. Collier *       notice, this list of conditions and the following disclaimer.
1397f17497SC.J. Collier *     * Redistributions in binary form must reproduce the above copyright
1497f17497SC.J. Collier *       notice, this list of conditions and the following disclaimer in
1597f17497SC.J. Collier *       the documentation and/or other materials provided with the
1697f17497SC.J. Collier *       distribution.
1797f17497SC.J. Collier *     * Neither the name of Intel Corporation nor the names of its
1897f17497SC.J. Collier *       contributors may be used to endorse or promote products derived
1997f17497SC.J. Collier *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
3397f17497SC.J. Collier
3497f17497SC.J. Collier#include <stdint.h>
3597f17497SC.J. Collier#include <stdio.h>
3697f17497SC.J. Collier#include <stdlib.h>
3797f17497SC.J. Collier#include <string.h>
3897f17497SC.J. Collier#include <errno.h>
3997f17497SC.J. Collier
4097f17497SC.J. Collier#include <rte_cycles.h>
4197f17497SC.J. Collier#include <rte_memory.h>
4297f17497SC.J. Collier#include <rte_memzone.h>
4397f17497SC.J. Collier#include <rte_branch_prediction.h>
4497f17497SC.J. Collier#include <rte_mempool.h>
4597f17497SC.J. Collier#include <rte_malloc.h>
4697f17497SC.J. Collier#include <rte_mbuf.h>
4797f17497SC.J. Collier#include <rte_ether.h>
4897f17497SC.J. Collier#include <rte_ethdev.h>
4997f17497SC.J. Collier#include <rte_prefetch.h>
5097f17497SC.J. Collier#include <rte_string_fns.h>
5197f17497SC.J. Collier#include <rte_errno.h>
5297f17497SC.J. Collier#include <rte_byteorder.h>
536b3e017eSChristian Ehrhardt#include <rte_cpuflags.h>
546b3e017eSChristian Ehrhardt#include <rte_net.h>
556b3e017eSChristian Ehrhardt#include <rte_ip.h>
566b3e017eSChristian Ehrhardt#include <rte_udp.h>
576b3e017eSChristian Ehrhardt#include <rte_tcp.h>
5897f17497SC.J. Collier
5997f17497SC.J. Collier#include "virtio_logs.h"
6097f17497SC.J. Collier#include "virtio_ethdev.h"
6197f17497SC.J. Collier#include "virtio_pci.h"
6297f17497SC.J. Collier#include "virtqueue.h"
6397f17497SC.J. Collier#include "virtio_rxtx.h"
6439157ec0SLuca Boccassi#include "virtio_rxtx_simple.h"
6597f17497SC.J. Collier
/* Dump the contents of mbuf @m (up to @len bytes) to stdout when the
 * driver is built with RTE_LIBRTE_VIRTIO_DEBUG_DUMP; otherwise the
 * macro compiles to a no-op so the fast path pays nothing.
 */
#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
#else
#define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
7197f17497SC.J. Collier
7297f17497SC.J. Collier
/* TX queue flags that must all be set before the simple (vectorized)
 * TX path may be selected: single-segment packets only, no offloads.
 *
 * NOTE(review): the original text ended in a dangling backslash
 * continuation with no second operand, leaving the macro expansion
 * unterminated; restored the ETH_TXQ_FLAGS_NOOFFLOADS operand and
 * closing parenthesis so the test in virtio_update_rxtx_handler()
 * compiles.
 */
#define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
	ETH_TXQ_FLAGS_NOOFFLOADS)
7647d9763aSLuca Boccassivoid
7797f17497SC.J. Colliervq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
7897f17497SC.J. Collier{
7997f17497SC.J. Collier	struct vring_desc *dp, *dp_tail;
8097f17497SC.J. Collier	struct vq_desc_extra *dxp;
8197f17497SC.J. Collier	uint16_t desc_idx_last = desc_idx;
8297f17497SC.J. Collier
8397f17497SC.J. Collier	dp  = &vq->vq_ring.desc[desc_idx];
8497f17497SC.J. Collier	dxp = &vq->vq_descx[desc_idx];
8597f17497SC.J. Collier	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
8697f17497SC.J. Collier	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
8797f17497SC.J. Collier		while (dp->flags & VRING_DESC_F_NEXT) {
8897f17497SC.J. Collier			desc_idx_last = dp->next;
8997f17497SC.J. Collier			dp = &vq->vq_ring.desc[dp->next];
9097f17497SC.J. Collier		}
9197f17497SC.J. Collier	}
9297f17497SC.J. Collier	dxp->ndescs = 0;
9397f17497SC.J. Collier
9497f17497SC.J. Collier	/*
9597f17497SC.J. Collier	 * We must append the existing free chain, if any, to the end of
9697f17497SC.J. Collier	 * newly freed chain. If the virtqueue was completely used, then
9797f17497SC.J. Collier	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
9897f17497SC.J. Collier	 */
9997f17497SC.J. Collier	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
10097f17497SC.J. Collier		vq->vq_desc_head_idx = desc_idx;
10197f17497SC.J. Collier	} else {
10297f17497SC.J. Collier		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
10397f17497SC.J. Collier		dp_tail->next = desc_idx;
10497f17497SC.J. Collier	}
10597f17497SC.J. Collier
10697f17497SC.J. Collier	vq->vq_desc_tail_idx = desc_idx_last;
10797f17497SC.J. Collier	dp->next = VQ_RING_DESC_CHAIN_END;
10897f17497SC.J. Collier}
10997f17497SC.J. Collier
11097f17497SC.J. Collierstatic uint16_t
11197f17497SC.J. Colliervirtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
11297f17497SC.J. Collier			   uint32_t *len, uint16_t num)
11397f17497SC.J. Collier{
11497f17497SC.J. Collier	struct vring_used_elem *uep;
11597f17497SC.J. Collier	struct rte_mbuf *cookie;
11697f17497SC.J. Collier	uint16_t used_idx, desc_idx;
11797f17497SC.J. Collier	uint16_t i;
11897f17497SC.J. Collier
11997f17497SC.J. Collier	/*  Caller does the check */
12097f17497SC.J. Collier	for (i = 0; i < num ; i++) {
12197f17497SC.J. Collier		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
12297f17497SC.J. Collier		uep = &vq->vq_ring.used->ring[used_idx];
12397f17497SC.J. Collier		desc_idx = (uint16_t) uep->id;
12497f17497SC.J. Collier		len[i] = uep->len;
12597f17497SC.J. Collier		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
12697f17497SC.J. Collier
12797f17497SC.J. Collier		if (unlikely(cookie == NULL)) {
12897f17497SC.J. Collier			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
12997f17497SC.J. Collier				vq->vq_used_cons_idx);
13097f17497SC.J. Collier			break;
13197f17497SC.J. Collier		}
13297f17497SC.J. Collier
13397f17497SC.J. Collier		rte_prefetch0(cookie);
13497f17497SC.J. Collier		rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
13597f17497SC.J. Collier		rx_pkts[i]  = cookie;
13697f17497SC.J. Collier		vq->vq_used_cons_idx++;
13797f17497SC.J. Collier		vq_ring_free_chain(vq, desc_idx);
13897f17497SC.J. Collier		vq->vq_descx[desc_idx].cookie = NULL;
13997f17497SC.J. Collier	}
14097f17497SC.J. Collier
14197f17497SC.J. Collier	return i;
14297f17497SC.J. Collier}
14397f17497SC.J. Collier
/* Fallback tx_free_thresh used when the application passes 0 at TX
 * queue setup time (see virtio_dev_tx_queue_setup()).
 */
#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH 32
#endif
14797f17497SC.J. Collier
14897f17497SC.J. Collier/* Cleanup from completed transmits. */
14997f17497SC.J. Collierstatic void
15097f17497SC.J. Colliervirtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
15197f17497SC.J. Collier{
15297f17497SC.J. Collier	uint16_t i, used_idx, desc_idx;
15397f17497SC.J. Collier	for (i = 0; i < num; i++) {
15497f17497SC.J. Collier		struct vring_used_elem *uep;
15597f17497SC.J. Collier		struct vq_desc_extra *dxp;
15697f17497SC.J. Collier
15797f17497SC.J. Collier		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
15897f17497SC.J. Collier		uep = &vq->vq_ring.used->ring[used_idx];
15997f17497SC.J. Collier
16097f17497SC.J. Collier		desc_idx = (uint16_t) uep->id;
16197f17497SC.J. Collier		dxp = &vq->vq_descx[desc_idx];
16297f17497SC.J. Collier		vq->vq_used_cons_idx++;
16397f17497SC.J. Collier		vq_ring_free_chain(vq, desc_idx);
16497f17497SC.J. Collier
16597f17497SC.J. Collier		if (dxp->cookie != NULL) {
16697f17497SC.J. Collier			rte_pktmbuf_free(dxp->cookie);
16797f17497SC.J. Collier			dxp->cookie = NULL;
16897f17497SC.J. Collier		}
16997f17497SC.J. Collier	}
17097f17497SC.J. Collier}
17197f17497SC.J. Collier
17297f17497SC.J. Collier
17397f17497SC.J. Collierstatic inline int
17497f17497SC.J. Colliervirtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
17597f17497SC.J. Collier{
17697f17497SC.J. Collier	struct vq_desc_extra *dxp;
17797f17497SC.J. Collier	struct virtio_hw *hw = vq->hw;
17897f17497SC.J. Collier	struct vring_desc *start_dp;
17997f17497SC.J. Collier	uint16_t needed = 1;
18097f17497SC.J. Collier	uint16_t head_idx, idx;
18197f17497SC.J. Collier
18297f17497SC.J. Collier	if (unlikely(vq->vq_free_cnt == 0))
18397f17497SC.J. Collier		return -ENOSPC;
18497f17497SC.J. Collier	if (unlikely(vq->vq_free_cnt < needed))
18597f17497SC.J. Collier		return -EMSGSIZE;
18697f17497SC.J. Collier
18797f17497SC.J. Collier	head_idx = vq->vq_desc_head_idx;
18897f17497SC.J. Collier	if (unlikely(head_idx >= vq->vq_nentries))
18997f17497SC.J. Collier		return -EFAULT;
19097f17497SC.J. Collier
19197f17497SC.J. Collier	idx = head_idx;
19297f17497SC.J. Collier	dxp = &vq->vq_descx[idx];
19397f17497SC.J. Collier	dxp->cookie = (void *)cookie;
19497f17497SC.J. Collier	dxp->ndescs = needed;
19597f17497SC.J. Collier
19697f17497SC.J. Collier	start_dp = vq->vq_ring.desc;
19797f17497SC.J. Collier	start_dp[idx].addr =
1987b53c036SRicardo Salveti		VIRTIO_MBUF_ADDR(cookie, vq) +
1997b53c036SRicardo Salveti		RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
20097f17497SC.J. Collier	start_dp[idx].len =
20197f17497SC.J. Collier		cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
20297f17497SC.J. Collier	start_dp[idx].flags =  VRING_DESC_F_WRITE;
20397f17497SC.J. Collier	idx = start_dp[idx].next;
20497f17497SC.J. Collier	vq->vq_desc_head_idx = idx;
20597f17497SC.J. Collier	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
20697f17497SC.J. Collier		vq->vq_desc_tail_idx = idx;
20797f17497SC.J. Collier	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
20897f17497SC.J. Collier	vq_update_avail_ring(vq, head_idx);
20997f17497SC.J. Collier
21097f17497SC.J. Collier	return 0;
21197f17497SC.J. Collier}
21297f17497SC.J. Collier
2136b3e017eSChristian Ehrhardt/* When doing TSO, the IP length is not included in the pseudo header
2146b3e017eSChristian Ehrhardt * checksum of the packet given to the PMD, but for virtio it is
2156b3e017eSChristian Ehrhardt * expected.
2166b3e017eSChristian Ehrhardt */
2176b3e017eSChristian Ehrhardtstatic void
2186b3e017eSChristian Ehrhardtvirtio_tso_fix_cksum(struct rte_mbuf *m)
2196b3e017eSChristian Ehrhardt{
2206b3e017eSChristian Ehrhardt	/* common case: header is not fragmented */
2216b3e017eSChristian Ehrhardt	if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
2226b3e017eSChristian Ehrhardt			m->l4_len)) {
2236b3e017eSChristian Ehrhardt		struct ipv4_hdr *iph;
2246b3e017eSChristian Ehrhardt		struct ipv6_hdr *ip6h;
2256b3e017eSChristian Ehrhardt		struct tcp_hdr *th;
2266b3e017eSChristian Ehrhardt		uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
2276b3e017eSChristian Ehrhardt		uint32_t tmp;
2286b3e017eSChristian Ehrhardt
2296b3e017eSChristian Ehrhardt		iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
2306b3e017eSChristian Ehrhardt		th = RTE_PTR_ADD(iph, m->l3_len);
2316b3e017eSChristian Ehrhardt		if ((iph->version_ihl >> 4) == 4) {
2326b3e017eSChristian Ehrhardt			iph->hdr_checksum = 0;
2336b3e017eSChristian Ehrhardt			iph->hdr_checksum = rte_ipv4_cksum(iph);
2346b3e017eSChristian Ehrhardt			ip_len = iph->total_length;
2356b3e017eSChristian Ehrhardt			ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
2366b3e017eSChristian Ehrhardt				m->l3_len);
2376b3e017eSChristian Ehrhardt		} else {
2386b3e017eSChristian Ehrhardt			ip6h = (struct ipv6_hdr *)iph;
2396b3e017eSChristian Ehrhardt			ip_paylen = ip6h->payload_len;
2406b3e017eSChristian Ehrhardt		}
2416b3e017eSChristian Ehrhardt
2426b3e017eSChristian Ehrhardt		/* calculate the new phdr checksum not including ip_paylen */
2436b3e017eSChristian Ehrhardt		prev_cksum = th->cksum;
2446b3e017eSChristian Ehrhardt		tmp = prev_cksum;
2456b3e017eSChristian Ehrhardt		tmp += ip_paylen;
2466b3e017eSChristian Ehrhardt		tmp = (tmp & 0xffff) + (tmp >> 16);
2476b3e017eSChristian Ehrhardt		new_cksum = tmp;
2486b3e017eSChristian Ehrhardt
2496b3e017eSChristian Ehrhardt		/* replace it in the packet */
2506b3e017eSChristian Ehrhardt		th->cksum = new_cksum;
2516b3e017eSChristian Ehrhardt	}
2526b3e017eSChristian Ehrhardt}
2536b3e017eSChristian Ehrhardt
2546b3e017eSChristian Ehrhardtstatic inline int
2556b3e017eSChristian Ehrhardttx_offload_enabled(struct virtio_hw *hw)
2566b3e017eSChristian Ehrhardt{
2576b3e017eSChristian Ehrhardt	return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
2586b3e017eSChristian Ehrhardt		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
2596b3e017eSChristian Ehrhardt		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
2606b3e017eSChristian Ehrhardt}
2616b3e017eSChristian Ehrhardt
/* avoid write operation when necessary, to lessen cache issues:
 * skip the store entirely when the destination already holds the
 * value, so a redundant assignment does not dirty the cache line
 */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)
267ce3d555eSChristian Ehrhardt
/*
 * Enqueue one mbuf chain (@cookie, @needed descriptors long) for
 * transmission on txvq's virtqueue.
 *
 * The virtio-net header is placed in one of three ways, chosen by the
 * caller via @can_push / @use_indirect:
 *   - can_push:     header prepended into the mbuf headroom itself;
 *   - use_indirect: one ring slot points at a per-slot indirect
 *                   descriptor table in the reserved region, whose
 *                   first entry is preset to the header;
 *   - otherwise:    first ring slot points at the header copy kept in
 *                   the reserved region, chained to the data slots.
 * Caller must have verified @needed free descriptors are available.
 */
static inline void
virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
		       uint16_t needed, int use_indirect, int can_push)
{
	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
	struct vq_desc_extra *dxp;
	struct virtqueue *vq = txvq->vq;
	struct vring_desc *start_dp;
	uint16_t seg_num = cookie->nb_segs;
	uint16_t head_idx, idx;
	uint16_t head_size = vq->hw->vtnet_hdr_size;
	struct virtio_net_hdr *hdr;
	int offload;

	offload = tx_offload_enabled(vq->hw);
	head_idx = vq->vq_desc_head_idx;
	idx = head_idx;
	/* Remember the mbuf so virtio_xmit_cleanup() can free it. */
	dxp = &vq->vq_descx[idx];
	dxp->cookie = (void *)cookie;
	dxp->ndescs = needed;

	start_dp = vq->vq_ring.desc;

	if (can_push) {
		/* prepend cannot fail, checked by caller */
		hdr = (struct virtio_net_hdr *)
			rte_pktmbuf_prepend(cookie, head_size);
		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
		 * which is wrong. Below subtract restores correct pkt size.
		 */
		cookie->pkt_len -= head_size;
		/* if offload disabled, it is not zeroed below, do it now */
		if (offload == 0) {
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	} else if (use_indirect) {
		/* setup tx ring slot to point to indirect
		 * descriptor list stored in reserved region.
		 *
		 * the first slot in indirect ring is already preset
		 * to point to the header in reserved region
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
		start_dp[idx].len   = (seg_num + 1) * sizeof(struct vring_desc);
		start_dp[idx].flags = VRING_DESC_F_INDIRECT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		/* loop below will fill in rest of the indirect elements */
		start_dp = txr[idx].tx_indir;
		idx = 1;
	} else {
		/* setup first tx ring slot to point to header
		 * stored in reserved region.
		 */
		start_dp[idx].addr  = txvq->virtio_net_hdr_mem +
			RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
		start_dp[idx].len   = vq->hw->vtnet_hdr_size;
		start_dp[idx].flags = VRING_DESC_F_NEXT;
		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;

		idx = start_dp[idx].next;
	}

	/* Checksum Offload / TSO */
	if (offload) {
		/* TSO implies the host must compute the TCP checksum. */
		if (cookie->ol_flags & PKT_TX_TCP_SEG)
			cookie->ol_flags |= PKT_TX_TCP_CKSUM;

		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_UDP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct udp_hdr,
				dgram_cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		case PKT_TX_TCP_CKSUM:
			hdr->csum_start = cookie->l2_len + cookie->l3_len;
			hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			break;

		default:
			/* No L4 checksum requested: clear any stale header
			 * fields (stores skipped when already zero).
			 */
			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
			break;
		}

		/* TCP Segmentation Offload */
		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
			virtio_tso_fix_cksum(cookie);
			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
				VIRTIO_NET_HDR_GSO_TCPV6 :
				VIRTIO_NET_HDR_GSO_TCPV4;
			hdr->gso_size = cookie->tso_segsz;
			hdr->hdr_len =
				cookie->l2_len +
				cookie->l3_len +
				cookie->l4_len;
		} else {
			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
		}
	}

	/* Fill one descriptor per mbuf segment; start_dp/idx address
	 * either the main ring or the indirect table set up above.
	 */
	do {
		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
		start_dp[idx].len   = cookie->data_len;
		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
		idx = start_dp[idx].next;
	} while ((cookie = cookie->next) != NULL);

	/* Indirect case consumed only one main-ring slot; resume the
	 * free list after it.
	 */
	if (use_indirect)
		idx = vq->vq_ring.desc[head_idx].next;

	vq->vq_desc_head_idx = idx;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = idx;
	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
	vq_update_avail_ring(vq, head_idx);
}
39797f17497SC.J. Collier
3988b25d1adSChristian Ehrhardtvoid
3998b25d1adSChristian Ehrhardtvirtio_dev_cq_start(struct rte_eth_dev *dev)
4008b25d1adSChristian Ehrhardt{
4018b25d1adSChristian Ehrhardt	struct virtio_hw *hw = dev->data->dev_private;
4028b25d1adSChristian Ehrhardt
4038b25d1adSChristian Ehrhardt	if (hw->cvq && hw->cvq->vq) {
4048b25d1adSChristian Ehrhardt		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
4058b25d1adSChristian Ehrhardt	}
4068b25d1adSChristian Ehrhardt}
40797f17497SC.J. Collier
4086b3e017eSChristian Ehrhardtint
4096b3e017eSChristian Ehrhardtvirtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
4106b3e017eSChristian Ehrhardt			uint16_t queue_idx,
4116b3e017eSChristian Ehrhardt			uint16_t nb_desc,
4126b3e017eSChristian Ehrhardt			unsigned int socket_id __rte_unused,
4136b3e017eSChristian Ehrhardt			__rte_unused const struct rte_eth_rxconf *rx_conf,
4146b3e017eSChristian Ehrhardt			struct rte_mempool *mp)
4158b25d1adSChristian Ehrhardt{
4166b3e017eSChristian Ehrhardt	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
4176b3e017eSChristian Ehrhardt	struct virtio_hw *hw = dev->data->dev_private;
4186b3e017eSChristian Ehrhardt	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
4196b3e017eSChristian Ehrhardt	struct virtnet_rx *rxvq;
4208b25d1adSChristian Ehrhardt
4218b25d1adSChristian Ehrhardt	PMD_INIT_FUNC_TRACE();
4228b25d1adSChristian Ehrhardt
4236b3e017eSChristian Ehrhardt	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
4246b3e017eSChristian Ehrhardt		nb_desc = vq->vq_nentries;
4256b3e017eSChristian Ehrhardt	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
42697f17497SC.J. Collier
4276b3e017eSChristian Ehrhardt	rxvq = &vq->rxq;
4286b3e017eSChristian Ehrhardt	rxvq->queue_id = queue_idx;
4296b3e017eSChristian Ehrhardt	rxvq->mpool = mp;
4306b3e017eSChristian Ehrhardt	if (rxvq->mpool == NULL) {
4316b3e017eSChristian Ehrhardt		rte_exit(EXIT_FAILURE,
4326b3e017eSChristian Ehrhardt			"Cannot allocate mbufs for rx virtqueue");
4336b3e017eSChristian Ehrhardt	}
4346b3e017eSChristian Ehrhardt	dev->data->rx_queues[queue_idx] = rxvq;
43597f17497SC.J. Collier
43647d9763aSLuca Boccassi	return 0;
43747d9763aSLuca Boccassi}
43847d9763aSLuca Boccassi
43947d9763aSLuca Boccassiint
44047d9763aSLuca Boccassivirtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
44147d9763aSLuca Boccassi{
44247d9763aSLuca Boccassi	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
44347d9763aSLuca Boccassi	struct virtio_hw *hw = dev->data->dev_private;
44447d9763aSLuca Boccassi	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
44547d9763aSLuca Boccassi	struct virtnet_rx *rxvq = &vq->rxq;
44647d9763aSLuca Boccassi	struct rte_mbuf *m;
44747d9763aSLuca Boccassi	uint16_t desc_idx;
44847d9763aSLuca Boccassi	int error, nbufs;
44947d9763aSLuca Boccassi
45047d9763aSLuca Boccassi	PMD_INIT_FUNC_TRACE();
45197f17497SC.J. Collier
4526b3e017eSChristian Ehrhardt	/* Allocate blank mbufs for the each rx descriptor */
4536b3e017eSChristian Ehrhardt	nbufs = 0;
4548b25d1adSChristian Ehrhardt
4556b3e017eSChristian Ehrhardt	if (hw->use_simple_rxtx) {
4566b3e017eSChristian Ehrhardt		for (desc_idx = 0; desc_idx < vq->vq_nentries;
4576b3e017eSChristian Ehrhardt		     desc_idx++) {
4586b3e017eSChristian Ehrhardt			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
4596b3e017eSChristian Ehrhardt			vq->vq_ring.desc[desc_idx].flags =
4606b3e017eSChristian Ehrhardt				VRING_DESC_F_WRITE;
4616b3e017eSChristian Ehrhardt		}
46239157ec0SLuca Boccassi
46339157ec0SLuca Boccassi		virtio_rxq_vec_setup(rxvq);
4648b25d1adSChristian Ehrhardt	}
4658b25d1adSChristian Ehrhardt
4666b3e017eSChristian Ehrhardt	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
4676b3e017eSChristian Ehrhardt	for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
4686b3e017eSChristian Ehrhardt	     desc_idx++) {
4696b3e017eSChristian Ehrhardt		vq->sw_ring[vq->vq_nentries + desc_idx] =
4706b3e017eSChristian Ehrhardt			&rxvq->fake_mbuf;
47197f17497SC.J. Collier	}
47297f17497SC.J. Collier
47339157ec0SLuca Boccassi	if (hw->use_simple_rxtx) {
47439157ec0SLuca Boccassi		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
47539157ec0SLuca Boccassi			virtio_rxq_rearm_vec(rxvq);
47639157ec0SLuca Boccassi			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
47739157ec0SLuca Boccassi		}
47839157ec0SLuca Boccassi	} else {
47939157ec0SLuca Boccassi		while (!virtqueue_full(vq)) {
48039157ec0SLuca Boccassi			m = rte_mbuf_raw_alloc(rxvq->mpool);
48139157ec0SLuca Boccassi			if (m == NULL)
48239157ec0SLuca Boccassi				break;
48397f17497SC.J. Collier
48439157ec0SLuca Boccassi			/* Enqueue allocated buffers */
4856b3e017eSChristian Ehrhardt			error = virtqueue_enqueue_recv_refill(vq, m);
48639157ec0SLuca Boccassi			if (error) {
48739157ec0SLuca Boccassi				rte_pktmbuf_free(m);
48839157ec0SLuca Boccassi				break;
48939157ec0SLuca Boccassi			}
49039157ec0SLuca Boccassi			nbufs++;
4916b3e017eSChristian Ehrhardt		}
49297f17497SC.J. Collier
49339157ec0SLuca Boccassi		vq_update_avail_idx(vq);
49439157ec0SLuca Boccassi	}
49597f17497SC.J. Collier
4966b3e017eSChristian Ehrhardt	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
49797f17497SC.J. Collier
4986b3e017eSChristian Ehrhardt	VIRTQUEUE_DUMP(vq);
49997f17497SC.J. Collier
50097f17497SC.J. Collier	return 0;
50197f17497SC.J. Collier}
50297f17497SC.J. Collier
5036b3e017eSChristian Ehrhardtstatic void
5046b3e017eSChristian Ehrhardtvirtio_update_rxtx_handler(struct rte_eth_dev *dev,
5056b3e017eSChristian Ehrhardt			   const struct rte_eth_txconf *tx_conf)
50697f17497SC.J. Collier{
5076b3e017eSChristian Ehrhardt	uint8_t use_simple_rxtx = 0;
5086b3e017eSChristian Ehrhardt	struct virtio_hw *hw = dev->data->dev_private;
5098b25d1adSChristian Ehrhardt
5106b3e017eSChristian Ehrhardt#if defined RTE_ARCH_X86
5116b3e017eSChristian Ehrhardt	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
5126b3e017eSChristian Ehrhardt		use_simple_rxtx = 1;
51339157ec0SLuca Boccassi#elif defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
5146b3e017eSChristian Ehrhardt	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
5156b3e017eSChristian Ehrhardt		use_simple_rxtx = 1;
5166b3e017eSChristian Ehrhardt#endif
5176b3e017eSChristian Ehrhardt	/* Use simple rx/tx func if single segment and no offloads */
5186b3e017eSChristian Ehrhardt	if (use_simple_rxtx &&
5196b3e017eSChristian Ehrhardt	    (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
5206b3e017eSChristian Ehrhardt	    !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
5216b3e017eSChristian Ehrhardt		PMD_INIT_LOG(INFO, "Using simple rx/tx path");
5226b3e017eSChristian Ehrhardt		dev->tx_pkt_burst = virtio_xmit_pkts_simple;
5236b3e017eSChristian Ehrhardt		dev->rx_pkt_burst = virtio_recv_pkts_vec;
5246b3e017eSChristian Ehrhardt		hw->use_simple_rxtx = use_simple_rxtx;
5256b3e017eSChristian Ehrhardt	}
52697f17497SC.J. Collier}
52797f17497SC.J. Collier
52897f17497SC.J. Collier/*
52997f17497SC.J. Collier * struct rte_eth_dev *dev: Used to update dev
53097f17497SC.J. Collier * uint16_t nb_desc: Defaults to values read from config space
53197f17497SC.J. Collier * unsigned int socket_id: Used to allocate memzone
53297f17497SC.J. Collier * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
53397f17497SC.J. Collier * uint16_t queue_idx: Just used as an index in dev txq list
53497f17497SC.J. Collier */
53597f17497SC.J. Collierint
53697f17497SC.J. Colliervirtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
53797f17497SC.J. Collier			uint16_t queue_idx,
53897f17497SC.J. Collier			uint16_t nb_desc,
5396b3e017eSChristian Ehrhardt			unsigned int socket_id __rte_unused,
54097f17497SC.J. Collier			const struct rte_eth_txconf *tx_conf)
54197f17497SC.J. Collier{
54297f17497SC.J. Collier	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
54397f17497SC.J. Collier	struct virtio_hw *hw = dev->data->dev_private;
5446b3e017eSChristian Ehrhardt	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
5458b25d1adSChristian Ehrhardt	struct virtnet_tx *txvq;
54697f17497SC.J. Collier	uint16_t tx_free_thresh;
54797f17497SC.J. Collier
54897f17497SC.J. Collier	PMD_INIT_FUNC_TRACE();
54997f17497SC.J. Collier
5506b3e017eSChristian Ehrhardt	virtio_update_rxtx_handler(dev, tx_conf);
55197f17497SC.J. Collier
5526b3e017eSChristian Ehrhardt	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
5536b3e017eSChristian Ehrhardt		nb_desc = vq->vq_nentries;
5546b3e017eSChristian Ehrhardt	vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
55597f17497SC.J. Collier
5566b3e017eSChristian Ehrhardt	txvq = &vq->txq;
5576b3e017eSChristian Ehrhardt	txvq->queue_id = queue_idx;
55897f17497SC.J. Collier
55997f17497SC.J. Collier	tx_free_thresh = tx_conf->tx_free_thresh;
56097f17497SC.J. Collier	if (tx_free_thresh == 0)
56197f17497SC.J. Collier		tx_free_thresh =
56297f17497SC.J. Collier			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
56397f17497SC.J. Collier
56497f17497SC.J. Collier	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
56597f17497SC.J. Collier		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
56697f17497SC.J. Collier			"number of TX entries minus 3 (%u)."
56797f17497SC.J. Collier			" (tx_free_thresh=%u port=%u queue=%u)\n",
56897f17497SC.J. Collier			vq->vq_nentries - 3,
56997f17497SC.J. Collier			tx_free_thresh, dev->data->port_id, queue_idx);
57097f17497SC.J. Collier		return -EINVAL;
57197f17497SC.J. Collier	}
57297f17497SC.J. Collier
57397f17497SC.J. Collier	vq->vq_free_thresh = tx_free_thresh;
57497f17497SC.J. Collier
57547d9763aSLuca Boccassi	dev->data->tx_queues[queue_idx] = txvq;
57647d9763aSLuca Boccassi	return 0;
57747d9763aSLuca Boccassi}
5786b3e017eSChristian Ehrhardt
/*
 * Second stage of TX queue setup.
 *
 * For the simple TX path the ring is split in halves: each descriptor
 * in the upper half is preset to point at the shared virtio-net header
 * in the reserved region and chains (via ->next) into its partner in
 * the lower half, which will later carry the packet data. The avail
 * ring's lower half is preset to those upper-half chain heads.
 */
int
virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
				uint16_t queue_idx)
{
	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
	struct virtio_hw *hw = dev->data->dev_private;
	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
	uint16_t mid_idx = vq->vq_nentries >> 1;
	struct virtnet_tx *txvq = &vq->txq;
	uint16_t desc_idx;

	PMD_INIT_FUNC_TRACE();

	if (hw->use_simple_rxtx) {
		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
			vq->vq_ring.avail->ring[desc_idx] =
				desc_idx + mid_idx;
			/* Header descriptor (upper half) chains into its
			 * data descriptor (lower half).
			 */
			vq->vq_ring.desc[desc_idx + mid_idx].next =
				desc_idx;
			vq->vq_ring.desc[desc_idx + mid_idx].addr =
				txvq->virtio_net_hdr_mem +
				offsetof(struct virtio_tx_region, tx_hdr);
			vq->vq_ring.desc[desc_idx + mid_idx].len =
				vq->hw->vtnet_hdr_size;
			vq->vq_ring.desc[desc_idx + mid_idx].flags =
				VRING_DESC_F_NEXT;
			vq->vq_ring.desc[desc_idx].flags = 0;
		}
		for (desc_idx = mid_idx; desc_idx < vq->vq_nentries;
		     desc_idx++)
			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
	}

	VIRTQUEUE_DUMP(vq);

	return 0;
}
61697f17497SC.J. Collier
61797f17497SC.J. Collierstatic void
61897f17497SC.J. Colliervirtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
61997f17497SC.J. Collier{
62097f17497SC.J. Collier	int error;
62197f17497SC.J. Collier	/*
62297f17497SC.J. Collier	 * Requeue the discarded mbuf. This should always be
62397f17497SC.J. Collier	 * successful since it was just dequeued.
62497f17497SC.J. Collier	 */
62597f17497SC.J. Collier	error = virtqueue_enqueue_recv_refill(vq, m);
62697f17497SC.J. Collier	if (unlikely(error)) {
62797f17497SC.J. Collier		RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
62897f17497SC.J. Collier		rte_pktmbuf_free(m);
62997f17497SC.J. Collier	}
63097f17497SC.J. Collier}
63197f17497SC.J. Collier
63297f17497SC.J. Collierstatic void
6338b25d1adSChristian Ehrhardtvirtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
63497f17497SC.J. Collier{
63597f17497SC.J. Collier	uint32_t s = mbuf->pkt_len;
63697f17497SC.J. Collier	struct ether_addr *ea;
63797f17497SC.J. Collier
63897f17497SC.J. Collier	if (s == 64) {
6398b25d1adSChristian Ehrhardt		stats->size_bins[1]++;
64097f17497SC.J. Collier	} else if (s > 64 && s < 1024) {
64197f17497SC.J. Collier		uint32_t bin;
64297f17497SC.J. Collier
64397f17497SC.J. Collier		/* count zeros, and offset into correct bin */
64497f17497SC.J. Collier		bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
6458b25d1adSChristian Ehrhardt		stats->size_bins[bin]++;
64697f17497SC.J. Collier	} else {
64797f17497SC.J. Collier		if (s < 64)
6488b25d1adSChristian Ehrhardt			stats->size_bins[0]++;
64997f17497SC.J. Collier		else if (s < 1519)
6508b25d1adSChristian Ehrhardt			stats->size_bins[6]++;
65197f17497SC.J. Collier		else if (s >= 1519)
6528b25d1adSChristian Ehrhardt			stats->size_bins[7]++;
65397f17497SC.J. Collier	}
65497f17497SC.J. Collier
65597f17497SC.J. Collier	ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
65697f17497SC.J. Collier	if (is_multicast_ether_addr(ea)) {
65797f17497SC.J. Collier		if (is_broadcast_ether_addr(ea))
6588b25d1adSChristian Ehrhardt			stats->broadcast++;
65997f17497SC.J. Collier		else
6608b25d1adSChristian Ehrhardt			stats->multicast++;
66197f17497SC.J. Collier	}
66297f17497SC.J. Collier}
66397f17497SC.J. Collier
6646b3e017eSChristian Ehrhardt/* Optionally fill offload information in structure */
6656b3e017eSChristian Ehrhardtstatic int
6666b3e017eSChristian Ehrhardtvirtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
6676b3e017eSChristian Ehrhardt{
6686b3e017eSChristian Ehrhardt	struct rte_net_hdr_lens hdr_lens;
6696b3e017eSChristian Ehrhardt	uint32_t hdrlen, ptype;
6706b3e017eSChristian Ehrhardt	int l4_supported = 0;
6716b3e017eSChristian Ehrhardt
6726b3e017eSChristian Ehrhardt	/* nothing to do */
6736b3e017eSChristian Ehrhardt	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
6746b3e017eSChristian Ehrhardt		return 0;
6756b3e017eSChristian Ehrhardt
6766b3e017eSChristian Ehrhardt	m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
6776b3e017eSChristian Ehrhardt
6786b3e017eSChristian Ehrhardt	ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
6796b3e017eSChristian Ehrhardt	m->packet_type = ptype;
6806b3e017eSChristian Ehrhardt	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
6816b3e017eSChristian Ehrhardt	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
6826b3e017eSChristian Ehrhardt	    (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
6836b3e017eSChristian Ehrhardt		l4_supported = 1;
6846b3e017eSChristian Ehrhardt
6856b3e017eSChristian Ehrhardt	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
6866b3e017eSChristian Ehrhardt		hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
6876b3e017eSChristian Ehrhardt		if (hdr->csum_start <= hdrlen && l4_supported) {
6886b3e017eSChristian Ehrhardt			m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
6896b3e017eSChristian Ehrhardt		} else {
6906b3e017eSChristian Ehrhardt			/* Unknown proto or tunnel, do sw cksum. We can assume
6916b3e017eSChristian Ehrhardt			 * the cksum field is in the first segment since the
6926b3e017eSChristian Ehrhardt			 * buffers we provided to the host are large enough.
6936b3e017eSChristian Ehrhardt			 * In case of SCTP, this will be wrong since it's a CRC
6946b3e017eSChristian Ehrhardt			 * but there's nothing we can do.
6956b3e017eSChristian Ehrhardt			 */
69647d9763aSLuca Boccassi			uint16_t csum = 0, off;
6976b3e017eSChristian Ehrhardt
6986b3e017eSChristian Ehrhardt			rte_raw_cksum_mbuf(m, hdr->csum_start,
6996b3e017eSChristian Ehrhardt				rte_pktmbuf_pkt_len(m) - hdr->csum_start,
7006b3e017eSChristian Ehrhardt				&csum);
7016b3e017eSChristian Ehrhardt			if (likely(csum != 0xffff))
7026b3e017eSChristian Ehrhardt				csum = ~csum;
7036b3e017eSChristian Ehrhardt			off = hdr->csum_offset + hdr->csum_start;
7046b3e017eSChristian Ehrhardt			if (rte_pktmbuf_data_len(m) >= off + 1)
7056b3e017eSChristian Ehrhardt				*rte_pktmbuf_mtod_offset(m, uint16_t *,
7066b3e017eSChristian Ehrhardt					off) = csum;
7076b3e017eSChristian Ehrhardt		}
7086b3e017eSChristian Ehrhardt	} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
7096b3e017eSChristian Ehrhardt		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
7106b3e017eSChristian Ehrhardt	}
7116b3e017eSChristian Ehrhardt
7126b3e017eSChristian Ehrhardt	/* GSO request, save required information in mbuf */
7136b3e017eSChristian Ehrhardt	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
7146b3e017eSChristian Ehrhardt		/* Check unsupported modes */
7156b3e017eSChristian Ehrhardt		if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
7166b3e017eSChristian Ehrhardt		    (hdr->gso_size == 0)) {
7176b3e017eSChristian Ehrhardt			return -EINVAL;
7186b3e017eSChristian Ehrhardt		}
7196b3e017eSChristian Ehrhardt
7206b3e017eSChristian Ehrhardt		/* Update mss lengthes in mbuf */
7216b3e017eSChristian Ehrhardt		m->tso_segsz = hdr->gso_size;
7226b3e017eSChristian Ehrhardt		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
7236b3e017eSChristian Ehrhardt			case VIRTIO_NET_HDR_GSO_TCPV4:
7246b3e017eSChristian Ehrhardt			case VIRTIO_NET_HDR_GSO_TCPV6:
7256b3e017eSChristian Ehrhardt				m->ol_flags |= PKT_RX_LRO | \
7266b3e017eSChristian Ehrhardt					PKT_RX_L4_CKSUM_NONE;
7276b3e017eSChristian Ehrhardt				break;
7286b3e017eSChristian Ehrhardt			default:
7296b3e017eSChristian Ehrhardt				return -EINVAL;
7306b3e017eSChristian Ehrhardt		}
7316b3e017eSChristian Ehrhardt	}
7326b3e017eSChristian Ehrhardt
7336b3e017eSChristian Ehrhardt	return 0;
7346b3e017eSChristian Ehrhardt}
7356b3e017eSChristian Ehrhardt
7366b3e017eSChristian Ehrhardtstatic inline int
7376b3e017eSChristian Ehrhardtrx_offload_enabled(struct virtio_hw *hw)
7386b3e017eSChristian Ehrhardt{
7396b3e017eSChristian Ehrhardt	return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
7406b3e017eSChristian Ehrhardt		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
7416b3e017eSChristian Ehrhardt		vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
7426b3e017eSChristian Ehrhardt}
7436b3e017eSChristian Ehrhardt
74497f17497SC.J. Collier#define VIRTIO_MBUF_BURST_SZ 64
74597f17497SC.J. Collier#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
74697f17497SC.J. Collieruint16_t
74797f17497SC.J. Colliervirtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
74897f17497SC.J. Collier{
7498b25d1adSChristian Ehrhardt	struct virtnet_rx *rxvq = rx_queue;
7508b25d1adSChristian Ehrhardt	struct virtqueue *vq = rxvq->vq;
75197f17497SC.J. Collier	struct virtio_hw *hw;
75297f17497SC.J. Collier	struct rte_mbuf *rxm, *new_mbuf;
75397f17497SC.J. Collier	uint16_t nb_used, num, nb_rx;
75497f17497SC.J. Collier	uint32_t len[VIRTIO_MBUF_BURST_SZ];
75597f17497SC.J. Collier	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
75697f17497SC.J. Collier	int error;
75797f17497SC.J. Collier	uint32_t i, nb_enqueued;
75897f17497SC.J. Collier	uint32_t hdr_size;
7596b3e017eSChristian Ehrhardt	int offload;
7606b3e017eSChristian Ehrhardt	struct virtio_net_hdr *hdr;
76197f17497SC.J. Collier
7628b25d1adSChristian Ehrhardt	nb_used = VIRTQUEUE_NUSED(vq);
76397f17497SC.J. Collier
76497f17497SC.J. Collier	virtio_rmb();
76597f17497SC.J. Collier
76697f17497SC.J. Collier	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
76797f17497SC.J. Collier	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
76897f17497SC.J. Collier	if (likely(num > DESC_PER_CACHELINE))
7698b25d1adSChristian Ehrhardt		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
77097f17497SC.J. Collier
7718b25d1adSChristian Ehrhardt	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
77297f17497SC.J. Collier	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
77397f17497SC.J. Collier
7748b25d1adSChristian Ehrhardt	hw = vq->hw;
77597f17497SC.J. Collier	nb_rx = 0;
77697f17497SC.J. Collier	nb_enqueued = 0;
77797f17497SC.J. Collier	hdr_size = hw->vtnet_hdr_size;
7786b3e017eSChristian Ehrhardt	offload = rx_offload_enabled(hw);
77997f17497SC.J. Collier
78097f17497SC.J. Collier	for (i = 0; i < num ; i++) {
78197f17497SC.J. Collier		rxm = rcv_pkts[i];
78297f17497SC.J. Collier
78397f17497SC.J. Collier		PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
78497f17497SC.J. Collier
78597f17497SC.J. Collier		if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
78697f17497SC.J. Collier			PMD_RX_LOG(ERR, "Packet drop");
78797f17497SC.J. Collier			nb_enqueued++;
7888b25d1adSChristian Ehrhardt			virtio_discard_rxbuf(vq, rxm);
7898b25d1adSChristian Ehrhardt			rxvq->stats.errors++;
79097f17497SC.J. Collier			continue;
79197f17497SC.J. Collier		}
79297f17497SC.J. Collier
79397f17497SC.J. Collier		rxm->port = rxvq->port_id;
79497f17497SC.J. Collier		rxm->data_off = RTE_PKTMBUF_HEADROOM;
79597f17497SC.J. Collier		rxm->ol_flags = 0;
79697f17497SC.J. Collier		rxm->vlan_tci = 0;
79797f17497SC.J. Collier
79897f17497SC.J. Collier		rxm->nb_segs = 1;
79997f17497SC.J. Collier		rxm->next = NULL;
80097f17497SC.J. Collier		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
80197f17497SC.J. Collier		rxm->data_len = (uint16_t)(len[i] - hdr_size);
80297f17497SC.J. Collier
8036b3e017eSChristian Ehrhardt		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
8046b3e017eSChristian Ehrhardt			RTE_PKTMBUF_HEADROOM - hdr_size);
8056b3e017eSChristian Ehrhardt
80697f17497SC.J. Collier		if (hw->vlan_strip)
80797f17497SC.J. Collier			rte_vlan_strip(rxm);
80897f17497SC.J. Collier
8096b3e017eSChristian Ehrhardt		if (offload && virtio_rx_offload(rxm, hdr) < 0) {
8106b3e017eSChristian Ehrhardt			virtio_discard_rxbuf(vq, rxm);
8116b3e017eSChristian Ehrhardt			rxvq->stats.errors++;
8126b3e017eSChristian Ehrhardt			continue;
8136b3e017eSChristian Ehrhardt		}
8146b3e017eSChristian Ehrhardt
81597f17497SC.J. Collier		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
81697f17497SC.J. Collier
81797f17497SC.J. Collier		rx_pkts[nb_rx++] = rxm;
81897f17497SC.J. Collier
8198b25d1adSChristian Ehrhardt		rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
8208b25d1adSChristian Ehrhardt		virtio_update_packet_stats(&rxvq->stats, rxm);
82197f17497SC.J. Collier	}
82297f17497SC.J. Collier
8238b25d1adSChristian Ehrhardt	rxvq->stats.packets += nb_rx;
82497f17497SC.J. Collier
82597f17497SC.J. Collier	/* Allocate new mbuf for the used descriptor */
82697f17497SC.J. Collier	error = ENOSPC;
8278b25d1adSChristian Ehrhardt	while (likely(!virtqueue_full(vq))) {
8288b25d1adSChristian Ehrhardt		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
82997f17497SC.J. Collier		if (unlikely(new_mbuf == NULL)) {
83097f17497SC.J. Collier			struct rte_eth_dev *dev
83197f17497SC.J. Collier				= &rte_eth_devices[rxvq->port_id];
83297f17497SC.J. Collier			dev->data->rx_mbuf_alloc_failed++;
83397f17497SC.J. Collier			break;
83497f17497SC.J. Collier		}
8358b25d1adSChristian Ehrhardt		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
83697f17497SC.J. Collier		if (unlikely(error)) {
83797f17497SC.J. Collier			rte_pktmbuf_free(new_mbuf);
83897f17497SC.J. Collier			break;
83997f17497SC.J. Collier		}
84097f17497SC.J. Collier		nb_enqueued++;
84197f17497SC.J. Collier	}
84297f17497SC.J. Collier
84397f17497SC.J. Collier	if (likely(nb_enqueued)) {
8448b25d1adSChristian Ehrhardt		vq_update_avail_idx(vq);
84597f17497SC.J. Collier
8468b25d1adSChristian Ehrhardt		if (unlikely(virtqueue_kick_prepare(vq))) {
8478b25d1adSChristian Ehrhardt			virtqueue_notify(vq);
8488b25d1adSChristian Ehrhardt			PMD_RX_LOG(DEBUG, "Notified");
84997f17497SC.J. Collier		}
85097f17497SC.J. Collier	}
85197f17497SC.J. Collier
85297f17497SC.J. Collier	return nb_rx;
85397f17497SC.J. Collier}
85497f17497SC.J. Collier
85597f17497SC.J. Collieruint16_t
85697f17497SC.J. Colliervirtio_recv_mergeable_pkts(void *rx_queue,
85797f17497SC.J. Collier			struct rte_mbuf **rx_pkts,
85897f17497SC.J. Collier			uint16_t nb_pkts)
85997f17497SC.J. Collier{
8608b25d1adSChristian Ehrhardt	struct virtnet_rx *rxvq = rx_queue;
8618b25d1adSChristian Ehrhardt	struct virtqueue *vq = rxvq->vq;
86297f17497SC.J. Collier	struct virtio_hw *hw;
86397f17497SC.J. Collier	struct rte_mbuf *rxm, *new_mbuf;
86497f17497SC.J. Collier	uint16_t nb_used, num, nb_rx;
86597f17497SC.J. Collier	uint32_t len[VIRTIO_MBUF_BURST_SZ];
86697f17497SC.J. Collier	struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
86797f17497SC.J. Collier	struct rte_mbuf *prev;
86897f17497SC.J. Collier	int error;
86997f17497SC.J. Collier	uint32_t i, nb_enqueued;
87097f17497SC.J. Collier	uint32_t seg_num;
87197f17497SC.J. Collier	uint16_t extra_idx;
87297f17497SC.J. Collier	uint32_t seg_res;
87397f17497SC.J. Collier	uint32_t hdr_size;
8746b3e017eSChristian Ehrhardt	int offload;
87597f17497SC.J. Collier
8768b25d1adSChristian Ehrhardt	nb_used = VIRTQUEUE_NUSED(vq);
87797f17497SC.J. Collier
87897f17497SC.J. Collier	virtio_rmb();
87997f17497SC.J. Collier
8808b25d1adSChristian Ehrhardt	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
88197f17497SC.J. Collier
8828b25d1adSChristian Ehrhardt	hw = vq->hw;
88397f17497SC.J. Collier	nb_rx = 0;
88497f17497SC.J. Collier	i = 0;
88597f17497SC.J. Collier	nb_enqueued = 0;
88697f17497SC.J. Collier	seg_num = 0;
88797f17497SC.J. Collier	extra_idx = 0;
88897f17497SC.J. Collier	seg_res = 0;
88997f17497SC.J. Collier	hdr_size = hw->vtnet_hdr_size;
8906b3e017eSChristian Ehrhardt	offload = rx_offload_enabled(hw);
89197f17497SC.J. Collier
89297f17497SC.J. Collier	while (i < nb_used) {
89397f17497SC.J. Collier		struct virtio_net_hdr_mrg_rxbuf *header;
89497f17497SC.J. Collier
89597f17497SC.J. Collier		if (nb_rx == nb_pkts)
89697f17497SC.J. Collier			break;
89797f17497SC.J. Collier
8988b25d1adSChristian Ehrhardt		num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
89997f17497SC.J. Collier		if (num != 1)
90097f17497SC.J. Collier			continue;
90197f17497SC.J. Collier
90297f17497SC.J. Collier		i++;
90397f17497SC.J. Collier
9048b25d1adSChristian Ehrhardt		PMD_RX_LOG(DEBUG, "dequeue:%d", num);
9058b25d1adSChristian Ehrhardt		PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
90697f17497SC.J. Collier
90797f17497SC.J. Collier		rxm = rcv_pkts[0];
90897f17497SC.J. Collier
90997f17497SC.J. Collier		if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
9108b25d1adSChristian Ehrhardt			PMD_RX_LOG(ERR, "Packet drop");
91197f17497SC.J. Collier			nb_enqueued++;
9128b25d1adSChristian Ehrhardt			virtio_discard_rxbuf(vq, rxm);
9138b25d1adSChristian Ehrhardt			rxvq->stats.errors++;
91497f17497SC.J. Collier			continue;
91597f17497SC.J. Collier		}
91697f17497SC.J. Collier
91797f17497SC.J. Collier		header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
91897f17497SC.J. Collier			RTE_PKTMBUF_HEADROOM - hdr_size);
91997f17497SC.J. Collier		seg_num = header->num_buffers;
92097f17497SC.J. Collier
92197f17497SC.J. Collier		if (seg_num == 0)
92297f17497SC.J. Collier			seg_num = 1;
92397f17497SC.J. Collier
92497f17497SC.J. Collier		rxm->data_off = RTE_PKTMBUF_HEADROOM;
92597f17497SC.J. Collier		rxm->nb_segs = seg_num;
92697f17497SC.J. Collier		rxm->next = NULL;
92797f17497SC.J. Collier		rxm->ol_flags = 0;
92897f17497SC.J. Collier		rxm->vlan_tci = 0;
92997f17497SC.J. Collier		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
93097f17497SC.J. Collier		rxm->data_len = (uint16_t)(len[0] - hdr_size);
93197f17497SC.J. Collier
93297f17497SC.J. Collier		rxm->port = rxvq->port_id;
93397f17497SC.J. Collier		rx_pkts[nb_rx] = rxm;
93497f17497SC.J. Collier		prev = rxm;
93597f17497SC.J. Collier
9366b3e017eSChristian Ehrhardt		if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
9376b3e017eSChristian Ehrhardt			virtio_discard_rxbuf(vq, rxm);
9386b3e017eSChristian Ehrhardt			rxvq->stats.errors++;
9396b3e017eSChristian Ehrhardt			continue;
9406b3e017eSChristian Ehrhardt		}
9416b3e017eSChristian Ehrhardt
94297f17497SC.J. Collier		seg_res = seg_num - 1;
94397f17497SC.J. Collier
94497f17497SC.J. Collier		while (seg_res != 0) {
94597f17497SC.J. Collier			/*
94697f17497SC.J. Collier			 * Get extra segments for current uncompleted packet.
94797f17497SC.J. Collier			 */
94897f17497SC.J. Collier			uint16_t  rcv_cnt =
94997f17497SC.J. Collier				RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
9508b25d1adSChristian Ehrhardt			if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
95197f17497SC.J. Collier				uint32_t rx_num =
9528b25d1adSChristian Ehrhardt					virtqueue_dequeue_burst_rx(vq,
95397f17497SC.J. Collier					rcv_pkts, len, rcv_cnt);
95497f17497SC.J. Collier				i += rx_num;
95597f17497SC.J. Collier				rcv_cnt = rx_num;
95697f17497SC.J. Collier			} else {
95797f17497SC.J. Collier				PMD_RX_LOG(ERR,
9588b25d1adSChristian Ehrhardt					   "No enough segments for packet.");
95997f17497SC.J. Collier				nb_enqueued++;
9608b25d1adSChristian Ehrhardt				virtio_discard_rxbuf(vq, rxm);
9618b25d1adSChristian Ehrhardt				rxvq->stats.errors++;
96297f17497SC.J. Collier				break;
96397f17497SC.J. Collier			}
96497f17497SC.J. Collier
96597f17497SC.J. Collier			extra_idx = 0;
96697f17497SC.J. Collier
96797f17497SC.J. Collier			while (extra_idx < rcv_cnt) {
96897f17497SC.J. Collier				rxm = rcv_pkts[extra_idx];
96997f17497SC.J. Collier
97097f17497SC.J. Collier				rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
97197f17497SC.J. Collier				rxm->next = NULL;
97297f17497SC.J. Collier				rxm->pkt_len = (uint32_t)(len[extra_idx]);
97397f17497SC.J. Collier				rxm->data_len = (uint16_t)(len[extra_idx]);
97497f17497SC.J. Collier
97597f17497SC.J. Collier				if (prev)
97697f17497SC.J. Collier					prev->next = rxm;
97797f17497SC.J. Collier
97897f17497SC.J. Collier				prev = rxm;
97997f17497SC.J. Collier				rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
98097f17497SC.J. Collier				extra_idx++;
98197f17497SC.J. Collier			};
98297f17497SC.J. Collier			seg_res -= rcv_cnt;
98397f17497SC.J. Collier		}
98497f17497SC.J. Collier
98597f17497SC.J. Collier		if (hw->vlan_strip)
98697f17497SC.J. Collier			rte_vlan_strip(rx_pkts[nb_rx]);
98797f17497SC.J. Collier
98897f17497SC.J. Collier		VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
98997f17497SC.J. Collier			rx_pkts[nb_rx]->data_len);
99097f17497SC.J. Collier
9918b25d1adSChristian Ehrhardt		rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
9928b25d1adSChristian Ehrhardt		virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
99397f17497SC.J. Collier		nb_rx++;
99497f17497SC.J. Collier	}
99597f17497SC.J. Collier
9968b25d1adSChristian Ehrhardt	rxvq->stats.packets += nb_rx;
99797f17497SC.J. Collier
99897f17497SC.J. Collier	/* Allocate new mbuf for the used descriptor */
99997f17497SC.J. Collier	error = ENOSPC;
10008b25d1adSChristian Ehrhardt	while (likely(!virtqueue_full(vq))) {
10018b25d1adSChristian Ehrhardt		new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
100297f17497SC.J. Collier		if (unlikely(new_mbuf == NULL)) {
100397f17497SC.J. Collier			struct rte_eth_dev *dev
100497f17497SC.J. Collier				= &rte_eth_devices[rxvq->port_id];
100597f17497SC.J. Collier			dev->data->rx_mbuf_alloc_failed++;
100697f17497SC.J. Collier			break;
100797f17497SC.J. Collier		}
10088b25d1adSChristian Ehrhardt		error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
100997f17497SC.J. Collier		if (unlikely(error)) {
101097f17497SC.J. Collier			rte_pktmbuf_free(new_mbuf);
101197f17497SC.J. Collier			break;
101297f17497SC.J. Collier		}
101397f17497SC.J. Collier		nb_enqueued++;
101497f17497SC.J. Collier	}
101597f17497SC.J. Collier
101697f17497SC.J. Collier	if (likely(nb_enqueued)) {
10178b25d1adSChristian Ehrhardt		vq_update_avail_idx(vq);
101897f17497SC.J. Collier
10198b25d1adSChristian Ehrhardt		if (unlikely(virtqueue_kick_prepare(vq))) {