/* Copyright 2008-2016 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>

#include "enic_compat.h"
#include "rq_enet_desc.h"
#include "enic.h"

#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
/* Prefetch a cache line into all cache levels. */
#define rte_enic_prefetch(p) rte_prefetch0(p)
#else
#define rte_enic_prefetch(p) do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif

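/*
 * Helpers that pull individual fields out of the Rx completion (CQ)
 * descriptor. The adapter writes these fields in little-endian order,
 * so each accessor byte-swaps as needed and masks off the neighbouring
 * flag or index bits.
 */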
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
}

static inline uint16_t
enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
{
	return le16_to_cpu(crd->bytes_written_flags) &
			   ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_desc_packet_error(uint16_t bwflags)
{
	return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED;
}

static inline uint8_t
enic_cq_rx_desc_eop(uint16_t ciflags)
{
	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
		== CQ_ENET_RQ_DESC_FLAGS_EOP;
}

static inline uint8_t
enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
{
	return (le16_to_cpu(cqrd->q_number_rss_type_flags) &
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC;
}

static inline uint8_t
enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
{
	return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK;
}

static inline uint8_t
enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
{
	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
}

static inline uint32_t
enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
{
	return le32_to_cpu(cqrd->rss_hash);
}

static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
	return le16_to_cpu(cqrd->vlan);
}

static inline uint16_t
enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	return le16_to_cpu(cqrd->bytes_written_flags) &
		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}

static inline uint8_t
enic_cq_rx_check_err(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t bwflags;

	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	if (unlikely(enic_cq_rx_desc_packet_error(bwflags)))
		return 1;
	return 0;
}

/* Lookup table to translate Rx completion flags to the mbuf packet type. */
static inline uint32_t
enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint8_t cqrd_flags = cqrd->flags;
	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
		[0x00] = RTE_PTYPE_UNKNOWN,
		[0x20] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x22] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x24] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x60] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x62] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x64] = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x10] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_NONFRAG,
		[0x12] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x14] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		[0x50] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_FRAG,
		[0x52] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_UDP,
		[0x54] = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_L4_TCP,
		/* All others reserved */
	};
	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
	return cq_type_table[cqrd_flags];
}

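/*
 * Convert the Rx completion descriptor into mbuf offload flags:
 * VLAN presence/stripping, the RSS hash, and IPv4/L4 checksum status.
 * The flags are only meaningful on the completion that carries EOP.
 */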
static inline void
enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
{
	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
	uint16_t ciflags, bwflags, pkt_flags = 0, vlan_tci;
	ciflags = enic_cq_rx_desc_ciflags(cqrd);
	bwflags = enic_cq_rx_desc_bwflags(cqrd);
	vlan_tci = enic_cq_rx_desc_vlan(cqrd);

	mbuf->ol_flags = 0;

	/* flags are meaningless if !EOP */
	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
		goto mbuf_flags_done;

	/* VLAN stripped flag; the L2 packet type is also set here */
	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
		pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
	} else {
		if (vlan_tci != 0) {
			pkt_flags |= PKT_RX_VLAN_PKT;
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
		} else {
			mbuf->packet_type |= RTE_PTYPE_L2_ETHER;
		}
	}
	mbuf->vlan_tci = vlan_tci;

	/* RSS flag */
	if (enic_cq_rx_desc_rss_type(cqrd)) {
		pkt_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
	}

	/* checksum flags */
	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
		(mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
		uint32_t l4_flags = mbuf->packet_type & RTE_PTYPE_L4_MASK;

		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
		if (l4_flags == RTE_PTYPE_L4_UDP ||
		    l4_flags == RTE_PTYPE_L4_TCP) {
			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
		}
	}

 mbuf_flags_done:
	mbuf->ol_flags = pkt_flags;
}

/* Dummy receive function, installed in place of the real Rx handler so
 * that queue reconfiguration can be done safely.
 */
uint16_t
enic_dummy_recv_pkts(__rte_unused void *rx_queue,
		     __rte_unused struct rte_mbuf **rx_pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}

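/*
 * Burst receive. Completions are taken from the CQ shared by the
 * start-of-packet (SOP) RQ and its optional data RQ: ownership of a CQ
 * entry is indicated by its color bit, which flips on every ring
 * wraparound. For each completion the returned mbuf is replaced in
 * place by a freshly allocated one, segments are chained for scattered
 * Rx, and the RQ posted-index doorbells are rung once rx_free_thresh
 * descriptors have accumulated.
 */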
uint16_t
enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
	       uint16_t nb_pkts)
{
	struct vnic_rq *sop_rq = rx_queue;
	struct vnic_rq *data_rq;
	struct vnic_rq *rq;
	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
	uint16_t cq_idx;
	uint16_t rq_idx;
	uint16_t rq_num;
	struct rte_mbuf *nmb, *rxmb;
	uint16_t nb_rx = 0;
	struct vnic_cq *cq;
	volatile struct cq_desc *cqd_ptr;
	uint8_t color;
	uint16_t seg_length;
	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;

	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;

	data_rq = &enic->rq[sop_rq->data_queue_idx];

	while (nb_rx < nb_pkts) {
		volatile struct rq_enet_desc *rqd_ptr;
		dma_addr_t dma_addr;
		struct cq_desc cqd;
		uint8_t packet_error;
		uint16_t ciflags;

		/* Check for packets available: a completion whose color
		 * still matches last_color has not been written yet.
		 */
		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
			& CQ_DESC_COLOR_MASK;
		if (color == cq->last_color)
			break;

		/* Get the cq descriptor and extract rq info from it */
		cqd = *cqd_ptr;
		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;

		rq = &enic->rq[rq_num];
		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;

		/* allocate a new mbuf */
		nmb = rte_mbuf_raw_alloc(rq->mp);
		if (nmb == NULL) {
			rte_atomic64_inc(&enic->soft_stats.rx_nombuf);
			break;
		}

		/* A packet error means descriptor and data are untrusted */
		packet_error = enic_cq_rx_check_err(&cqd);

		/* Get the mbuf to return and replace with one just allocated */
		rxmb = rq->mbuf_ring[rq_idx];
		rq->mbuf_ring[rq_idx] = nmb;

		/* Increment cqd, rqd, mbuf_table index and flip the
		 * expected color when the completion ring wraps.
		 */
		cq_idx++;
		if (unlikely(cq_idx == cq->ring.desc_count)) {
			cq_idx = 0;
			cq->last_color = cq->last_color ? 0 : 1;
		}

		/* Prefetch next mbuf & desc while processing current one */
		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
		rte_enic_prefetch(cqd_ptr);

		ciflags = enic_cq_rx_desc_ciflags(
			(struct cq_enet_rq_desc *)&cqd);

		/* Push descriptor for newly allocated mbuf */
		nmb->data_off = RTE_PKTMBUF_HEADROOM;
		dma_addr = (dma_addr_t)(nmb->buf_physaddr +
					RTE_PKTMBUF_HEADROOM);
		rq_enet_desc_enc(rqd_ptr, dma_addr,
				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
				: RQ_ENET_TYPE_NOT_SOP),
				nmb->buf_len - RTE_PKTMBUF_HEADROOM);

		/* Fill in the rest of the mbuf */
		seg_length = enic_cq_rx_desc_n_bytes(&cqd);

		if (rq->is_sop) {
			first_seg = rxmb;
			first_seg->nb_segs = 1;
			first_seg->pkt_len = seg_length;
		} else {
			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
							+ seg_length);
			first_seg->nb_segs++;
			last_seg->next = rxmb;
		}

		rxmb->next = NULL;
		rxmb->port = enic->port_id;
		rxmb->data_len = seg_length;

		rq->rx_nb_hold++;

		if (!(enic_cq_rx_desc_eop(ciflags))) {
			last_seg = rxmb;
			continue;
		}

		/* cq rx flags are only valid if eop bit is set */
		first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
		enic_cq_rx_to_pkt_flags(&cqd, first_seg);

		if (unlikely(packet_error)) {
			rte_pktmbuf_free(first_seg);
			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
			continue;
		}

		/* prefetch mbuf data for caller */
		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
				    RTE_PKTMBUF_HEADROOM));

		/* store the mbuf address into the next entry of the array */
		rx_pkts[nb_rx++] = first_seg;
	}

	sop_rq->pkt_first_seg = first_seg;
	sop_rq->pkt_last_seg = last_seg;

	cq->to_clean = cq_idx;

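	/* Once enough new buffers have been posted, advance the RQ posted
	 * indexes and ring the doorbells. The memory barrier makes the
	 * descriptor writes visible to the adapter before the data RQ and
	 * then the SOP RQ doorbells are written.
	 */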
	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
	    sop_rq->rx_free_thresh) {
		if (data_rq->in_use) {
			data_rq->posted_index =
				enic_ring_add(data_rq->ring.desc_count,
					      data_rq->posted_index,
					      data_rq->rx_nb_hold);
			data_rq->rx_nb_hold = 0;
		}
		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
						     sop_rq->posted_index,
						     sop_rq->rx_nb_hold);
		sop_rq->rx_nb_hold = 0;

		rte_mb();
		if (data_rq->in_use)
			iowrite32(data_rq->posted_index,
				  &data_rq->ctrl->posted_index);
		rte_compiler_barrier();
		iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
	}

	return nb_rx;
}

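/*
 * Free transmitted mbufs, walking the WQ from tail_idx up to
 * completed_index. Mbufs belonging to the same mempool are batched and
 * returned with a single rte_mempool_put_bulk() call.
 */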
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
	struct vnic_wq_buf *buf;
	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
	unsigned int nb_to_free, nb_free = 0, i;
	struct rte_mempool *pool;
	unsigned int tail_idx;
	unsigned int desc_count = wq->ring.desc_count;

	nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
				   + 1;
	tail_idx = wq->tail_idx;
	buf = &wq->bufs[tail_idx];
	pool = ((struct rte_mbuf *)buf->mb)->pool;
	for (i = 0; i < nb_to_free; i++) {
		buf = &wq->bufs[tail_idx];
		m = __rte_pktmbuf_prefree_seg((struct rte_mbuf *)(buf->mb));
		buf->mb = NULL;

		if (unlikely(m == NULL)) {
			tail_idx = enic_ring_incr(desc_count, tail_idx);
			continue;
		}

		if (likely(m->pool == pool)) {
			RTE_ASSERT(nb_free < ENIC_MAX_WQ_DESCS);
			free[nb_free++] = m;
		} else {
			rte_mempool_put_bulk(pool, (void *)free, nb_free);
			free[0] = m;
			nb_free = 1;
			pool = m->pool;
		}
		tail_idx = enic_ring_incr(desc_count, tail_idx);
	}

	if (nb_free > 0)
		rte_mempool_put_bulk(pool, (void **)free, nb_free);

	wq->tail_idx = tail_idx;
	wq->ring.desc_avail += nb_to_free;
}

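/*
 * Reclaim completed Tx descriptors. The adapter posts the most recent
 * completed index to the cqmsg memzone; WQ buffers are freed only when
 * that index has moved since the last cleanup.
 */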
unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
{
	u16 completed_index;

	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;

	if (wq->last_completed_index != completed_index) {
		enic_free_wq_bufs(wq, completed_index);
		wq->last_completed_index = completed_index;
	}
	return 0;
}

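/*
 * Burst transmit. Each mbuf segment consumes one WQ descriptor; the
 * VLAN tag and checksum offload requests are encoded into every
 * descriptor of the packet and EOP is set only on the last segment.
 * The posted-index doorbell is written once per burst.
 */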
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	uint16_t index;
	unsigned int pkt_len, data_len;
	unsigned int nb_segs;
	struct rte_mbuf *tx_pkt;
	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
	struct enic *enic = vnic_dev_priv(wq->vdev);
	unsigned short vlan_id;
	uint64_t ol_flags;
	uint64_t ol_flags_mask;
	unsigned int wq_desc_avail;
	int head_idx;
	struct vnic_wq_buf *buf;
	unsigned int desc_count;
	struct wq_enet_desc *descs, *desc_p, desc_tmp;
	uint16_t mss;
	uint8_t vlan_tag_insert;
	uint8_t eop;
	uint64_t bus_addr;

	enic_cleanup_wq(enic, wq);
	wq_desc_avail = vnic_wq_desc_avail(wq);
	head_idx = wq->head_idx;
	desc_count = wq->ring.desc_count;
	ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK;

	nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX);

	for (index = 0; index < nb_pkts; index++) {
		tx_pkt = *tx_pkts++;
		pkt_len = tx_pkt->pkt_len;
		data_len = tx_pkt->data_len;
		ol_flags = tx_pkt->ol_flags;
		nb_segs = tx_pkt->nb_segs;

		if (pkt_len > ENIC_TX_MAX_PKT_SIZE) {
			rte_pktmbuf_free(tx_pkt);
			rte_atomic64_inc(&enic->soft_stats.tx_oversized);
			continue;
		}

		if (nb_segs > wq_desc_avail) {
			if (index > 0)
				goto post;
			goto done;
		}

		mss = 0;
		vlan_id = 0;
		vlan_tag_insert = 0;
		bus_addr = (dma_addr_t)
			   (tx_pkt->buf_physaddr + tx_pkt->data_off);

		descs = (struct wq_enet_desc *)wq->ring.descs;
		desc_p = descs + head_idx;

		eop = (data_len == pkt_len);

		if (ol_flags & ol_flags_mask) {
			if (ol_flags & PKT_TX_VLAN_PKT) {
				vlan_tag_insert = 1;
				vlan_id = tx_pkt->vlan_tci;
			}

			if (ol_flags & PKT_TX_IP_CKSUM)
				mss |= ENIC_CALC_IP_CKSUM;

			/* The NIC uses a single bit to request TCP/UDP
			 * checksum offload.
			 */
			switch (ol_flags & PKT_TX_L4_MASK) {
			case PKT_TX_TCP_CKSUM:
			case PKT_TX_UDP_CKSUM:
				mss |= ENIC_CALC_TCP_UDP_CKSUM;
				break;
			}
		}

		wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop,
				 eop, 0, vlan_tag_insert, vlan_id, 0);

		*desc_p = desc_tmp;
		buf = &wq->bufs[head_idx];
		buf->mb = (void *)tx_pkt;
		head_idx = enic_ring_incr(desc_count, head_idx);
		wq_desc_avail--;

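		/* For a multi-segment packet, post one more descriptor per
		 * remaining segment carrying the same offload fields; EOP
		 * is set only on the final segment.
		 */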
		if (!eop) {
			for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt =
			    tx_pkt->next) {
				data_len = tx_pkt->data_len;

				if (tx_pkt->next == NULL)
					eop = 1;
				desc_p = descs + head_idx;
				bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr
					   + tx_pkt->data_off);
				wq_enet_desc_enc((struct wq_enet_desc *)
						 &desc_tmp, bus_addr, data_len,
						 mss, 0, 0, eop, eop, 0,
						 vlan_tag_insert, vlan_id, 0);

				*desc_p = desc_tmp;
				buf = &wq->bufs[head_idx];
				buf->mb = (void *)tx_pkt;
				head_idx = enic_ring_incr(desc_count, head_idx);
				wq_desc_avail--;
			}
		}
	}
 post:
	rte_wmb();
	iowrite32(head_idx, &wq->ctrl->posted_index);
 done:
	wq->ring.desc_avail = wq_desc_avail;
	wq->head_idx = head_idx;

	return index;
}