/* sge.c revision 9ca4a157 */
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2014-2015 Chelsio Communications.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Chelsio Communications nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>

#include "common.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "cxgbe.h"

static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq);

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 64U

#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256

/*
 * Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
 * per mbuf buffer).  We currently only support two sizes for 1500- and
 * 9000-byte MTUs. We could easily support more but there doesn't seem to be
 * much need for that ...
 */
#define FL_MTU_SMALL 1500
#define FL_MTU_LARGE 9000

static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
					  unsigned int mtu)
{
	struct sge *s = &adapter->sge;

	return CXGBE_ALIGN(s->pktshift + ETHER_HDR_LEN + VLAN_HLEN + mtu,
			   s->fl_align);
}
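
/*
 * Worked example of the rounding above, assuming a typical pktshift of 2
 * and an fl_align of 64 (both are read from the adapter at init time):
 * for FL_MTU_SMALL, 2 + 14 + 4 + 1500 = 1520 bytes rounds up to 1536;
 * for FL_MTU_LARGE, 2 + 14 + 4 + 9000 = 9020 bytes rounds up to 9024.
 */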

#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)

/*
 * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * these to specify the buffer size as an index into the SGE Free List Buffer
 * Size register array.  We also use bit 4, when the buffer has been unmapped
 * for DMA, but this is of course never sent to the hardware and is only used
 * to prevent double unmappings.  All of the above requires that the Free List
 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
 * Free List Buffer alignment is 32 bytes, this works out for us ...
 */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (contiguous) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};

/**
 * txq_avail - return the number of available slots in a Tx queue
 * @q: the Tx queue
 *
 * Returns the number of descriptors in a Tx queue available to write new
 * packets.
 */
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
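
/*
 * Note: one descriptor is deliberately kept unused so that a completely
 * full ring can never look identical to an empty one; hence the "- 1"
 * above, mirroring the unpopulated-slot rule documented for fl_cap()
 * below.
 */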

static int map_mbuf(struct rte_mbuf *mbuf, dma_addr_t *addr)
{
	struct rte_mbuf *m = mbuf;

	for (; m; m = m->next, addr++) {
		*addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
		if (*addr == 0)
			goto out_err;
	}
	return 0;

out_err:
	return -ENOMEM;
}

/**
 * free_tx_desc - reclaims Tx descriptors and their buffers
 * @q: the Tx queue to reclaim descriptors from
 * @n: the number of descriptors to reclaim
 *
 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 * Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = 0;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		if (d->coalesce.idx) {
			int i;

			for (i = 0; i < d->coalesce.idx; i++) {
				rte_pktmbuf_free(d->coalesce.mbuf[i]);
				d->coalesce.mbuf[i] = NULL;
			}
			d->coalesce.idx = 0;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
		RTE_MBUF_PREFETCH_TO_FREE(&q->sdesc->mbuf->pool);
	}
}

static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
	struct tx_sw_desc *d;
	unsigned int cidx = q->cidx;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->mbuf) {                       /* an SGL is present */
			rte_pktmbuf_free(d->mbuf);
			d->mbuf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 * fl_cap - return the capacity of a free-buffer list
 * @fl: the FL
 *
 * Returns the capacity of a free-buffer list.  The capacity is less than
 * the size because one descriptor needs to be left unpopulated, otherwise
 * HW will think the FL is empty.
 */
static inline unsigned int fl_cap(const struct sge_fl *fl)
{
	return fl->size - 8;   /* 1 descriptor = 8 buffers */
}
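
/*
 * The free list is sized in buffer pointers, but the hardware fetches
 * them one descriptor (8 pointers) at a time, which is why a full
 * descriptor's worth of slots is kept unpopulated above rather than a
 * single slot.
 */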

/**
 * fl_starving - return whether a Free List is starving.
 * @adapter: pointer to the adapter
 * @fl: the Free List
 *
 * Tests specified Free List to see whether the number of buffers
 * available to the hardware has fallen below our "starvation"
 * threshold.
 */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}

static inline unsigned int get_buf_size(struct adapter *adapter,
					const struct rx_sw_desc *d)
{
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	unsigned int buf_size = 0;

	switch (rx_buf_size_idx) {
	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG_ON(1);
		/* NOT REACHED */
	}

	return buf_size;
}

/**
 * free_rx_bufs - free the Rx buffers on an SGE free list
 * @q: the SGE free list to free buffers from
 * @n: how many buffers to free
 *
 * Release the next @n buffers on an SGE free-buffer Rx queue.   The
 * buffers must be made inaccessible to HW before calling this function.
 */
static void free_rx_bufs(struct sge_fl *q, int n)
{
	unsigned int cidx = q->cidx;
	struct rx_sw_desc *d;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->buf) {
			rte_pktmbuf_free(d->buf);
			d->buf = NULL;
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
		q->avail--;
	}
	q->cidx = cidx;
}

/**
 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
 * @q: the SGE free list
 *
 * Unmap the current buffer on an SGE free-buffer Rx queue.   The
 * buffer must be made inaccessible to HW before calling this function.
 *
 * This is similar to @free_rx_bufs above but does not free the buffer.
 * Do note that the FL still loses any further access to the buffer.
 */
static void unmap_rx_buf(struct sge_fl *q)
{
	if (++q->cidx == q->size)
		q->cidx = 0;
	q->avail--;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 64) {
		u32 val = adap->params.arch.sge_fl_db;

		if (is_t4(adap->params.chip))
			val |= V_PIDX(q->pend_cred / 8);
		else
			val |= V_PIDX_T5(q->pend_cred / 8);

		/*
		 * Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/*
		 * If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(!q->bar2_addr)) {
			t4_write_reg_relaxed(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
					     val | V_QID(q->cntxt_id));
		} else {
			writel_relaxed(val | V_QID(q->bar2_qid),
				       (void *)((uintptr_t)q->bar2_addr +
				       SGE_UDB_KDOORBELL));

			/*
			 * This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;
	}
}
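
/*
 * Credit accounting example for ring_fl_db() above: pend_cred counts
 * individual buffers while the hardware PIDX advances in units of 8
 * buffers (one FL descriptor).  With pend_cred == 71 we ring the
 * doorbell for 71 / 8 == 8 descriptors worth of buffers (64 buffers)
 * and keep the remaining 71 & 7 == 7 buffers pending for the next
 * update.
 */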

static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
				  dma_addr_t mapping)
{
	sd->buf = buf;
	sd->dma_addr = mapping;      /* includes size low bits */
}

/**
 * refill_fl_usembufs - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
 * allocated from the associated mempool.  The caller must assure that
 * @n does not exceed the queue's capacity.  If afterwards the queue is
 * found critically low, mark it as starving.
 *
 * Returns the number of buffers allocated.
 */
static unsigned int refill_fl_usembufs(struct adapter *adap, struct sge_fl *q,
				       int n)
{
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, fl);
	unsigned int cred = q->avail;
	__be64 *d = &q->desc[q->pidx];
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
	struct rte_mbuf *buf_bulk[n];
	int ret, i;
	struct rte_pktmbuf_pool_private *mbp_priv;
	u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.jumbo_frame;

	/* Use jumbo mtu buffers iff mbuf data room size can fit jumbo data. */
	mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
	if (jumbo_en &&
	    ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
		buf_size_idx = RX_LARGE_MTU_BUF;

	ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
	if (unlikely(ret != 0)) {
		dev_debug(adap, "%s: failed to allocate fl entries in bulk ..\n",
			  __func__);
		q->alloc_failed++;
		rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
		goto out;
	}

	for (i = 0; i < n; i++) {
		struct rte_mbuf *mbuf = buf_bulk[i];
		dma_addr_t mapping;

		if (!mbuf) {
			dev_debug(adap, "%s: mbuf alloc failed\n", __func__);
			q->alloc_failed++;
			rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
			goto out;
		}

		rte_mbuf_refcnt_set(mbuf, 1);
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->next = NULL;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->rspq.port_id;

		mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
		mapping |= buf_size_idx;
		*d++ = cpu_to_be64(mapping);
		set_rx_sw_desc(sd, mbuf, mapping);
		sd++;

		q->avail++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			sd = q->sdesc;
			d = q->desc;
		}
	}

out:    cred = q->avail - cred;
	q->pend_cred += cred;
	ring_fl_db(adap, q);

	if (unlikely(fl_starving(adap, q))) {
		/*
		 * Make sure data has been written to free list
		 */
		wmb();
		q->low++;
	}

	return cred;
}

/**
 * refill_fl - refill an SGE Rx buffer ring with mbufs
 * @adap: the adapter
 * @q: the ring to refill
 * @n: the number of new buffers to allocate
 *
 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers
 * allocated from the associated mempool.  The caller must assure that
 * @n does not exceed the queue's capacity.  Returns the number of buffers
 * allocated.
 */
static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n)
{
	return refill_fl_usembufs(adap, q, n);
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail));
}

/*
 * Return the number of reclaimable descriptors in a Tx queue.
 */
static inline int reclaimable(const struct sge_txq *q)
{
	int hw_cidx = ntohs(q->stat->cidx);

	hw_cidx -= q->cidx;
	if (hw_cidx < 0)
		return hw_cidx + q->size;
	return hw_cidx;
}
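
/*
 * Example: with a queue of size 1024, if the hardware cidx has wrapped
 * around to 5 while our cidx is still 1000, 5 - 1000 == -995, and adding
 * the queue size yields the 29 descriptors that are actually reclaimable.
 */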

/**
 * reclaim_completed_tx - reclaims completed Tx descriptors
 * @q: the Tx queue to reclaim completed descriptors from
 *
 * Reclaims Tx descriptors that the SGE has indicated it has processed.
 */
void reclaim_completed_tx(struct sge_txq *q)
{
	unsigned int avail = reclaimable(q);

	do {
		/* reclaim as much as possible */
		reclaim_tx_desc(q, avail);
		q->in_use -= avail;
		avail = reclaimable(q);
	} while (avail);
}

/**
 * sgl_len - calculates the size of an SGL of the given capacity
 * @n: the number of SGL entries
 *
 * Calculates the number of flits needed for a scatter/gather list that
 * can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
	 * repeated sequences of { Length[i], Length[i+1], Address[i],
	 * Address[i+1] } (this ensures that all addresses are on 64-bit
	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
	 * Address[N+1] is omitted.
	 *
	 * The following calculation incorporates all of the above.  It's
	 * somewhat hard to follow but, briefly: the "+2" accounts for the
	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every pair of the remaining N); and finally the
	 * "+((n-1)&1)" adds the one remaining flit needed if (n-1) is odd ...
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
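
/*
 * Worked example: for n == 3 SGL entries, after n-- we have n == 2, so
 * (3 * 2) / 2 + (2 & 1) + 2 == 3 + 0 + 2 == 5 flits: two flits for the
 * DSGL header plus Length0/Address0, and three more for the remaining
 * pair { Length1, Length2, Address1, Address2 }.
 */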

/**
 * flits_to_desc - returns the num of Tx descriptors for the given flits
 * @n: the number of flits
 *
 * Returns the number of Tx descriptors needed for the supplied number
 * of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	return DIV_ROUND_UP(n, 8);
}
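
/*
 * Each Tx descriptor is 64 bytes, i.e. 8 flits of 8 bytes each, so for
 * example 11 flits occupy DIV_ROUND_UP(11, 8) == 2 descriptors.
 */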

/**
 * is_eth_imm - can an Ethernet packet be sent as immediate data?
 * @m: the packet
 *
 * Returns whether an Ethernet packet is small enough to fit as
 * immediate data. Return value corresponds to the headroom required.
 */
static inline int is_eth_imm(const struct rte_mbuf *m)
{
	unsigned int hdrlen = (m->ol_flags & PKT_TX_TCP_SEG) ?
			      sizeof(struct cpl_tx_pkt_lso_core) : 0;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (m->pkt_len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;

	return 0;
}

/**
 * calc_tx_flits - calculate the number of flits for a packet Tx WR
 * @m: the packet
 *
 * Returns the number of flits needed for a Tx WR for the given Ethernet
 * packet, including the needed WR and CPL headers.
 */
static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
{
	unsigned int flits;
	int hdrlen;

	/*
	 * If the mbuf is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the mbuf data in the Work Request.
	 */

	hdrlen = is_eth_imm(m);
	if (hdrlen)
		return DIV_ROUND_UP(m->pkt_len + hdrlen, sizeof(__be64));

	/*
	 * Otherwise, we're going to have to construct a Scatter gather list
	 * of the mbuf body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits = sgl_len(m->nb_segs);
	if (m->tso_segsz)
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_lso_core) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	else
		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
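
/*
 * Worked example for the non-TSO path above, assuming fw_eth_tx_pkt_wr
 * and cpl_tx_pkt_core are 16 bytes (2 flits) each: a 2-segment mbuf
 * needs sgl_len(2) == 4 flits for the SGL plus 4 flits of WR/CPL
 * headers, 8 flits in total -- exactly one Tx descriptor.
 */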

/**
 * write_sgl - populate a scatter/gather list for a packet
 * @mbuf: the packet
 * @q: the Tx queue we are writing into
 * @sgl: starting location for writing the SGL
 * @end: points right after the end of the SGL
 * @start: start offset into mbuf main-body data to include in the SGL
 * @addr: address of mapped region
 *
 * Generates a scatter/gather list for the buffers that make up a packet.
 * The caller must provide adequate space for the SGL that will be written.
 * The SGL includes all of the packet's page fragments and the data in its
 * main body except for the first @start bytes.  @sgl must be 16-byte
 * aligned and within a Tx descriptor with available space.  @end points
 * right after the end of the SGL but does not account for any potential
 * wrap around, i.e., @end > @sgl.
 */
static void write_sgl(struct rte_mbuf *mbuf, struct sge_txq *q,
		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
		      const dma_addr_t *addr)
{
	unsigned int i, len;
	struct ulptx_sge_pair *to;
	struct rte_mbuf *m = mbuf;
	unsigned int nfrags = m->nb_segs;
	struct ulptx_sge_pair buf[nfrags / 2];

	len = m->data_len - start;
	sgl->len0 = htonl(len);
	sgl->addr0 = rte_cpu_to_be_64(addr[0]);

	sgl->cmd_nsge = htonl(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
			      V_ULPTX_NSGE(nfrags));
	if (likely(--nfrags == 0))
		return;
	/*
	 * Most of the complexity below deals with the possibility we hit the
	 * end of the queue in the middle of writing the SGL.  For this case
	 * only we create the SGL in a temporary buffer and then copy it.
	 */
	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;

	for (i = 0; nfrags >= 2; nfrags -= 2, to++) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->addr[0] = rte_cpu_to_be_64(addr[++i]);
		m = m->next;
		to->len[1] = rte_cpu_to_be_32(m->data_len);
		to->addr[1] = rte_cpu_to_be_64(addr[++i]);
	}
	if (nfrags) {
		m = m->next;
		to->len[0] = rte_cpu_to_be_32(m->data_len);
		to->len[1] = rte_cpu_to_be_32(0);
		to->addr[0] = rte_cpu_to_be_64(addr[i + 1]);
	}
	if (unlikely((u8 *)end > (u8 *)q->stat)) {
		unsigned int part0 = RTE_PTR_DIFF((u8 *)q->stat,
						  (u8 *)sgl->sge);
		unsigned int part1;

		if (likely(part0))
			memcpy(sgl->sge, buf, part0);
		part1 = RTE_PTR_DIFF((u8 *)end, (u8 *)q->stat);
		rte_memcpy(q->desc, RTE_PTR_ADD((u8 *)buf, part0), part1);
		end = RTE_PTR_ADD((void *)q->desc, part1);
	}
	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
		*(u64 *)end = 0;
}

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
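
/*
 * Example: IDXDIFF(2, 500, 512) == 512 - 500 + 2 == 14 ring entries
 * produced since the tracked index was last updated, with the wrap at
 * the ring size handled explicitly.
 */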

/**
 * ring_tx_db - ring a Tx queue's doorbell
 * @adap: the adapter
 * @q: the Tx queue
 *
 * Ring the doorbell for a Tx queue.  The number of new descriptors to
 * give to HW is computed from the queue's pidx and dbidx.
 */
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
	int n = Q_IDXDIFF(q, dbidx);

	/*
	 * Make sure that all writes to the TX Descriptors are committed
	 * before we tell the hardware about them.
	 */
	rte_wmb();

	/*
	 * If we don't have access to the new User Doorbell (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(!q->bar2_addr)) {
		u32 val = V_PIDX(n);

		/*
		 * For T4 we need to participate in the Doorbell Recovery
		 * mechanism.
		 */
		if (!q->db_disabled)
			t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
				     V_QID(q->cntxt_id) | val);
		else
			q->db_pidx_inc += n;
		q->db_pidx = q->pidx;
	} else {
		u32 val = V_PIDX_T5(n);

		/*
		 * T4 and later chips share the same PIDX field offset within
		 * the doorbell, but T5 and later shrank the field in order to
		 * gain a bit for Doorbell Priority.  The field was absurdly
		 * large in the first place (14 bits) so we just use the T5
		 * and later limits and warn if a Queue ID is too large.
		 */
		WARN_ON(val & F_DBPRIO);

		writel(val | V_QID(q->bar2_qid),
		       (void *)((uintptr_t)q->bar2_addr + SGE_UDB_KDOORBELL));

		/*
		 * This Write Memory Barrier will force the write to the User
		 * Doorbell area to be flushed.  This is needed to prevent
		 * writes on different CPUs for the same queue from hitting
		 * the adapter out of order.  This is required when some Work
		 * Requests take the Write Combine Gather Buffer path (user
		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
		 * take the traditional path where we simply increment the
		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
		 * hardware DMA read the actual Work Request.
		 */
		rte_wmb();
	}
	q->dbidx = q->pidx;
}

/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.
 */
static u64 hwcsum(enum chip_type chip, const struct rte_mbuf *m)
{
	int csum_type;

	if (m->ol_flags & PKT_TX_IP_CKSUM) {
		switch (m->ol_flags & PKT_TX_L4_MASK) {
		case PKT_TX_TCP_CKSUM:
			csum_type = TX_CSUM_TCPIP;
			break;
		case PKT_TX_UDP_CKSUM:
			csum_type = TX_CSUM_UDPIP;
			break;
		default:
			goto nocsum;
		}
	} else {
		goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
		int eth_hdr_len = m->l2_len;

		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= V_TXPKT_ETHHDR_LEN(eth_hdr_len);
		else
			hdr_len |= V_T6_TXPKT_ETHHDR_LEN(eth_hdr_len);
		return V_TXPKT_CSUM_TYPE(csum_type) | hdr_len;
	}
nocsum:
	/*
	 * unknown protocol, disable HW csum
	 * and hope a bad packet is detected
	 */
	return F_TXPKT_L4CSUM_DIS;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

#define MAX_COALESCE_LEN 64000

static inline int wraps_around(struct sge_txq *q, int ndesc)
{
	return (q->pidx + ndesc) > q->size ? 1 : 0;
}

static void tx_timer_cb(void *data)
{
	struct adapter *adap = (struct adapter *)data;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
	int i;
	unsigned int coal_idx;

	/* monitor any pending tx */
	for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
		if (t4_os_trylock(&txq->txq_lock)) {
			coal_idx = txq->q.coalesce.idx;
			if (coal_idx) {
				if (coal_idx == txq->q.last_coal_idx &&
				    txq->q.pidx == txq->q.last_pidx) {
					ship_tx_pkt_coalesce_wr(adap, txq);
				} else {
					txq->q.last_coal_idx = coal_idx;
					txq->q.last_pidx = txq->q.pidx;
				}
			}
			t4_os_unlock(&txq->txq_lock);
		}
	}
	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}

/**
 * ship_tx_pkt_coalesce_wr - finalizes and ships a coalesce WR
 * @adap: adapter structure
 * @txq: tx queue
 *
 * Writes the different fields of the pkts WR and sends it.
 */
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
					   struct sge_eth_txq *txq)
{
	u32 wr_mid;
	struct sge_txq *q = &txq->q;
	struct fw_eth_tx_pkts_wr *wr;
	unsigned int ndesc;

	/* fill the pkts WR header */
	wr = (void *)&q->desc[q->pidx];
	wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
	ndesc = flits_to_desc(q->coalesce.flits);
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->plen = cpu_to_be16(q->coalesce.len);
	wr->npkt = q->coalesce.idx;
	wr->r3 = 0;
	wr->type = q->coalesce.type;

	/* zero out coalesce structure members */
	q->coalesce.idx = 0;
	q->coalesce.flits = 0;
	q->coalesce.len = 0;

	txq_advance(q, ndesc);
	txq->stats.coal_wr++;
	txq->stats.coal_pkts += wr->npkt;

	if (Q_IDXDIFF(q, equeidx) >= q->size / 2) {
		q->equeidx = q->pidx;
		wr_mid |= F_FW_WR_EQUEQ;
		wr->equiq_to_len16 = htonl(wr_mid);
	}
	ring_tx_db(adap, q);
}

/**
 * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
 * @txq: tx queue where the mbuf is sent
 * @mbuf: mbuf to be sent
 * @nflits: return value for number of flits needed
 * @adap: adapter structure
 *
 * This function decides if a packet should be coalesced or not.
 */
static inline int should_tx_packet_coalesce(struct sge_eth_txq *txq,
					    struct rte_mbuf *mbuf,
					    unsigned int *nflits,
					    struct adapter *adap)
{
	struct sge_txq *q = &txq->q;
	unsigned int flits, ndesc;
	unsigned char type = 0;
	int credits;

	/* use coal WR type 1 when no frags are present */
	type = (mbuf->nb_segs == 1) ? 1 : 0;

	if (unlikely(type != q->coalesce.type && q->coalesce.idx))
		ship_tx_pkt_coalesce_wr(adap, txq);

	/* calculate the number of flits required for coalescing this packet
	 * without the 2 flits of the WR header. These are added further down
	 * if we are just starting a new PKTS WR. sgl_len doesn't account for
	 * the possible 16-byte alignment of ULP TX commands so we do it here.
	 */
	flits = (sgl_len(mbuf->nb_segs) + 1) & ~1U;
	if (type == 0)
		flits += (sizeof(struct ulp_txpkt) +
			  sizeof(struct ulptx_idata)) / sizeof(__be64);
	flits += sizeof(struct cpl_tx_pkt_core) / sizeof(__be64);
	*nflits = flits;

	/* If coalescing is on, the mbuf is added to a pkts WR */
	if (q->coalesce.idx) {
		ndesc = DIV_ROUND_UP(q->coalesce.flits + flits, 8);
		credits = txq_avail(q) - ndesc;

		/* If we are wrapping or this is the last mbuf, then send the
		 * already coalesced mbufs and let the non-coalesce pass
		 * handle the mbuf.
		 */
		if (unlikely(credits < 0 || wraps_around(q, ndesc))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			return 0;
		}

		/* If the max coalesce len or the max WR len is reached
		 * ship the WR and keep coalescing on.
		 */
		if (unlikely((q->coalesce.len + mbuf->pkt_len >
						MAX_COALESCE_LEN) ||
			     (q->coalesce.flits + flits >
			      q->coalesce.max))) {
			ship_tx_pkt_coalesce_wr(adap, txq);
			goto new;
		}
		return 1;
	}

new:
	/* start a new pkts WR, the WR header is not filled below */
	flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
	ndesc = flits_to_desc(q->coalesce.flits + flits);
	credits = txq_avail(q) - ndesc;

	if (unlikely(credits < 0 || wraps_around(q, ndesc)))
		return 0;
	q->coalesce.flits += 2;
	q->coalesce.type = type;
	q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
			   2 * sizeof(__be64);
	return 1;
}
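
/*
 * Worked example of the per-packet flit accounting above for a
 * single-segment (type 1) mbuf, assuming a 16-byte cpl_tx_pkt_core:
 * sgl_len(1) == 2 flits, which rounded up to an even count stays 2,
 * plus 2 flits for the CPL, i.e. 4 flits per packet; a type 0 packet
 * additionally carries the ulp_txpkt and ulptx_idata headers accounted
 * for by the sizeof arithmetic above.
 */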

/**
 * tx_do_packet_coalesce - add an mbuf to a coalesce WR
 * @txq: sge_eth_txq used to send the mbuf
 * @mbuf: mbuf to be sent
 * @flits: flits needed for this mbuf
 * @adap: adapter structure
 * @pi: port_info structure
 * @addr: mapped address of the mbuf
 *
 * Adds an mbuf to be sent as part of a coalesce WR by filling a
 * ulp_tx_pkt command, ulp_tx_sc_imm command, cpl message and
 * ulp_tx_sc_dsgl command.
 */
static inline int tx_do_packet_coalesce(struct sge_eth_txq *txq,
					struct rte_mbuf *mbuf,
					int flits, struct adapter *adap,
					const struct port_info *pi,
					dma_addr_t *addr)
{
	u64 cntrl, *end;
	struct sge_txq *q = &txq->q;
	struct ulp_txpkt *mc;
	struct ulptx_idata *sc_imm;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *sd;
	unsigned int idx = q->coalesce.idx, len = mbuf->pkt_len;

	if (q->coalesce.type == 0) {
		mc = (struct ulp_txpkt *)q->coalesce.ptr;
		mc->cmd_dest = htonl(V_ULPTX_CMD(4) | V_ULP_TXPKT_DEST(0) |
				     V_ULP_TXPKT_FID(adap->sge.fw_evtq.cntxt_id) |
				     F_ULP_TXPKT_RO);
		mc->len = htonl(DIV_ROUND_UP(flits, 2));
		sc_imm = (struct ulptx_idata *)(mc + 1);
		sc_imm->cmd_more = htonl(V_ULPTX_CMD(ULP_TX_SC_IMM) |
					 F_ULP_TX_SC_MORE);
		sc_imm->len = htonl(sizeof(*cpl));
		end = (u64 *)mc + flits;
		cpl = (struct cpl_tx_pkt_core *)(sc_imm + 1);
	} else {
		end = (u64 *)q->coalesce.ptr + flits;
		cpl = (struct cpl_tx_pkt_core *)q->coalesce.ptr;
	}

	/* update coalesce structure for this txq */
	q->coalesce.flits += flits;
	q->coalesce.ptr += flits * sizeof(__be64);
	q->coalesce.len += mbuf->pkt_len;

	/* fill the cpl message, same as in t4_eth_xmit, this should be kept
	 * similar to t4_eth_xmit
	 */
	if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
		cntrl = hwcsum(adap->params.chip, mbuf) |
			       F_TXPKT_IPCSUM_DIS;
		txq->stats.tx_cso++;
	} else {
		cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	}

	if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   V_TXPKT_INTF(pi->tx_chan) |
			   V_TXPKT_PF(adap->pf));
	cpl->pack = htons(0);
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(cntrl);
	write_sgl(mbuf, q, (struct ulptx_sgl *)(cpl + 1), end, 0, addr);
	txq->stats.pkts++;
	txq->stats.tx_bytes += len;

	sd = &q->sdesc[q->pidx + (idx >> 1)];
	if (!(idx & 1)) {
		if (sd->coalesce.idx) {
			int i;

			for (i = 0; i < sd->coalesce.idx; i++) {
				rte_pktmbuf_free(sd->coalesce.mbuf[i]);
				sd->coalesce.mbuf[i] = NULL;
			}
		}
	}

	/* store pointers to the mbuf and the sgl used in free_tx_desc.
	 * each tx desc can hold two pointers corresponding to the value
	 * of ETH_COALESCE_PKT_PER_DESC
	 */
	sd->coalesce.mbuf[idx & 1] = mbuf;
	sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
	sd->coalesce.idx = (idx & 1) + 1;

	/* send the coalesced work request if max reached */
	if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
		ship_tx_pkt_coalesce_wr(adap, txq);
	return 0;
}

/**
 * t4_eth_xmit - add a packet to an Ethernet Tx queue
 * @txq: the egress queue
 * @mbuf: the packet
 *
 * Add a packet to an SGE Ethernet Tx queue.
 */
int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
{
	const struct port_info *pi;
	struct cpl_tx_pkt_lso_core *lso;
	struct adapter *adap;
	struct rte_mbuf *m = mbuf;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	struct tx_sw_desc *d;
	dma_addr_t addr[m->nb_segs];
	unsigned int flits, ndesc, cflits;
	int l3hdr_len, l4hdr_len, eth_xtra_len;
	int len, last_desc;
	int credits;
	u32 wr_mid;
	u64 cntrl, *end;
	bool v6;
	u32 max_pkt_len = txq->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;

	/* Reject xmit if queue is stopped */
	if (unlikely(txq->flags & EQ_STOPPED))
		return -(EBUSY);

	/*
	 * The chip min packet length is 10 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
out_free:
		rte_pktmbuf_free(m);
		return 0;
	}

	if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
	    (unlikely(m->pkt_len > max_pkt_len)))
		goto out_free;

	pi = (struct port_info *)txq->eth_dev->data->dev_private;
	adap = pi->adapter;

	cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
	/* align the end of coalesce WR to a 512 byte boundary */
	txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
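	/*
	 * Example: with pidx == 13, 13 & 7 == 5, so coalesce.max ==
	 * (8 - 5) * 8 == 24 flits, i.e. 3 descriptors; 13 + 3 == 16
	 * descriptors of 64 bytes each ends on a 512-byte multiple.
	 */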

	if (!((m->ol_flags & PKT_TX_TCP_SEG) || (m->pkt_len > ETHER_MAX_LEN))) {
		if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
			if (unlikely(map_mbuf(mbuf, addr) < 0)) {
				dev_warn(adap, "%s: mapping err for coalesce\n",
					 __func__);
				txq->stats.mapping_err++;
				goto out_free;
			}
			rte_prefetch0((volatile void *)addr);
			return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
						     pi, addr);
		} else {
			return -EBUSY;
		}
	}

	if (txq->q.coalesce.idx)
		ship_tx_pkt_coalesce_wr(adap, txq);

	flits = calc_tx_flits(m);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		dev_debug(adap, "%s: Tx ring %u full; credits = %d\n",
			  __func__, txq->q.cntxt_id, credits);
		return -EBUSY;
	}

	if (unlikely(map_mbuf(m, addr) < 0)) {
		txq->stats.mapping_err++;
		goto out_free;
	}

	wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
	if (Q_IDXDIFF(&txq->q, equeidx) >= 64) {
		txq->q.equeidx = txq->q.pidx;
		wr_mid |= F_FW_WR_EQUEQ;
	}

	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = rte_cpu_to_be_64(0);
	end = (u64 *)wr + flits;

	len = 0;
	len += sizeof(*cpl);

	/* Coalescing skipped and we send through normal path */
	if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       V_FW_WR_IMMDLEN(len));
		cpl = (void *)(wr + 1);
		if (m->ol_flags & PKT_TX_IP_CKSUM) {
			cntrl = hwcsum(adap->params.chip, m) |
				F_TXPKT_IPCSUM_DIS;
			txq->stats.tx_cso++;
		}
	} else {
		lso = (void *)(wr + 1);
		v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
		l3hdr_len = m->l3_len;
		l4hdr_len = m->l4_len;
		eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
		len += sizeof(*lso);
		wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
				       V_FW_WR_IMMDLEN(len));
		lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
				      F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
				      V_LSO_IPV6(v6) |
				      V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
				      V_LSO_IPHDR_LEN(l3hdr_len / 4) |
				      V_LSO_TCPHDR_LEN(l4hdr_len / 4));
		lso->ipid_ofst = htons(0);
		lso->mss = htons(m->tso_segsz);
		lso->seqno_offset = htonl(0);
		if (is_t4(adap->params.chip))
			lso->len = htonl(m->pkt_len);
		else
			lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
		cpl = (void *)(lso + 1);
		cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			V_TXPKT_IPHDR_LEN(l3hdr_len) |
			V_TXPKT_ETHHDR_LEN(eth_xtra_len);
		txq->stats.tso++;
		txq->stats.tx_cso += m->tso_segsz;
	}

	if (m->ol_flags & PKT_TX_VLAN_PKT) {
		txq->stats.vlan_ins++;
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
	}

	cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
			   V_TXPKT_INTF(pi->tx_chan) |
			   V_TXPKT_PF(adap->pf));
	cpl->pack = htons(0);
	cpl->len = htons(m->pkt_len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	txq->stats.pkts++;
	txq->stats.tx_bytes += m->pkt_len;
	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= (int)txq->q.size)
		last_desc -= txq->q.size;

	d = &txq->q.sdesc[last_desc];
	if (d->coalesce.idx) {
		int i;

		for (i = 0; i < d->coalesce.idx; i++) {
			rte_pktmbuf_free(d->coalesce.mbuf[i]);
			d->coalesce.mbuf[i] = NULL;
		}
		d->coalesce.idx = 0;
	}
	write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
		  addr);
	txq->q.sdesc[last_desc].mbuf = m;
	txq->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
	txq_advance(&txq->q, ndesc);
	ring_tx_db(adap, &txq->q);
	return 0;
}

/**
 * alloc_ring - allocate resources for an SGE descriptor ring
 * @nelem: the number of descriptors
 * @elem_size: the size of each descriptor
 * @sw_size: the size of the SW state associated with each ring element
 * @phys: the physical address of the allocated ring
 * @metadata: address of the array holding the SW state for the ring
 * @stat_size: extra space in HW ring for status information
 * @queue_id: queue index (only used for debug prints)
 * @socket_id: preferred socket for memory allocations
 * @z_name: memzone name for the HW ring
 * @z_name_sw: memzone name for the SW ring
 *
 * Allocates resources for an SGE descriptor ring, such as Tx queues,
 * free buffer lists, or response queues.  Each SGE ring requires
 * space for its HW descriptors plus, optionally, space for the SW state
 * associated with each HW entry (the metadata).  The function returns
 * three values: the virtual address for the HW ring (the return value
 * of the function), the bus address of the HW ring, and the address
 * of the SW ring.
 */
static void *alloc_ring(size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata,
			size_t stat_size, __rte_unused uint16_t queue_id,
			int socket_id, const char *z_name,
			const char *z_name_sw)
{
	size_t len = CXGBE_MAX_RING_DESC_SIZE * elem_size + stat_size;
	const struct rte_memzone *tz;
	void *s = NULL;

	dev_debug(adapter, "%s: nelem = %zu; elem_size = %zu; sw_size = %zu; "
		  "stat_size = %zu; queue_id = %u; socket_id = %d; z_name = %s;"
		  " z_name_sw = %s\n", __func__, nelem, elem_size, sw_size,
		  stat_size, queue_id, socket_id, z_name, z_name_sw);

	tz = rte_memzone_lookup(z_name);
	if (tz) {
		dev_debug(adapter, "%s: tz exists...returning existing..\n",
			  __func__);
		goto alloc_sw_ring;
	}

	/*
	 * Allocate TX/RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
	if (!tz)
		return NULL;

alloc_sw_ring:
	memset(tz->addr, 0, len);
	if (sw_size) {
		s = rte_zmalloc_socket(z_name_sw, nelem * sw_size,
				       RTE_CACHE_LINE_SIZE, socket_id);

		if (!s) {
			dev_err(adapter, "%s: failed to get sw_ring memory\n",
				__func__);
			return NULL;
		}
	}
	if (metadata)
		*(void **)metadata = s;

	*phys = (uint64_t)tz->phys_addr;
	return tz->addr;
}

/**
 * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
{
	/*
	 * If there's only one mbuf fragment, just return that.
	 */
	if (likely(gl->nfrags == 1))
		return gl->mbufs[0];

	return NULL;
}

/**
 * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
 * @gl: the gather list
 *
 * Builds an mbuf from the given packet gather list.  Returns the mbuf or
 * %NULL if mbuf allocation failed.
 */
static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
{
	return t4_pktgl_to_mbuf_usembufs(gl);
}

/**
 * t4_ethrx_handler - process an ingress ethernet packet
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the RX_PKT message
 * @si: the gather list of packet fragments
 *
 * Process an ingress ethernet packet and deliver it to the stack.
 */
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *si)
{
	struct rte_mbuf *mbuf;
	const struct cpl_rx_pkt *pkt;
	const struct rss_header *rss_hdr;
	bool csum_ok;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	rss_hdr = (const void *)rsp;
	pkt = (const void *)&rsp[1];
	csum_ok = pkt->csum_calc && !pkt->err_vec;

	mbuf = t4_pktgl_to_mbuf(si);
	if (unlikely(!mbuf)) {
		rxq->stats.rx_drops++;
		return 0;
	}

	mbuf->port = pkt->iff;
	if (pkt->l2info & htonl(F_RXF_IP)) {
		mbuf->packet_type = RTE_PTYPE_L3_IPV4;
		if (unlikely(!csum_ok))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
	} else if (pkt->l2info & htonl(F_RXF_IP6)) {
		mbuf->packet_type = RTE_PTYPE_L3_IPV6;
	}

	if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
		mbuf->ol_flags |= PKT_RX_RSS_HASH;
		mbuf->hash.rss = ntohl(rss_hdr->hash_val);
	}

	if (pkt->vlan_ex) {
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
		mbuf->vlan_tci = ntohs(pkt->vlan);
	}
	rxq->stats.pkts++;
	rxq->stats.rx_bytes += mbuf->pkt_len;

	return 0;
}

/**
 * is_new_response - check if a response is newly written
 * @r: the response descriptor
 * @q: the response queue
 *
 * Returns true if a response descriptor contains a yet unprocessed
 * response.
 */
static inline bool is_new_response(const struct rsp_ctrl *r,
				   const struct sge_rspq *q)
{
	return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
}
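
/*
 * The generation bit works because rspq_next() below toggles q->gen each
 * time the consumer index wraps, so a response whose descriptor gen bit
 * matches q->gen was written by the hardware after our last pass over
 * the ring.
 */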

#define CXGB4_MSG_AN ((void *)1)

/**
 * rspq_next - advance to the next entry in a response queue
 * @q: the queue
 *
 * Updates the state of a response queue to advance it to the next entry.
 */
static inline void rspq_next(struct sge_rspq *q)
{
	q->cur_desc = (const __be64 *)((const char *)q->cur_desc + q->iqe_len);
	if (unlikely(++q->cidx == q->size)) {
		q->cidx = 0;
		q->gen ^= 1;
		q->cur_desc = q->desc;
	}
}

/**
 * process_responses - process responses from an SGE response queue
 * @q: the ingress queue to process
 * @budget: how many responses can be processed in this round
 * @rx_pkts: mbuf to put the pkts
 *
 * Process responses from an SGE response queue up to the supplied budget.
 * Responses include received packets as well as control messages from FW
 * or HW.
 *
 * Additionally choose the interrupt holdoff time for the next interrupt
 * on this queue.  If the system is under memory shortage use a fairly
 * long delay to help recovery.
 */
static int process_responses(struct sge_rspq *q, int budget,
			     struct rte_mbuf **rx_pkts)
{
	int ret = 0, rsp_type;
	int budget_left = budget;
	const struct rsp_ctrl *rc;
	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);

	while (likely(budget_left)) {
		rc = (const struct rsp_ctrl *)
		     ((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));

		if (!is_new_response(rc, q))
			break;

		/*
		 * Ensure response has been read
		 */
		rmb();
		rsp_type = G_RSPD_TYPE(rc->u.type_gen);

		if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
			const struct rx_sw_desc *rsd =
						&rxq->fl.sdesc[rxq->fl.cidx];
			const struct rss_header *rss_hdr =
						(const void *)q->cur_desc;
			const struct cpl_rx_pkt *cpl =
						(const void *)&q->cur_desc[1];
			bool csum_ok = cpl->csum_calc && !cpl->err_vec;
			struct rte_mbuf *pkt, *npkt;
			u32 len, bufsz;

			len = ntohl(rc->pldbuflen_qid);
			BUG_ON(!(len & F_RSPD_NEWBUF));
			pkt = rsd->buf;
			npkt = pkt;
			len = G_RSPD_LEN(len);
			pkt->pkt_len = len;

			/* Chain mbufs into len if necessary */
			while (len) {
				struct rte_mbuf *new_pkt = rsd->buf;

				bufsz = min(get_buf_size(q->adapter, rsd), len);
				new_pkt->data_len = bufsz;
				unmap_rx_buf(&rxq->fl);
				len -= bufsz;
				npkt->next = new_pkt;
				npkt = new_pkt;
				pkt->nb_segs++;
				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
			}
			npkt->next = NULL;
			pkt->nb_segs--;

			if (cpl->l2info & htonl(F_RXF_IP)) {
				pkt->packet_type = RTE_PTYPE_L3_IPV4;
				if (unlikely(!csum_ok))
					pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;

				if ((cpl->l2info &
				     htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
					pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
			} else if (cpl->l2info & htonl(F_RXF_IP6)) {
				pkt->packet_type = RTE_PTYPE_L3_IPV6;
			}

			if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
				pkt->ol_flags |= PKT_RX_RSS_HASH;
				pkt->hash.rss = ntohl(rss_hdr->hash_val);
			}

			if (cpl->vlan_ex) {
				pkt->ol_flags |= PKT_RX_VLAN_PKT;
				pkt->vlan_tci = ntohs(cpl->vlan);
			}
			rxq->stats.pkts++;
			rxq->stats.rx_bytes += pkt->pkt_len;
			rx_pkts[budget - budget_left] = pkt;
		} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
			ret = q->handler(q, q->cur_desc, NULL);
		} else {
			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
		}

		if (unlikely(ret)) {
			/* couldn't process descriptor, back off for recovery */
			q->next_intr_params = V_QINTR_TIMER_IDX(NOMEM_TMR_IDX);
			break;
		}

		rspq_next(q);
		budget_left--;

		if (R_IDXDIFF(q, gts_idx) >= 64) {
			unsigned int cidx_inc = R_IDXDIFF(q, gts_idx);
			unsigned int params;
			u32 val;

			if (fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
				__refill_fl(q->adapter, &rxq->fl);
			params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
			q->next_intr_params = params;
			val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);

			if (unlikely(!q->bar2_addr))
				t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
					     val |
					     V_INGRESSQID((u32)q->cntxt_id));
			else {
				writel(val | V_INGRESSQID(q->bar2_qid),
				       (void *)((uintptr_t)q->bar2_addr +
				       SGE_UDB_GTS));
				/*
				 * This Write memory Barrier will force the
				 * write to the User Doorbell area to be
				 * flushed.
				 */
				wmb();
			}
			q->gts_idx = q->cidx;
		}
	}

	/*
	 * If this is a Response Queue with an associated Free List and
	 * there's room for another chunk of new Free List buffer pointers,
	 * refill the Free List.
	 */

	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
		__refill_fl(q->adapter, &rxq->fl);

	return budget - budget_left;
}

int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
	       unsigned int budget, unsigned int *work_done)
{
	int err = 0;

	*work_done = process_responses(q, budget, rx_pkts);
	return err;
}

/**
 * bar2_address - return the BAR2 address for an SGE Queue's Registers
 * @adapter: the adapter
 * @qid: the SGE Queue ID
 * @qtype: the SGE Queue Type (Egress or Ingress)
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 address for the SGE Queue Registers associated with
 * @qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
 * returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
 * Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
 * Registers are supported (e.g. the Write Combining Doorbell Buffer).
 */
static void __iomem *bar2_address(struct adapter *adapter, unsigned int qid,
				  enum t4_bar2_qtype qtype,
				  unsigned int *pbar2_qid)
{
	u64 bar2_qoffset;
	int ret;

	ret = t4_bar2_sge_qregs(adapter, qid, qtype, &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	return adapter->bar2 + bar2_qoffset;
}

int t4_sge_eth_rxq_start(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, true, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}

int t4_sge_eth_rxq_stop(struct adapter *adap, struct sge_rspq *rq)
{
	struct sge_eth_rxq *rxq = container_of(rq, struct sge_eth_rxq, rspq);
	unsigned int fl_id = rxq->fl.size ? rxq->fl.cntxt_id : 0xffff;

	return t4_iq_start_stop(adap, adap->mbox, false, adap->pf, 0,
				rq->cntxt_id, fl_id, 0xffff);
}

/*
 * @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct rte_eth_dev *eth_dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd, int cong,
		     struct rte_mempool *mp, int queue_id, int socket_id)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
	char z_name[RTE_MEMZONE_NAMESIZE];
	char z_name_sw[RTE_MEMZONE_NAMESIZE];
	unsigned int nb_refill;

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = cxgbe_roundup(iq->size, 16);

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 eth_dev->driver->pci_drv.driver.name,
		 fwevtq ? "fwq_ring" : "rx_ring",
		 eth_dev->data->port_id, queue_id);
	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

	iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
			      queue_id, socket_id, z_name, z_name_sw);
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
			    V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
	c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
				 (sizeof(c) / 16));
	c.type_to_iqandstindex =
		htonl(V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
		      V_FW_IQ_CMD_IQASYNCH(fwevtq) |
		      V_FW_IQ_CMD_VIID(pi->viid) |
		      V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
		      V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
		      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
							       -intr_idx - 1));
	c.iqdroprss_to_iqesize =
		htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
		      F_FW_IQ_CMD_IQGTSMODE |
		      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
		      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);

	if (fl) {
		struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
						       fl);
		enum chip_type chip = (enum chip_type)CHELSIO_CHIP_VERSION(
				adap->params.chip);

		/*
		 * Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = cxgbe_roundup(fl->size, 8);

		snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
			 eth_dev->driver->pci_drv.driver.name,
			 fwevtq ? "fwq_ring" : "fl_ring",
			 eth_dev->data->port_id, queue_id);
		snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);

		fl->desc = alloc_ring(fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc),
				      &fl->addr, &fl->sdesc, s->stat_len,
				      queue_id, socket_id, z_name, z_name_sw);

		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
		c.iqns_to_fl0congen |=
			htonl(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) |
			      (unlikely(rxq->usembufs) ?
			       0 : F_FW_IQ_CMD_FL0PACKEN) |
			      F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
			      F_FW_IQ_CMD_FL0PADEN);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
				      F_FW_IQ_CMD_FL0CONGCIF |
				      F_FW_IQ_CMD_FL0CONGEN);

		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.
		 * Hence maximum allowed burst size will be 448 bytes.
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) |
			      V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
			      X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}
1734
1735	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
1736	if (ret)
1737		goto err;
1738
1739	iq->cur_desc = iq->desc;
1740	iq->cidx = 0;
1741	iq->gts_idx = 0;
1742	iq->gen = 1;
1743	iq->next_intr_params = iq->intr_params;
1744	iq->cntxt_id = ntohs(c.iqid);
1745	iq->abs_id = ntohs(c.physiqid);
1746	iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
1747				     &iq->bar2_qid);
1748	iq->size--;                           /* subtract status entry */
1749	iq->eth_dev = eth_dev;
1750	iq->handler = hnd;
1751	iq->port_id = pi->port_id;
1752	iq->mb_pool = mp;
1753
1754	/* set offset to -1 to distinguish ingress queues without FL */
1755	iq->offset = fl ? 0 : -1;
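	/* e.g. the Rx path can test (iq->offset >= 0) to tell whether a
	 * Free List is attached to this ingress queue.
	 */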
1756
1757	if (fl) {
1758		fl->cntxt_id = ntohs(c.fl0id);
1759		fl->avail = 0;
1760		fl->pend_cred = 0;
1761		fl->pidx = 0;
1762		fl->cidx = 0;
1763		fl->alloc_failed = 0;
1764
1765		/*
1766		 * Note, we must initialize the BAR2 Free List User Doorbell
1767		 * information before refilling the Free List!
1768		 */
1769		fl->bar2_addr = bar2_address(adap, fl->cntxt_id,
1770					     T4_BAR2_QTYPE_EGRESS,
1771					     &fl->bar2_qid);
1772
1773		nb_refill = refill_fl(adap, fl, fl_cap(fl));
1774		if (nb_refill != fl_cap(fl)) {
1775			ret = -ENOMEM;
1776			dev_err(adap, "%s: mbuf alloc failed with error: %d\n",
1777				__func__, ret);
1778			goto refill_fl_err;
1779		}
1780	}
1781
1782	/*
1783	 * For T5 and later we attempt to set up the Congestion Manager values
1784	 * of the new RX Ethernet Queue.  This should really be handled by
1785	 * firmware because it's more complex than any host driver wants to
1786	 * get involved with, it differs per chip, and what we do here is
1787	 * almost certainly wrong.  Firmware would be wrong as well, but it
1788	 * would be a lot easier to fix in one place ...  For now we do
1789	 * something very simple (and hopefully less wrong).
1790	 */
1791	if (!is_t4(adap->params.chip) && cong >= 0) {
1792		u32 param, val;
1793		int i;
1794
1795		param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1796			 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
1797			 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id));
1798		if (cong == 0) {
1799			val = V_CONMCTXT_CNGTPMODE(X_CONMCTXT_CNGTPMODE_QUEUE);
1800		} else {
1801			val = V_CONMCTXT_CNGTPMODE(
1802					X_CONMCTXT_CNGTPMODE_CHANNEL);
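			/*
			 * The channel congestion map appears to carry one
			 * 4-bit nibble per Tx channel, so (i << 2) shifts
			 * bit 0 into channel i's nibble (an inference from
			 * this encoding, not documented behaviour).
			 */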
1803			for (i = 0; i < 4; i++) {
1804				if (cong & (1 << i))
1805					val |= V_CONMCTXT_CNGCHMAP(1 <<
1806								   (i << 2));
1807			}
1808		}
1809		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1810				    &param, &val);
1811		if (ret)
1812			dev_warn(adap->pdev_dev, "Failed to set Congestion Manager Context for Ingress Queue %d: %d\n",
1813				 iq->cntxt_id, -ret);
1814	}
1815
1816	return 0;
1817
1818refill_fl_err:
1819	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
1820		   iq->cntxt_id, fl->cntxt_id, 0xffff);
1821fl_nomem:
1822	ret = -ENOMEM;
1823err:
1824	iq->cntxt_id = 0;
1825	iq->abs_id = 0;
1826	iq->desc = NULL;
1828
1829	if (fl && fl->desc) {
1830		rte_free(fl->sdesc);
1831		fl->cntxt_id = 0;
1832		fl->sdesc = NULL;
1833		fl->desc = NULL;
1834	}
1835	return ret;
1836}
1837
1838static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
1839{
1840	q->cntxt_id = id;
1841	q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
1842				    &q->bar2_qid);
1843	q->cidx = 0;
1844	q->pidx = 0;
1845	q->dbidx = 0;
1846	q->in_use = 0;
1847	q->equeidx = 0;
1848	q->coalesce.idx = 0;
1849	q->coalesce.len = 0;
1850	q->coalesce.flits = 0;
1851	q->last_coal_idx = 0;
1852	q->last_pidx = 0;
1853	q->stat = (void *)&q->desc[q->size];
1854}
1855
1856int t4_sge_eth_txq_start(struct sge_eth_txq *txq)
1857{
1858	/*
1859	 *  TODO: For flow control, the queue may have been stopped while
1860	 *  waiting to reclaim credits.
1861	 *  Ensure the queue is in the EQ_STOPPED state before starting it.
1862	 */
1863	if (!(txq->flags & EQ_STOPPED))
1864		return -EBUSY;
1865
1866	txq->flags &= ~EQ_STOPPED;
1867
1868	return 0;
1869}
1870
1871int t4_sge_eth_txq_stop(struct sge_eth_txq *txq)
1872{
1873	txq->flags |= EQ_STOPPED;
1874
1875	return 0;
1876}
1877
1878int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1879			 struct rte_eth_dev *eth_dev, uint16_t queue_id,
1880			 unsigned int iqid, int socket_id)
1881{
1882	int ret, nentries;
1883	struct fw_eq_eth_cmd c;
1884	struct sge *s = &adap->sge;
1885	struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
1886	char z_name[RTE_MEMZONE_NAMESIZE];
1887	char z_name_sw[RTE_MEMZONE_NAMESIZE];
1888
1889	/* Add status entries */
1890	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
1891
1892	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1893		 eth_dev->driver->pci_drv.driver.name, "tx_ring",
1894		 eth_dev->data->port_id, queue_id);
1895	snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
1896
1897	txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
1898				 sizeof(struct tx_sw_desc), &txq->q.phys_addr,
1899				 &txq->q.sdesc, s->stat_len, queue_id,
1900				 socket_id, z_name, z_name_sw);
1901	if (!txq->q.desc)
1902		return -ENOMEM;
1903
1904	memset(&c, 0, sizeof(c));
1905	c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
1906			    F_FW_CMD_WRITE | F_FW_CMD_EXEC |
1907			    V_FW_EQ_ETH_CMD_PFN(adap->pf) |
1908			    V_FW_EQ_ETH_CMD_VFN(0));
1909	c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
1910				 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
1911	c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
1912				     V_FW_EQ_ETH_CMD_VIID(pi->viid));
1913	c.fetchszm_to_iqid =
1914		htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
1915		      V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
1916		      F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
1917	c.dcaen_to_eqsize =
1918		htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
1919		      V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
1920		      V_FW_EQ_ETH_CMD_EQSIZE(nentries));
1921	c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
1922
1923	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
1924	if (ret) {
1925		rte_free(txq->q.sdesc);
1926		txq->q.sdesc = NULL;
1927		txq->q.desc = NULL;
1928		return ret;
1929	}
1930
1931	init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
1932	txq->stats.tso = 0;
1933	txq->stats.pkts = 0;
1934	txq->stats.tx_cso = 0;
1935	txq->stats.coal_wr = 0;
1936	txq->stats.vlan_ins = 0;
1937	txq->stats.tx_bytes = 0;
1938	txq->stats.coal_pkts = 0;
1939	txq->stats.mapping_err = 0;
1940	txq->flags |= EQ_STOPPED;
1941	txq->eth_dev = eth_dev;
1942	t4_os_lock_init(&txq->txq_lock);
1943	return 0;
1944}
1945
1946static void free_txq(struct sge_txq *q)
1947{
1948	q->cntxt_id = 0;
1949	q->sdesc = NULL;
1950	q->desc = NULL;
1951}
1952
1953static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
1954			 struct sge_fl *fl)
1955{
1956	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
1957
1958	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
1959		   rq->cntxt_id, fl_id, 0xffff);
1960	rq->cntxt_id = 0;
1961	rq->abs_id = 0;
1962	rq->desc = NULL;
1963
1964	if (fl) {
1965		free_rx_bufs(fl, fl->avail);
1966		rte_free(fl->sdesc);
1967		fl->sdesc = NULL;
1968		fl->cntxt_id = 0;
1969		fl->desc = NULL;
1970	}
1971}
1972
1973/*
1974 * Clear all queues of the port
1975 *
1976 * Note:  This function must only be called after the rx and tx paths
1977 * of the port have been disabled.
1978 */
1979void t4_sge_eth_clear_queues(struct port_info *pi)
1980{
1981	int i;
1982	struct adapter *adap = pi->adapter;
1983	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[pi->first_qset];
1984	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
1985
1986	for (i = 0; i < pi->n_rx_qsets; i++, rxq++) {
1987		if (rxq->rspq.desc)
1988			t4_sge_eth_rxq_stop(adap, &rxq->rspq);
1989	}
1990	for (i = 0; i < pi->n_tx_qsets; i++, txq++) {
1991		if (txq->q.desc) {
1992			struct sge_txq *q = &txq->q;
1993
1994			t4_sge_eth_txq_stop(txq);
1995			reclaim_completed_tx(q);
1996			free_tx_desc(q, q->size);
1997			q->equeidx = q->pidx;
1998		}
1999	}
2000}
2001
2002void t4_sge_eth_rxq_release(struct adapter *adap, struct sge_eth_rxq *rxq)
2003{
2004	if (rxq->rspq.desc) {
2005		t4_sge_eth_rxq_stop(adap, &rxq->rspq);
2006		free_rspq_fl(adap, &rxq->rspq, rxq->fl.size ? &rxq->fl : NULL);
2007	}
2008}
2009
2010void t4_sge_eth_txq_release(struct adapter *adap, struct sge_eth_txq *txq)
2011{
2012	if (txq->q.desc) {
2013		t4_sge_eth_txq_stop(txq);
2014		reclaim_completed_tx(&txq->q);
2015		t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, txq->q.cntxt_id);
2016		free_tx_desc(&txq->q, txq->q.size);
2017		rte_free(txq->q.sdesc);
2018		free_txq(&txq->q);
2019	}
2020}
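
/*
 * Minimal caller sketch (hypothetical helper, not part of this driver):
 * it only ties together the Tx queue lifecycle entry points defined
 * above -- allocate, start and, on failure, release.
 */
static inline int example_txq_bringup(struct adapter *adap,
				      struct sge_eth_txq *txq,
				      struct rte_eth_dev *eth_dev,
				      uint16_t queue_id, unsigned int iqid,
				      int socket_id)
{
	int ret;

	/* Allocate the descriptor ring and the firmware egress queue. */
	ret = t4_sge_alloc_eth_txq(adap, txq, eth_dev, queue_id, iqid,
				   socket_id);
	if (ret)
		return ret;

	/* Queues come up EQ_STOPPED; clear the flag before transmitting. */
	ret = t4_sge_eth_txq_start(txq);
	if (ret)
		t4_sge_eth_txq_release(adap, txq);
	return ret;
}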
2021
2022void t4_sge_tx_monitor_start(struct adapter *adap)
2023{
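	/* rte_eal_alarm_set() takes microseconds: fire tx_timer_cb 50us from
	 * now; the callback is expected to re-arm itself to keep the monitor
	 * running.
	 */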
2024	rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
2025}
2026
2027void t4_sge_tx_monitor_stop(struct adapter *adap)
2028{
2029	rte_eal_alarm_cancel(tx_timer_cb, (void *)adap);
2030}
2031
2032/**
2033 * t4_free_sge_resources - free SGE resources
2034 * @adap: the adapter
2035 *
2036 * Frees resources used by the SGE queue sets.
2037 */
2038void t4_free_sge_resources(struct adapter *adap)
2039{
2040	int i;
2041	struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
2042	struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
2043
2044	/* clean up Ethernet Tx/Rx queues */
2045	for (i = 0; i < adap->sge.max_ethqsets; i++, rxq++, txq++) {
2046		/* Free only the queues allocated */
2047		if (rxq->rspq.desc) {
2048			t4_sge_eth_rxq_release(adap, rxq);
2049			rxq->rspq.eth_dev = NULL;
2050		}
2051		if (txq->q.desc) {
2052			t4_sge_eth_txq_release(adap, txq);
2053			txq->eth_dev = NULL;
2054		}
2055	}
2056
2057	if (adap->sge.fw_evtq.desc)
2058		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2059}
2060
2061/**
2062 * t4_sge_init_soft - collect core SGE operating parameters
2063 * @adap: the adapter
2064 *
2065 * Part of the SGE initialization needed every time after a chip
2066 * reset.  We do not initialize any of the queues here; the driver
2067 * top-level must request those individually.
2068 *
2069 * SGE initialization runs in two different modes:
2070 *
2071 *  1. Perform actual hardware initialization and record hard-coded
2072 *     parameters which were used.  This gets used when we're the
2073 *     Master PF and the Firmware Configuration File support didn't
2074 *     work for some reason.
2075 *
2076 *  2. We're not the Master PF or initialization was performed with
2077 *     a Firmware Configuration File.  In this case we need to grab
2078 *     any of the SGE operating parameters that we need to have in
2079 *     order to do our job and make sure we can live with them ...
2080 */
2081static int t4_sge_init_soft(struct adapter *adap)
2082{
2083	struct sge *s = &adap->sge;
2084	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2085	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2086	u32 ingress_rx_threshold;
2087
2088	/*
2089	 * Verify that CPL messages are going to the Ingress Queue for
2090	 * process_responses() and that only packet data is going to the
2091	 * Free Lists.
2092	 */
2093	if ((t4_read_reg(adap, A_SGE_CONTROL) & F_RXPKTCPLMODE) !=
2094	    V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
2095		dev_err(adap, "bad SGE CPL MODE\n");
2096		return -EINVAL;
2097	}
2098
2099	/*
2100	 * Validate the Host Buffer Register Array indices that we want to
2101	 * use ...
2102	 *
2103	 * XXX Note that we should really read through the Host Buffer Size
2104	 * XXX register array and find the indices of the Buffer Sizes which
2105	 * XXX meet our needs!
2106	 */
2107#define READ_FL_BUF(x) \
2108	t4_read_reg(adap, A_SGE_FL_BUFFER_SIZE0 + (x) * sizeof(u32))
2109
2110	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2111	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2112	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2113	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2114
2115	/*
2116	 * We only bother using the Large Page logic if the Large Page Buffer
2117	 * is larger than our Page Size Buffer.
2118	 */
2119	if (fl_large_pg <= fl_small_pg)
2120		fl_large_pg = 0;
2121
2122#undef READ_FL_BUF
2123
2124	/*
2125	 * The Page Size Buffer must be exactly equal to our Page Size and the
2126	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2127	 */
2128	if (fl_small_pg != CXGBE_PAGE_SIZE ||
2129	    (fl_large_pg & (fl_large_pg - 1)) != 0) {
2130		dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
2131			fl_small_pg, fl_large_pg);
2132		return -EINVAL;
2133	}
2134	if (fl_large_pg)
2135		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
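	/*
	 * Illustrative arithmetic, assuming a 4KB page (PAGE_SHIFT == 12):
	 * a 64KB Large Page Buffer yields fl_pg_order = ilog2(65536) - 12
	 * = 4, i.e. each large FL buffer spans 2^4 = 16 pages.
	 */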
2136
2137	if (adap->use_unpacked_mode) {
2138		int err = 0;
2139
2140		if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap)) {
2141			dev_err(adap, "bad SGE FL small MTU %d\n",
2142				fl_small_mtu);
2143			err = -EINVAL;
2144		}
2145		if (fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2146			dev_err(adap, "bad SGE FL large MTU %d\n",
2147				fl_large_mtu);
2148			err = -EINVAL;
2149		}
2150		if (err)
2151			return err;
2152	}
2153
2154	/*
2155	 * Retrieve our RX interrupt holdoff timer values and counter
2156	 * threshold values from the SGE parameters.
2157	 */
2158	timer_value_0_and_1 = t4_read_reg(adap, A_SGE_TIMER_VALUE_0_AND_1);
2159	timer_value_2_and_3 = t4_read_reg(adap, A_SGE_TIMER_VALUE_2_AND_3);
2160	timer_value_4_and_5 = t4_read_reg(adap, A_SGE_TIMER_VALUE_4_AND_5);
2161	s->timer_val[0] = core_ticks_to_us(adap,
2162					   G_TIMERVALUE0(timer_value_0_and_1));
2163	s->timer_val[1] = core_ticks_to_us(adap,
2164					   G_TIMERVALUE1(timer_value_0_and_1));
2165	s->timer_val[2] = core_ticks_to_us(adap,
2166					   G_TIMERVALUE2(timer_value_2_and_3));
2167	s->timer_val[3] = core_ticks_to_us(adap,
2168					   G_TIMERVALUE3(timer_value_2_and_3));
2169	s->timer_val[4] = core_ticks_to_us(adap,
2170					   G_TIMERVALUE4(timer_value_4_and_5));
2171	s->timer_val[5] = core_ticks_to_us(adap,
2172					   G_TIMERVALUE5(timer_value_4_and_5));
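	/*
	 * Illustrative only: core_ticks_to_us() scales raw TIMERVALUE ticks
	 * by the adapter's core clock, e.g. 50 ticks at an assumed 500 MHz
	 * core clock works out to 0.1us of interrupt holdoff.
	 */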
2173
2174	ingress_rx_threshold = t4_read_reg(adap, A_SGE_INGRESS_RX_THRESHOLD);
2175	s->counter_val[0] = G_THRESHOLD_0(ingress_rx_threshold);
2176	s->counter_val[1] = G_THRESHOLD_1(ingress_rx_threshold);
2177	s->counter_val[2] = G_THRESHOLD_2(ingress_rx_threshold);
2178	s->counter_val[3] = G_THRESHOLD_3(ingress_rx_threshold);
2179
2180	return 0;
2181}
2182
2183int t4_sge_init(struct adapter *adap)
2184{
2185	struct sge *s = &adap->sge;
2186	u32 sge_control, sge_control2, sge_conm_ctrl;
2187	unsigned int ingpadboundary, ingpackboundary;
2188	int ret, egress_threshold;
2189
2190	/*
2191	 * Ingress Padding Boundary and Egress Status Page Size are set up by
2192	 * t4_fixup_host_params().
2193	 */
2194	sge_control = t4_read_reg(adap, A_SGE_CONTROL);
2195	s->pktshift = G_PKTSHIFT(sge_control);
2196	s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
2197
2198	/*
2199	 * T4 uses a single control field to specify both the PCIe Padding and
2200	 * Packing Boundary.  T5 introduced the ability to specify these
2201	 * separately.  The actual Ingress Packet Data alignment boundary
2202	 * within Packed Buffer Mode is the maximum of these two
2203	 * specifications.
2204	 */
2205	ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
2206			 X_INGPADBOUNDARY_SHIFT);
2207	s->fl_align = ingpadboundary;
2208
2209	if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
2210		/*
2211		 * T5 has a weird interpretation of one of the PCIe Packing
2212		 * Boundary values.  No idea why ...
2213		 */
2214		sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
2215		ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
2216		if (ingpackboundary == X_INGPACKBOUNDARY_16B)
2217			ingpackboundary = 16;
2218		else
2219			ingpackboundary = 1 << (ingpackboundary +
2220					  X_INGPACKBOUNDARY_SHIFT);
2221
2222		s->fl_align = max(ingpadboundary, ingpackboundary);
2223	}
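	/*
	 * Illustrative example, assuming X_INGPADBOUNDARY_SHIFT == 5 (i.e. a
	 * minimum pad boundary of 32B): a pad-boundary field of 0 combined
	 * with a packing boundary that decodes to 64B gives
	 * fl_align = max(32, 64) = 64 bytes.
	 */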
2224
2225	ret = t4_sge_init_soft(adap);
2226	if (ret < 0) {
2227		dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
2228			__func__, -ret);
2229		return ret;
2230	}
2231
2232	/*
2233	 * A FL with <= fl_starve_thres buffers is starving and a periodic
2234	 * timer will attempt to refill it.  This needs to be larger than the
2235	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2236	 * stuck waiting for new packets while the SGE is waiting for us to
2237	 * give it more Free List entries.  (Note that the SGE's Egress
2238	 * Congestion Threshold is in units of 2 Free List pointers.)  For T4,
2239	 * there was only a single field to control this.  For T5 there's the
2240	 * original field which now only applies to Unpacked Mode Free List
2241	 * buffers and a new field which only applies to Packed Mode Free List
2242	 * buffers.
2243	 */
2244	sge_conm_ctrl = t4_read_reg(adap, A_SGE_CONM_CTRL);
2245	if (is_t4(adap->params.chip) || adap->use_unpacked_mode)
2246		egress_threshold = G_EGRTHRESHOLD(sge_conm_ctrl);
2247	else
2248		egress_threshold = G_EGRTHRESHOLDPACKING(sge_conm_ctrl);
2249	s->fl_starve_thres = 2 * egress_threshold + 1;
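	/*
	 * Illustrative arithmetic: an EGRTHRESHOLD field of 64 means the
	 * SGE congestion threshold covers 2 * 64 = 128 Free List pointers,
	 * so fl_starve_thres becomes 2 * 64 + 1 = 129 entries.
	 */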
2250
2251	return 0;
2252}
2253