nicvf_hw.c revision 3d9b7210
/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium networks Ltd. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>
#include <math.h>
#include <errno.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

#include "nicvf_plat.h"

struct nicvf_reg_info {
	uint32_t offset;
	const char *name;
};

#define NICVF_REG_POLL_ITER_NR   (10)
#define NICVF_REG_POLL_DELAY_US  (2000)
#define NICVF_REG_INFO(reg) {reg, #reg}

static const struct nicvf_reg_info nicvf_reg_tbl[] = {
	NICVF_REG_INFO(NIC_VF_CFG),
	NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1),
	NICVF_REG_INFO(NIC_VF_INT),
	NICVF_REG_INFO(NIC_VF_INT_W1S),
	NICVF_REG_INFO(NIC_VF_ENA_W1C),
	NICVF_REG_INFO(NIC_VF_ENA_W1S),
	NICVF_REG_INFO(NIC_VNIC_RSS_CFG),
	NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG),
};

static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = {
	{NIC_VNIC_RSS_KEY_0_4 + 0,  "NIC_VNIC_RSS_KEY_0"},
	{NIC_VNIC_RSS_KEY_0_4 + 8,  "NIC_VNIC_RSS_KEY_1"},
	{NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"},
	{NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"},
	{NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"},
	{NIC_VNIC_TX_STAT_0_4 + 0,  "NIC_VNIC_STAT_TX_OCTS"},
	{NIC_VNIC_TX_STAT_0_4 + 8,  "NIC_VNIC_STAT_TX_UCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"},
	{NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"},
	{NIC_VNIC_RX_STAT_0_13 + 0,  "NIC_VNIC_STAT_RX_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 8,  "NIC_VNIC_STAT_RX_UCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"},
	{NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"},
	{NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"},
	{NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"},
	{NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"},
	{NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"},
	{NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"},
};

static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2),
	NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG),
};

static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0),
	NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1),
};

static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = {
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1),
	NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS),
};

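/*
 * Detect capabilities of the underlying silicon from the PCI subsystem
 * device id: known revisions support tunnel parsing and CQE_RX2, and
 * CN83XX additionally allows APAD to be disabled. An unset (zero)
 * subsystem id is treated as a fatal init error.
 */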
int
nicvf_base_init(struct nicvf *nic)
{
	nic->hwcap = 0;
	if (nic->subsystem_device_id == 0)
		return NICVF_ERR_BASE_INIT;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN81XX_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2;

	if (nicvf_hw_version(nic) == PCI_SUB_DEVICE_ID_CN83XX_NICVF)
		nic->hwcap |= NICVF_CAP_TUNNEL_PARSING | NICVF_CAP_CQE_RX2 |
				NICVF_CAP_DISABLE_APAD;

	return NICVF_OK;
}

/* Dump all registers into 'data'; if data is NULL, print to stdout instead */
int
nicvf_reg_dump(struct nicvf *nic, uint64_t *data)
{
	uint32_t i, q;
	bool dump_stdout;

	dump_stdout = data ? false : true;

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_reg_tbl[i].name,
				nicvf_reg_read(nic, nicvf_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset);

	for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++)
		if (dump_stdout)
			nicvf_log("%24s  = 0x%" PRIx64 "\n",
				nicvf_multi_reg_tbl[i].name,
				nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset));
		else
			*data++ = nicvf_reg_read(nic,
					nicvf_multi_reg_tbl[i].offset);

	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_cq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_cq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_sq_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_sq_reg_tbl[i].offset, q);

	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++)
		for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++)
			if (dump_stdout)
				nicvf_log("%30s(%d)  = 0x%" PRIx64 "\n",
					nicvf_qset_rbdr_reg_tbl[i].name, q,
					nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q));
			else
				*data++ = nicvf_queue_reg_read(nic,
					nicvf_qset_rbdr_reg_tbl[i].offset, q);
	return 0;
}

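/*
 * Number of 64-bit words nicvf_reg_dump() produces; callers passing a
 * non-NULL buffer must size it for at least this many entries.
 */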
int
nicvf_reg_get_count(void)
{
	int nr_regs;

	nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl);
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) *
			MAX_CMP_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) *
			MAX_RCV_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) *
			MAX_SND_QUEUES_PER_QS;
	nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) *
			MAX_RCV_BUF_DESC_RINGS_PER_QS;

	return nr_regs;
}

static int
nicvf_qset_config_internal(struct nicvf *nic, bool enable)
{
	int ret;
	struct pf_qs_cfg pf_qs_cfg = {.value = 0};

	pf_qs_cfg.ena = enable ? 1 : 0;
	pf_qs_cfg.vnic = nic->vf_id;
	ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg);
	return ret ? NICVF_ERR_SET_QS : 0;
}

/* Requests PF to assign and enable Qset */
int
nicvf_qset_config(struct nicvf *nic)
{
	/* Enable Qset */
	return nicvf_qset_config_internal(nic, true);
}

int
nicvf_qset_reclaim(struct nicvf *nic)
{
	/* Disable Qset */
	return nicvf_qset_config_internal(nic, false);
}

static int
cmpfunc(const void *a, const void *b)
{
	uint32_t x = *(const uint32_t *)a;
	uint32_t y = *(const uint32_t *)b;

	/* Avoid relying on unsigned subtraction wrap for the sign */
	return (x > y) - (x < y);
}

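/*
 * Return the smallest entry in 'list' that is >= val, or 0 when val is
 * larger than every entry. Note that 'list' is sorted in place.
 */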
static uint32_t
nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries)
{
	uint32_t i;

	qsort(list, entries, sizeof(uint32_t), cmpfunc);
	for (i = 0; i < entries; i++)
		if (val <= list[i])
			break;
	/* val is larger than any entry in the list */
	if (i >= entries)
		return 0;
	else
		return list[i];
}

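/*
 * Fatal queue-set error path: dump all registers, log which CQ/SQ error
 * bits are set and whether any RBDR FIFO reports the FAIL state, clear
 * the per-queue status, then disable all interrupts and abort.
 */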
static void
nicvf_handle_qset_err_intr(struct nicvf *nic)
{
	uint16_t qidx;
	uint64_t status;

	nicvf_log("%s (VF%d)\n", __func__, nic->vf_id);
	nicvf_reg_dump(nic, NULL);

	for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_CQ_0_7_STATUS, qidx);
		if (!(status & NICVF_CQ_ERR_MASK))
			continue;

		if (status & NICVF_CQ_WR_FULL)
			nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx);
		if (status & NICVF_CQ_WR_DISABLE)
			nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx);
		if (status & NICVF_CQ_WR_FAULT)
			nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(
				nic, NIC_QSET_SQ_0_7_STATUS, qidx);
		if (!(status & NICVF_SQ_ERR_MASK))
			continue;

		if (status & NICVF_SQ_ERR_STOPPED)
			nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx);
		if (status & NICVF_SQ_ERR_SEND)
			nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx);
		if (status & NICVF_SQ_ERR_DPE)
			nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0);
	}

	for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_STATUS0, qidx);
		status &= NICVF_RBDR_FIFO_STATE_MASK;
		status >>= NICVF_RBDR_FIFO_STATE_SHIFT;

		if (status == RBDR_FIFO_STATE_FAIL)
			nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx);
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0);
	}

	nicvf_disable_all_interrupts(nic);
	abort();
}

/*
 * Handle the "mbox" and "queue-set error" interrupts that the poll mode
 * driver is interested in. This function is not re-entrant;
 * the caller should provide proper serialization.
 */
int
nicvf_reg_poll_interrupts(struct nicvf *nic)
{
	int msg = 0;
	uint64_t intr;

	intr = nicvf_reg_read(nic, NIC_VF_INT);
	if (intr & NICVF_INTR_MBOX_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK);
		msg = nicvf_handle_mbx_intr(nic);
	}
	if (intr & NICVF_INTR_QS_ERR_MASK) {
		nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK);
		nicvf_handle_qset_err_intr(nic);
	}
	return msg;
}

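/*
 * Poll a 'bits'-wide field at 'bit_pos' of a queue register until it
 * reads back 'val', retrying up to NICVF_REG_POLL_ITER_NR times with
 * NICVF_REG_POLL_DELAY_US between reads.
 */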
static int
nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset,
		    uint32_t bit_pos, uint32_t bits, uint64_t val)
{
	uint64_t bit_mask;
	uint64_t reg_val;
	int timeout = NICVF_REG_POLL_ITER_NR;

	bit_mask = (1ULL << bits) - 1;
	bit_mask = (bit_mask << bit_pos);

	while (timeout) {
		reg_val = nicvf_queue_reg_read(nic, offset, qidx);
		if (((reg_val & bit_mask) >> bit_pos) == val)
			return NICVF_OK;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
	}
	return NICVF_ERR_REG_POLL;
}

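/*
 * Quiesce an RBDR: cache head/tail so buffers can be freed later, then
 * run the reset/disable sequence, waiting on the FIFO state field
 * (STATUS0 bits 63:62) and for both halves of PRFCH_STATUS to match,
 * which presumably indicates the prefetch FIFO has drained.
 */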
int
nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t status;
	int timeout = NICVF_REG_POLL_ITER_NR;
	struct nicvf_rbdr *rbdr = nic->rbdr;

	/* Save head and tail pointers for freeing up buffers */
	if (rbdr) {
		rbdr->head = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3;
		rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3;
		rbdr->next_tail = rbdr->tail;
	}

	/* Reset RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
				NICVF_RBDR_RESET);

	/* Disable RBDR */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0,
				62, 2, 0x00))
		return NICVF_ERR_RBDR_DISABLE;

	while (1) {
		status = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx);
		if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF))
			break;
		nicvf_delay_us(NICVF_REG_POLL_DELAY_US);
		timeout--;
		if (!timeout)
			return NICVF_ERR_RBDR_PREFETCH;
	}

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx,
			NICVF_RBDR_RESET);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
		return NICVF_ERR_RBDR_RESET1;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
	if (nicvf_qset_poll_reg(nic, qidx,
			NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
		return NICVF_ERR_RBDR_RESET2;

	return NICVF_OK;
}

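/*
 * Convert a power-of-two queue length into its QSIZE register encoding,
 * i.e. log2(len) - len_shift, asserting the result is in the valid range.
 */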
static int
nicvf_qsize_regbit(uint32_t len, uint32_t len_shift)
{
	int val;

	val = ((uint32_t)log2(len) - len_shift);
	assert(val >= NICVF_QSIZE_MIN_VAL);
	assert(val <= NICVF_QSIZE_MAX_VAL);
	return val;
}

int
nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx)
{
	int ret;
	uint64_t head, tail;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	struct rbdr_cfg rbdr_cfg = {.value = 0};

	ret = nicvf_qset_rbdr_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set descriptor base address */
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys);

	/* Enable RBDR & set queue size */
	rbdr_cfg.ena = 1;
	rbdr_cfg.reset = 0;
	rbdr_cfg.ldwb = 0;
	rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1,
						RBDR_SIZE_SHIFT);
	rbdr_cfg.avg_con = 0;
	rbdr_cfg.lines = rbdr->buffsz / 128;

	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value);

	/* Verify proper RBDR reset */
	head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx);

	if (head | tail)
		return NICVF_ERR_RBDR_RESET;

	return NICVF_OK;
}

uint32_t
nicvf_qsize_rbdr_roundup(uint32_t val)
{
	uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K,
			RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K,
			RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K,
			RBDR_QUEUE_SZ_512K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

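/*
 * Fill the RBDR with up to 'max_buffs' buffer addresses obtained from
 * 'handler', ring the doorbell with the count actually posted and
 * refresh the cached tail pointers.
 */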
int
nicvf_qset_rbdr_precharge(void *dev, struct nicvf *nic,
			  uint16_t ridx, rbdr_pool_get_handler handler,
			  uint32_t max_buffs)
{
	struct rbdr_entry_t *desc, *desc0;
	struct nicvf_rbdr *rbdr = nic->rbdr;
	uint32_t count;
	nicvf_phys_addr_t phy;

	assert(rbdr != NULL);
	desc = rbdr->desc;
	count = 0;
	/* Don't fill beyond the maximum number of descriptors */
	while (count < rbdr->qlen_mask) {
		if (count >= max_buffs)
			break;
		desc0 = desc + count;
		phy = handler(dev, nic);
		if (phy) {
			desc0->full_addr = phy;
			count++;
		} else {
			break;
		}
	}
	nicvf_smp_wmb();
	nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count);
	rbdr->tail = nicvf_queue_reg_read(nic,
				NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3;
	rbdr->next_tail = rbdr->tail;
	nicvf_smp_rmb();
	return 0;
}

int
nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx)
{
	return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
}

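/*
 * Disable a send queue and, if it was enabled, wait for it to report
 * stopped; then reset it and verify head and tail returned to zero.
 */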
int
nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t head, tail;
	struct sq_cfg sq_cfg;

	sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);

	/* Disable send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);

	/* Check if SQ is stopped */
	if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS,
				NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01))
		return NICVF_ERR_SQ_DISABLE;

	/* Reset send queue */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
	head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
	tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
	if (head | tail)
		return NICVF_ERR_SQ_RESET;

	return 0;
}

int
nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq)
{
	int ret;
	struct sq_cfg sq_cfg = {.value = 0};

	ret = nicvf_qset_sq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Send a mailbox msg to PF to config SQ */
	if (nicvf_mbox_sq_config(nic, qidx))
		return NICVF_ERR_SQ_PF_CFG;

	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys);

	/* Enable send queue & set queue size */
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT);
	sq_cfg.tstmp_bgx_intf = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value);

	/* Ring doorbell so that H/W restarts processing SQEs */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);

	return 0;
}

uint32_t
nicvf_qsize_sq_roundup(uint32_t val)
{
	uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K,
			SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K,
			SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K,
			SND_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

int
nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	/* Disable receive queue */
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
	return nicvf_mbox_rq_sync(nic);
}

int
nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	struct pf_rq_cfg pf_rq_cfg = {.value = 0};
	struct rq_cfg rq_cfg = {.value = 0};

	if (nicvf_qset_rq_reclaim(nic, qidx))
		return NICVF_ERR_RQ_CLAIM;

	pf_rq_cfg.strip_pre_l2 = 0;
	/* First cache line of RBDR data will be allocated into L2C */
	pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST;
	pf_rq_cfg.cq_qs = nic->vf_id;
	pf_rq_cfg.cq_idx = qidx;
	pf_rq_cfg.rbdr_cont_qs = nic->vf_id;
	pf_rq_cfg.rbdr_cont_idx = 0;
	pf_rq_cfg.rbdr_strt_qs = nic->vf_id;
	pf_rq_cfg.rbdr_strt_idx = 0;

	/* Send a mailbox msg to PF to config RQ */
	if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg))
		return NICVF_ERR_RQ_PF_CFG;

	/* Select Rx backpressure */
	if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_BP_CFG;

	/* Send a mailbox msg to PF to config RQ drop */
	if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en))
		return NICVF_ERR_RQ_DROP_CFG;

	/* Enable Receive queue */
	rq_cfg.ena = 1;
	nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value);

	return 0;
}

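/*
 * Disable a completion queue and wait for CFG bit 42 to clear
 * (apparently the disable acknowledgment), then reset it and verify
 * head and tail read back as zero.
 */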
int
nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx)
{
	uint64_t tail, head;

	/* Disable completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
	if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0))
		return NICVF_ERR_CQ_DISABLE;

	/* Reset completion queue */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
	tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9;
	head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9;
	if (head | tail)
		return NICVF_ERR_CQ_RESET;

	/* Disable timer threshold (doesn't get reset upon CQ reset) */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}

int
nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq)
{
	int ret;
	struct cq_cfg cq_cfg = {.value = 0};

	ret = nicvf_qset_cq_reclaim(nic, qidx);
	if (ret)
		return ret;

	/* Set completion queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys);

	cq_cfg.ena = 1;
	cq_cfg.reset = 0;
	/* Writes of CQE will be allocated into L2C */
	cq_cfg.caching = 1;
	cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT);
	cq_cfg.avg_con = 0;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0);
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
	return 0;
}

uint32_t
nicvf_qsize_cq_roundup(uint32_t val)
{
	uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K,
			CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K,
			CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K,
			CMP_QUEUE_SZ_64K};
	return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list));
}

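/*
 * Toggle hardware VLAN stripping via the strip-control field at bit 25
 * of NIC_VNIC_RQ_GEN_CFG; enabling strips only the first (outer) tag,
 * disabling clears both the first- and second-tag strip bits.
 */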
void
nicvf_vlan_hw_strip(struct nicvf *nic, bool enable)
{
	uint64_t val;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	if (enable)
		val |= (STRIP_FIRST_VLAN << 25);
	else
		val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25);

	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

void
nicvf_apad_config(struct nicvf *nic, bool enable)
{
	uint64_t val;

	/* APAD is always enabled and cannot be turned off on this device */
	if (!(nic->hwcap & NICVF_CAP_DISABLE_APAD))
		return;

	val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG);
	if (enable)
		val &= ~(1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);
	else
		val |= (1ULL << NICVF_QS_RQ_DIS_APAD_SHIFT);

	nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val);
}

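/*
 * Program the RSS hash key as RSS_HASH_KEY_SIZE consecutive 64-bit
 * words, converting each word to big-endian before the register write.
 */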
void
nicvf_rss_set_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_cpu_to_be_64(*keyptr);
		nicvf_reg_write(nic, addr, val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}

void
nicvf_rss_get_key(struct nicvf *nic, uint8_t *key)
{
	int idx;
	uint64_t addr, val;
	uint64_t *keyptr = (uint64_t *)key;

	addr = NIC_VNIC_RSS_KEY_0_4;
	for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
		val = nicvf_reg_read(nic, addr);
		*keyptr = nicvf_be_to_cpu_64(val);
		addr += sizeof(uint64_t);
		keyptr++;
	}
}

void
nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val)
{
	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val);
}

uint64_t
nicvf_rss_get_cfg(struct nicvf *nic)
{
	return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
}

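/*
 * Copy 'tbl' into the cached RSS indirection table, bounded by the RETA
 * size reported by the PF, and push the new table via mailbox.
 */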
int
nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)log2(rss->rss_size);
	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		rss->ind_tbl[idx] = tbl[idx];

	if (nicvf_mbox_config_rss(nic))
		return NICVF_ERR_RSS_TBL_UPDATE;

	return NICVF_OK;
}

int
nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count)
{
	uint32_t idx;
	struct nicvf_rss_reta_info *rss = &nic->rss_info;

	/* Result will be stored in nic->rss_info.rss_size */
	if (nicvf_mbox_get_rss_size(nic))
		return NICVF_ERR_RSS_GET_SZ;

	assert(rss->rss_size > 0);
	rss->hash_bits = (uint8_t)log2(rss->rss_size);
	for (idx = 0; idx < rss->rss_size && idx < max_count; idx++)
		tbl[idx] = rss->ind_tbl[idx];

	return NICVF_OK;
}

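/*
 * Apply a default RSS setup: a well-known hash key, the caller-supplied
 * hash config, and a round-robin RETA spreading flows over 'qcnt' queues.
 */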
int
nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg)
{
	uint32_t idx;
	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
	uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = {
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD,
		0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD
	};

	if (nic->cpi_alg != CPI_ALG_NONE)
		return -EINVAL;

	if (cfg == 0)
		return -EINVAL;

	/* Update default RSS key and cfg */
	nicvf_rss_set_key(nic, default_key);
	nicvf_rss_set_cfg(nic, cfg);

	/* Update default RSS RETA */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		default_reta[idx] = idx % qcnt;

	return nicvf_rss_reta_update(nic, default_reta,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_rss_term(struct nicvf *nic)
{
	uint32_t idx;
	uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE];

	nicvf_rss_set_cfg(nic, 0);
	/* Redirect the output to 0th queue */
	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
		disable_rss[idx] = 0;

	return nicvf_rss_reta_update(nic, disable_rss,
			NIC_MAX_RSS_IDR_TBL_SIZE);
}

int
nicvf_loopback_config(struct nicvf *nic, bool enable)
{
	if (enable && nic->loopback_supported == 0)
		return NICVF_ERR_LOOPBACK_CFG;

	return nicvf_mbox_loopback_config(nic, enable);
}

void
nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats)
{
	stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS);
	stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST);
	stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST);
	stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST);
	stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS);
	stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR);
	stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED);
	stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS);
	stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN);
	stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS);
	stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST);
	stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST);
	stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST);
	stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST);

	stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS);
	stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST);
	stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST);
	stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST);
	stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP);
}

void
nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_rx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx);
	qstats->q_rx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx);
}

void
nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats,
		       uint16_t qidx)
{
	qstats->q_tx_bytes =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx);
	qstats->q_tx_packets =
		nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx);
}