ecore_vf.c revision 6b3e017e
/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "ecore.h"
#include "ecore_hsi_eth.h"
#include "ecore_sriov.h"
#include "ecore_l2_api.h"
#include "ecore_vf.h"
#include "ecore_vfpf_if.h"
#include "ecore_status.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_l2.h"
#include "ecore_mcp_api.h"
#include "ecore_vf_api.h"

static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	void *p_tlv;

	/* This lock is released in ecore_vf_pf_req_end(), once the PF's
	 * response has been processed.
	 * So, ecore_vf_pf_prep() and ecore_vf_pf_req_end()
	 * must come in pairs.
	 */
	OSAL_MUTEX_ACQUIRE(&p_iov->mutex);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "preparing to send %s tlv over vf pf channel\n",
		   ecore_channel_tlvs_string[type]);

	/* Reset Request offset */
	p_iov->offset = (u8 *)(p_iov->vf2pf_request);

	/* Clear mailbox - both request and reply */
	OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
	OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));

	/* Init type and length */
	p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length);

	/* Init first tlv header */
	((struct vfpf_first_tlv *)p_tlv)->reply_address =
	    (u64)p_iov->pf2vf_reply_phys;

	return p_tlv;
}
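
/* Every request below follows the same pattern around this helper
 * (see e.g. ecore_vf_pf_rxq_stop() for a complete instance):
 *
 *	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_..., sizeof(*req));
 *	... fill the request-specific fields ...
 *	ecore_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
 *	... check rc and resp->hdr.status ...
 *	ecore_vf_pf_req_end(p_hwfn, rc);
 */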

static void ecore_vf_pf_req_end(struct ecore_hwfn *p_hwfn,
				enum _ecore_status_t req_status)
{
	union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF request status = 0x%x, PF reply status = 0x%x\n",
		   req_status, resp->default_resp.hdr.status);

	OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex);
}
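
/* Transport for the TLV request: when the HW channel isn't available
 * (!p_dev->b_hw_channel), the message is handed to the OSAL layer via
 * OSAL_VF_SEND_MSG2PF(). Otherwise the request's DMA address is written
 * into the USDM VF zone and the PF is triggered, after which we poll the
 * `done' byte (written back by the PF) for up to 100 * 25ms.
 */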

static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn,
			     u8 *done, u32 resp_size)
{
	union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
	struct ustorm_trigger_vf_zone trigger;
	struct ustorm_vf_zone *zone_data;
	int rc = ECORE_SUCCESS, time = 100;

	zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;

	/* output tlvs list */
	ecore_dp_tlv_list(p_hwfn, p_req);

	/* need to add the END TLV to the message size */
	resp_size += sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel) {
		rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev,
					 done,
					 p_req,
					 p_hwfn->vf_iov_info->pf2vf_reply,
					 sizeof(union vfpf_tlvs), resp_size);
		/* TODO - no prints about message ? */
		return rc;
	}

	/* Send TLVs over HW channel */
	OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
	trigger.vf_pf_msg_valid = 1;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF -> PF [%02x] message: [%08x, %08x] --> %p,"
		   " %08x --> %p\n",
		   GET_FIELD(p_hwfn->hw_info.concrete_fid,
			     PXP_CONCRETE_FID_PFID),
		   U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys),
		   &zone_data->non_trigger.vf_pf_msg_addr,
		   *((u32 *)&trigger), &zone_data->trigger);

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
	       U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys));

	REG_WR(p_hwfn,
	       (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
	       U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys));

	/* The message data must be written first, to prevent trigger before
	 * data is written.
	 */
	OSAL_WMB(p_hwfn->p_dev);

	REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger,
	       *((u32 *)&trigger));

	/* When the PF is done with the response, it writes back to the
	 * `done' address. Poll until then.
	 */
	while ((!*done) && time) {
		OSAL_MSLEEP(25);
		time--;
	}

	if (!*done) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF <-- PF Timeout [Type %d]\n",
			   p_req->first_tlv.tl.type);
		rc = ECORE_TIMEOUT;
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF response: %d [Type %d]\n",
		   *done, p_req->first_tlv.tl.type);

	return rc;
}

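/* Up to VF_ACQUIRE_THRESH acquire attempts are made when the PF answers
 * NO_RESOURCE; on each retry the request is shrunk to the amounts the PF
 * recommended in its previous reply.
 */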
#define VF_ACQUIRE_THRESH 3
static void ecore_vf_pf_acquire_reduce_resc(struct ecore_hwfn *p_hwfn,
					    struct vf_pf_resc_request *p_req,
					    struct pf_vf_resc *p_resp)
{
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF unwilling to fulfill resource request: rxq [%02x/%02x]"
		   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
		   " vlan [%02x/%02x] mc [%02x/%02x]."
		   " Try PF recommended amount\n",
		   p_req->num_rxqs, p_resp->num_rxqs,
		   p_req->num_txqs, p_resp->num_txqs,
		   p_req->num_sbs, p_resp->num_sbs,
		   p_req->num_mac_filters, p_resp->num_mac_filters,
		   p_req->num_vlan_filters, p_resp->num_vlan_filters,
		   p_req->num_mc_filters, p_resp->num_mc_filters);

	/* humble our request */
	p_req->num_txqs = p_resp->num_txqs;
	p_req->num_rxqs = p_resp->num_rxqs;
	p_req->num_sbs = p_resp->num_sbs;
	p_req->num_mac_filters = p_resp->num_mac_filters;
	p_req->num_vlan_filters = p_resp->num_vlan_filters;
	p_req->num_mc_filters = p_resp->num_mc_filters;
}

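/* Negotiate resources with the PF. The request is re-sent (with a cleared
 * reply buffer) until the PF either accepts, keeps reporting NO_RESOURCE
 * past VF_ACQUIRE_THRESH attempts, or reports an incompatible fastpath HSI.
 * A PF that predates the FW-version override is marked as legacy
 * (b_pre_fp_hsi) so later queue setup can compensate.
 */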
static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct ecore_vf_acquire_sw_info vf_sw_info;
	struct vf_pf_resc_request *p_resc;
	bool resources_acquired = false;
	struct vfpf_acquire_tlv *req;
	int attempts = 0;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
	p_resc = &req->resc_request;

	/* @@@ TBD: PF may not be ready bnx2x_get_vf_id... */
	req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;

	p_resc->num_rxqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_txqs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_sbs = ECORE_MAX_VF_CHAINS_PER_PF;
	p_resc->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
	p_resc->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;

	OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info));
	OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info);

	req->vfdev_info.os_type = vf_sw_info.os_type;
	req->vfdev_info.driver_version = vf_sw_info.driver_version;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
	req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
	req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;

	/* Fill capability field with any non-deprecated config we support */
	req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;

	/* pf 2 vf bulletin board address */
	req->bulletin_addr = p_iov->bulletin.phys;
	req->bulletin_size = p_iov->bulletin.size;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	while (!resources_acquired) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "attempting to acquire resources\n");

		/* Clear response buffer, as this might be a re-send */
		OSAL_MEMSET(p_iov->pf2vf_reply, 0,
			    sizeof(union pfvf_tlvs));

		/* send acquire request */
		rc = ecore_send_msg2pf(p_hwfn,
				       &resp->hdr.status, sizeof(*resp));

		/* PF timeout */
		if (rc)
			return rc;

		/* copy acquire response from buffer to p_hwfn */
		OSAL_MEMCPY(&p_iov->acquire_resp,
			    resp, sizeof(p_iov->acquire_resp));

		attempts++;

		if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
			/* PF agrees to allocate our resources */
			if (!(resp->pfdev_info.capabilities &
			      PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
				/* It's possible legacy PF mistakenly accepted;
				 * but we don't care - simply mark it as
				 * legacy and continue.
				 */
				req->vfdev_info.capabilities |=
					VFPF_ACQUIRE_CAP_PRE_FP_HSI;
			}
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "resources acquired\n");
			resources_acquired = true;
		} /* PF refuses to allocate our resources */
		else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
			 attempts < VF_ACQUIRE_THRESH) {
			ecore_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
							&resp->resc);
		} else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
			if (pfdev_info->major_fp_hsi &&
			    (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
				DP_NOTICE(p_hwfn, false,
					  "PF uses an incompatible fastpath HSI"
					  " %02x.%02x [VF requires %02x.%02x]."
					  " Please change to a VF driver using"
					  " %02x.xx.\n",
					  pfdev_info->major_fp_hsi,
					  pfdev_info->minor_fp_hsi,
					  ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR,
					  pfdev_info->major_fp_hsi);
				rc = ECORE_INVAL;
				goto exit;
			}

			if (!pfdev_info->major_fp_hsi) {
				if (req->vfdev_info.capabilities &
				    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
					DP_NOTICE(p_hwfn, false,
						  "PF uses very old drivers."
						  " Please change to a VF"
						  " driver using no later than"
						  " 8.8.x.x.\n");
					rc = ECORE_INVAL;
					goto exit;
				} else {
					DP_INFO(p_hwfn,
						"PF is old - try re-acquire to"
						" see if it supports FW-version"
						" override\n");
					req->vfdev_info.capabilities |=
						VFPF_ACQUIRE_CAP_PRE_FP_HSI;
				}
			}
		} else {
			DP_ERR(p_hwfn,
			       "PF returned err %d to VF acquisition request\n",
			       resp->hdr.status);
			rc = ECORE_AGAIN;
			goto exit;
		}
	}

	/* Mark the PF as legacy, if needed */
	if (req->vfdev_info.capabilities &
	    VFPF_ACQUIRE_CAP_PRE_FP_HSI)
		p_iov->b_pre_fp_hsi = true;

	rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc);
	if (rc) {
		DP_NOTICE(p_hwfn, true,
			  "VF_UPDATE_ACQUIRE_RESC_RESP Failed:"
			  " status = 0x%x.\n",
			  rc);
		rc = ECORE_AGAIN;
		goto exit;
	}

	/* Update bulletin board size with response from PF */
	p_iov->bulletin.size = resp->bulletin_size;

	/* get HW info */
	p_hwfn->p_dev->type = resp->pfdev_info.dev_type;
	p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev;

	DP_INFO(p_hwfn, "Chip details - %s%d\n",
		ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH",
		CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1);

	p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff;

	/* Learn of the possibility of CMT */
	if (IS_LEAD_HWFN(p_hwfn)) {
		if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
			DP_INFO(p_hwfn, "100g VF\n");
			p_hwfn->p_dev->num_hwfns = 2;
		}
	}

	/* @DPDK */
	if ((~p_iov->b_pre_fp_hsi &
	    ETH_HSI_VER_MINOR) &&
	    (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR))
		DP_INFO(p_hwfn,
			"PF is using older fastpath HSI;"
			" %02x.%02x is configured\n",
			ETH_HSI_VER_MAJOR,
			resp->pfdev_info.minor_fp_hsi);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov;
	u32 reg;

	/* Set number of hwfns - might be overridden once leading hwfn learns
	 * actual configuration from PF.
	 */
	if (IS_LEAD_HWFN(p_hwfn))
		p_hwfn->p_dev->num_hwfns = 1;

	/* Set the doorbell bar. Assumption: regview is set */
	p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview +
	    PXP_VF_BAR0_START_DQ;

	reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);

	reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);

	/* Allocate vf sriov info */
	p_iov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_iov));
	if (!p_iov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_vf_iov'\n");
		return ECORE_NOMEM;
	}

	OSAL_MEMSET(p_iov, 0, sizeof(*p_iov));

	/* Allocate vf2pf msg */
	p_iov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						       &p_iov->vf2pf_request_phys,
						       sizeof(union vfpf_tlvs));
	if (!p_iov->vf2pf_request) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `vf2pf_request' DMA memory\n");
		goto free_p_iov;
	}

	p_iov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
						     &p_iov->pf2vf_reply_phys,
						     sizeof(union pfvf_tlvs));
	if (!p_iov->pf2vf_reply) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `pf2vf_reply' DMA memory\n");
		goto free_vf2pf_request;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's Request mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys]\n",
		   p_iov->vf2pf_request,
		   (unsigned long)p_iov->vf2pf_request_phys,
		   p_iov->pf2vf_reply,
		   (unsigned long)p_iov->pf2vf_reply_phys);

	/* Allocate Bulletin board */
	p_iov->bulletin.size = sizeof(struct ecore_bulletin_content);
	p_iov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
							 &p_iov->bulletin.phys,
							 p_iov->bulletin.size);
	if (!p_iov->bulletin.p_virt) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate bulletin board DMA memory\n");
		goto free_pf2vf_reply;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF's bulletin Board [%p virt 0x%lx phys 0x%08x bytes]\n",
		   p_iov->bulletin.p_virt, (unsigned long)p_iov->bulletin.phys,
		   p_iov->bulletin.size);

	OSAL_MUTEX_ALLOC(p_hwfn, &p_iov->mutex);
	OSAL_MUTEX_INIT(&p_iov->mutex);

	p_hwfn->vf_iov_info = p_iov;

	p_hwfn->hw_info.personality = ECORE_PCI_ETH;

	return ecore_vf_pf_acquire(p_hwfn);

free_pf2vf_reply:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->pf2vf_reply,
			       p_iov->pf2vf_reply_phys,
			       sizeof(union pfvf_tlvs));
free_vf2pf_request:
	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_iov->vf2pf_request,
			       p_iov->vf2pf_request_phys,
			       sizeof(union vfpf_tlvs));
free_p_iov:
	OSAL_FREE(p_hwfn->p_dev, p_iov);

	return ECORE_NOMEM;
}

#define TSTORM_QZONE_START   PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))

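/* For a legacy (pre-FP-HSI-override) PF the VF computes its Rx producer
 * address itself: MSTORM_QZONE_START(dev) + hw_qid * MSTORM_QZONE_SIZE
 * inside the regview BAR. A modern PF instead returns the offset in
 * resp->offset; see ecore_vf_pf_rxq_start() below.
 */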
enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
					   u8 rx_qid,
					   u16 sb,
					   u8 sb_index,
					   u16 bd_max_bytes,
					   dma_addr_t bd_chain_phys_addr,
					   dma_addr_t cqe_pbl_addr,
					   u16 cqe_pbl_size,
					   void OSAL_IOMEM **pp_prod)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));

	req->rx_qid = rx_qid;
	req->cqe_pbl_addr = cqe_pbl_addr;
	req->cqe_pbl_size = cqe_pbl_size;
	req->rxq_addr = bd_chain_phys_addr;
	req->hw_sb = sb;
	req->sb_index = sb_index;
	req->bd_max_bytes = bd_max_bytes;
	req->stat_id = -1; /* Keep initialized, for future compatibility */

	/* If PF is legacy, we'll need to calculate producers ourselves
	 * as well as clean them.
	 */
	if (pp_prod && p_iov->b_pre_fp_hsi) {
		u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview +
			   MSTORM_QZONE_START(p_hwfn->p_dev) +
			   hw_qid * MSTORM_QZONE_SIZE;

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)(&init_prod_val));
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	/* Learn the address of the producer from the response */
	if (pp_prod && !p_iov->b_pre_fp_hsi) {
		u32 init_prod_val = 0;

		*pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + resp->offset;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
			   rx_qid, *pp_prod, resp->offset);

		/* Init the rcq, rx bd and rx sge (if valid) producers to 0.
		 * It was actually the PF's responsibility, but since some
		 * old PFs might fail to do so, we do this as well.
		 */
		OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
		__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
				  (u32 *)&init_prod_val);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn,
					  u16 rx_qid, bool cqe_completion)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_rxqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));

	req->rx_qid = rx_qid;
	req->num_rxqs = 1;
	req->cqe_completion = cqe_completion;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn,
					   u16 tx_queue_id,
					   u16 sb,
					   u8 sb_index,
					   dma_addr_t pbl_addr,
					   u16 pbl_size,
					   void OSAL_IOMEM **pp_doorbell)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_start_queue_resp_tlv *resp;
	struct vfpf_start_txq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));

	req->tx_qid = tx_queue_id;

	/* Tx */
	req->pbl_addr = pbl_addr;
	req->pbl_size = pbl_size;
	req->hw_sb = sb;
	req->sb_index = sb_index;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->queue_start;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	if (pp_doorbell) {
		/* Modern PFs provide the actual offsets, while legacy
		 * provided only the queue id.
		 */
		if (!p_iov->b_pre_fp_hsi) {
			*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
				       resp->offset;
		} else {
			u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];

			*pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells +
				       DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
			   tx_queue_id, *pp_doorbell, resp->offset);
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_stop_txqs_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));

	req->tx_qid = tx_qid;
	req->num_txqs = 1;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn,
					     u16 rx_queue_id,
					     u8 num_rxqs,
					     u8 comp_cqe_flg, u8 comp_event_flg)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	struct vfpf_update_rxq_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req));

	req->rx_qid = rx_queue_id;
	req->num_rxqs = num_rxqs;

	if (comp_cqe_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG;
	if (comp_event_flg)
		req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t
ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id,
			u16 mtu, u8 inner_vlan_removal,
			enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe,
			u8 only_untagged)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_start_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));

	req->mtu = mtu;
	req->vport_id = vport_id;
	req->inner_vlan_removal = inner_vlan_removal;
	req->tpa_mode = tpa_mode;
	req->max_buffers_per_cqe = max_buffers_per_cqe;
	req->only_untagged = only_untagged;

	/* status blocks */
	for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
		if (p_hwfn->sbs_info[i])
			req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

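/* A vport-update request aggregates several optional extended TLVs after
 * the header TLV; the PF answers with one pfvf_def_resp_tlv per extended
 * TLV. ecore_vf_handle_vp_update_is_needed() decides which TLVs a given
 * ecore_sp_vport_update_params instance implies, and
 * ecore_vf_handle_vp_update_tlvs_resp() walks the matching responses.
 */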
static bool
ecore_vf_handle_vp_update_is_needed(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data,
				    u16 tlv)
{
	switch (tlv) {
	case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
		return !!(p_data->update_vport_active_rx_flg ||
			  p_data->update_vport_active_tx_flg);
	case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
#ifndef ASIC_ONLY
		/* FPGA doesn't have PVFC and so can't support tx-switching */
		return !!(p_data->update_tx_switching_flg &&
			  !CHIP_REV_IS_FPGA(p_hwfn->p_dev));
#else
		return !!p_data->update_tx_switching_flg;
#endif
	case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
		return !!p_data->update_inner_vlan_removal_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
		return !!p_data->update_accept_any_vlan_flg;
	case CHANNEL_TLV_VPORT_UPDATE_MCAST:
		return !!p_data->update_approx_mcast_flg;
	case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
		return !!(p_data->accept_flags.update_rx_mode_config ||
			  p_data->accept_flags.update_tx_mode_config);
	case CHANNEL_TLV_VPORT_UPDATE_RSS:
		return !!p_data->rss_params;
	case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
		return !!p_data->sge_tpa_params;
	default:
		DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d] %s\n",
			tlv, ecore_channel_tlvs_string[tlv]);
		return false;
	}
}

static void
ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn,
				    struct ecore_sp_vport_update_params *p_data)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *p_resp;
	u16 tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX;
	     tlv++) {
		if (!ecore_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
			continue;

		p_resp = (struct pfvf_def_resp_tlv *)
		    ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv);
		if (p_resp && p_resp->hdr.status)
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV[%d] type %s Configuration %s\n",
				   tlv, ecore_channel_tlvs_string[tlv],
				   p_resp->hdr.status == PFVF_STATUS_SUCCESS ?
				   "succeeded" : "failed");
	}
}

enum _ecore_status_t
ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_update_params *p_params)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_vport_update_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	u8 update_rx, update_tx;
	u32 resp_size = 0;
	u16 size, tlv;
	int rc;

	resp = &p_iov->pf2vf_reply->default_resp;
	resp_size = sizeof(*resp);

	update_rx = p_params->update_vport_active_rx_flg;
	update_tx = p_params->update_vport_active_tx_flg;

	/* clear mailbox and prep header tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));

	/* Prepare extended tlvs */
	if (update_rx || update_tx) {
		struct vfpf_vport_update_activate_tlv *p_act_tlv;

		size = sizeof(struct vfpf_vport_update_activate_tlv);
		p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
					  size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_act_tlv->update_rx = update_rx;
			p_act_tlv->active_rx = p_params->vport_active_rx_flg;
		}

		if (update_tx) {
			p_act_tlv->update_tx = update_tx;
			p_act_tlv->active_tx = p_params->vport_active_tx_flg;
		}
	}

	if (p_params->update_inner_vlan_removal_flg) {
		struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_vlan_strip_tlv);
		p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					   CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
					   size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg;
	}

	if (p_params->update_tx_switching_flg) {
		struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;

		size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
		p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
						tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
	}

	if (p_params->update_approx_mcast_flg) {
		struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;

		size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
		p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					    CHANNEL_TLV_VPORT_UPDATE_MCAST,
					    size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins,
			    sizeof(unsigned long) *
			    ETH_MULTICAST_MAC_BINS_IN_REGS);
	}

	update_rx = p_params->accept_flags.update_rx_mode_config;
	update_tx = p_params->accept_flags.update_tx_mode_config;

	if (update_rx || update_tx) {
		struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;

		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
		size = sizeof(struct vfpf_vport_update_accept_param_tlv);
		p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (update_rx) {
			p_accept_tlv->update_rx_mode = update_rx;
			p_accept_tlv->rx_accept_filter =
			    p_params->accept_flags.rx_accept_filter;
		}

		if (update_tx) {
			p_accept_tlv->update_tx_mode = update_tx;
			p_accept_tlv->tx_accept_filter =
			    p_params->accept_flags.tx_accept_filter;
		}
	}

	if (p_params->rss_params) {
		struct ecore_rss_params *rss_params = p_params->rss_params;
		struct vfpf_vport_update_rss_tlv *p_rss_tlv;

		size = sizeof(struct vfpf_vport_update_rss_tlv);
		p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					  CHANNEL_TLV_VPORT_UPDATE_RSS, size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (rss_params->update_rss_config)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CONFIG_FLAG;
		if (rss_params->update_rss_capabilities)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_CAPS_FLAG;
		if (rss_params->update_rss_ind_table)
			p_rss_tlv->update_rss_flags |=
			    VFPF_UPDATE_RSS_IND_TABLE_FLAG;
		if (rss_params->update_rss_key)
			p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;

		p_rss_tlv->rss_enable = rss_params->rss_enable;
		p_rss_tlv->rss_caps = rss_params->rss_caps;
		p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
		OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
			    sizeof(rss_params->rss_ind_table));
		OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key,
			    sizeof(rss_params->rss_key));
	}

	if (p_params->update_accept_any_vlan_flg) {
		struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;

		size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
		tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
		p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					       tlv, size);

		resp_size += sizeof(struct pfvf_def_resp_tlv);
		p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
		p_any_vlan_tlv->update_accept_any_vlan_flg =
		    p_params->update_accept_any_vlan_flg;
	}

	if (p_params->sge_tpa_params) {
		struct ecore_sge_tpa_params *sge_tpa_params;
		struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;

		sge_tpa_params = p_params->sge_tpa_params;
		size = sizeof(struct vfpf_vport_update_sge_tpa_tlv);
		p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset,
					      CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
					      size);
		resp_size += sizeof(struct pfvf_def_resp_tlv);

		if (sge_tpa_params->update_tpa_en_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_EN_FLAG;
		if (sge_tpa_params->update_tpa_param_flg)
			p_sge_tpa_tlv->update_sge_tpa_flags |=
			    VFPF_UPDATE_TPA_PARAM_FLAG;

		if (sge_tpa_params->tpa_ipv4_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG;
		if (sge_tpa_params->tpa_ipv6_en_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG;
		if (sge_tpa_params->tpa_pkt_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG;
		if (sge_tpa_params->tpa_hdr_data_split_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_HDR_DATA_SPLIT_FLAG;
		if (sge_tpa_params->tpa_gro_consistent_flg)
			p_sge_tpa_tlv->sge_tpa_flags |=
			    VFPF_TPA_GRO_CONSIST_FLAG;

		p_sge_tpa_tlv->tpa_max_aggs_num =
		    sge_tpa_params->tpa_max_aggs_num;
		p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size;
		p_sge_tpa_tlv->tpa_min_size_to_start =
		    sge_tpa_params->tpa_min_size_to_start;
		p_sge_tpa_tlv->tpa_min_size_to_cont =
		    sge_tpa_params->tpa_min_size_to_cont;

		p_sge_tpa_tlv->max_buffers_per_cqe =
		    sge_tpa_params->max_buffers_per_cqe;
	}

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

	ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

	p_hwfn->b_int_enabled = 0;

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp;
	struct vfpf_first_tlv *req;
	u32 size;
	int rc;

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));

	if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS)
		rc = ECORE_AGAIN;

	ecore_vf_pf_req_end(p_hwfn, rc);

	p_hwfn->b_int_enabled = 0;

	if (p_iov->vf2pf_request)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->vf2pf_request,
				       p_iov->vf2pf_request_phys,
				       sizeof(union vfpf_tlvs));
	if (p_iov->pf2vf_reply)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->pf2vf_reply,
				       p_iov->pf2vf_reply_phys,
				       sizeof(union pfvf_tlvs));

	if (p_iov->bulletin.p_virt) {
		size = sizeof(struct ecore_bulletin_content);
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov->bulletin.p_virt,
				       p_iov->bulletin.phys, size);
	}

	OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info);
	p_hwfn->vf_iov_info = OSAL_NULL;

	return rc;
}

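/* Multicast filtering is approximate: each MAC is hashed into a bin via
 * ecore_mcast_bin_from_mac() and the resulting bitmap is sent through the
 * vport-update MCAST TLV. Any opcode other than ECORE_FILTER_ADD therefore
 * sends an all-clear bitmap.
 */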
void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn,
			      struct ecore_filter_mcast *p_filter_cmd)
{
	struct ecore_sp_vport_update_params sp_params;
	int i;

	OSAL_MEMSET(&sp_params, 0, sizeof(sp_params));
	sp_params.update_approx_mcast_flg = 1;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			OSAL_SET_BIT(bit, sp_params.bins);
		}
	}

	ecore_vf_pf_vport_update(p_hwfn, &sp_params);
}

enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn,
					      struct ecore_filter_ucast
					      *p_ucast)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct vfpf_ucast_filter_tlv *req;
	struct pfvf_def_resp_tlv *resp;
	int rc;

	/* Sanitize */
	if (p_ucast->opcode == ECORE_FILTER_MOVE) {
		DP_NOTICE(p_hwfn, true,
			  "VFs don't support Moving of filters\n");
		return ECORE_INVAL;
	}

	/* clear mailbox and prep first tlv */
	req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
	req->opcode = (u8)p_ucast->opcode;
	req->type = (u8)p_ucast->type;
	OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN);
	req->vlan = p_ucast->vlan;

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	resp = &p_iov->pf2vf_reply->default_resp;
	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_AGAIN;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
			 sizeof(struct vfpf_first_tlv));

	/* add list termination tlv */
	ecore_add_tlv(p_hwfn, &p_iov->offset,
		      CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
	if (rc)
		goto exit;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		rc = ECORE_INVAL;
		goto exit;
	}

exit:
	ecore_vf_pf_req_end(p_hwfn, rc);

	return rc;
}

u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;

	if (!p_iov) {
		DP_NOTICE(p_hwfn, true, "vf_iov_info isn't initialized\n");
		return 0;
	}

	return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
}

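/* The PF publishes link and filter state through the bulletin board it
 * DMAs into VF memory. The VF snapshots it, accepts a new snapshot only if
 * the version advanced and the CRC over the post-CRC payload matches, and
 * then serves all the getters below from the validated shadow copy.
 */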
enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn,
					    u8 *p_change)
{
	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
	struct ecore_bulletin_content shadow;
	u32 crc, crc_size;

	crc_size = sizeof(p_iov->bulletin.p_virt->crc);
	*p_change = 0;

	/* Need to guarantee PF is not in the middle of writing it */
	OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);

	/* If version did not update, no need to do anything */
	if (shadow.version == p_iov->bulletin_shadow.version)
		return ECORE_SUCCESS;

	/* Verify the bulletin we see is valid */
	crc = ecore_crc32(0, (u8 *)&shadow + crc_size,
			  p_iov->bulletin.size - crc_size);
	if (crc != shadow.crc)
		return ECORE_AGAIN;

	/* Set the shadow bulletin and process it */
	OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Read a bulletin update %08x\n", shadow.version);

	*p_change = 1;

	return ECORE_SUCCESS;
}

void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
				struct ecore_mcp_link_params *p_params,
				struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_params, 0, sizeof(*p_params));

	p_params->speed.autoneg = p_bulletin->req_autoneg;
	p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
	p_params->speed.forced_speed = p_bulletin->req_forced_speed;
	p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
	p_params->pause.forced_rx = p_bulletin->req_forced_rx;
	p_params->pause.forced_tx = p_bulletin->req_forced_tx;
	p_params->loopback_mode = p_bulletin->req_loopback;
}

void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_params *params)
{
	__ecore_vf_get_link_params(p_hwfn, params,
				   &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			       struct ecore_mcp_link_state *p_link,
			       struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link, 0, sizeof(*p_link));

	p_link->link_up = p_bulletin->link_up;
	p_link->speed = p_bulletin->speed;
	p_link->full_duplex = p_bulletin->full_duplex;
	p_link->an = p_bulletin->autoneg;
	p_link->an_complete = p_bulletin->autoneg_complete;
	p_link->parallel_detection = p_bulletin->parallel_detection;
	p_link->pfc_enabled = p_bulletin->pfc_enabled;
	p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
	p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
	p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
	p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
	p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
}

void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn,
			     struct ecore_mcp_link_state *link)
{
	__ecore_vf_get_link_state(p_hwfn, link,
				  &p_hwfn->vf_iov_info->bulletin_shadow);
}

void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin)
{
	OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps));
	p_link_caps->speed_capabilities = p_bulletin->capability_speed;
}

void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			    struct ecore_mcp_link_capabilities *p_link_caps)
{
	__ecore_vf_get_link_caps(p_hwfn, p_link_caps,
				 &p_hwfn->vf_iov_info->bulletin_shadow);
}

void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs)
{
	*num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
}

void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac)
{
	OSAL_MEMCPY(port_mac,
		    p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac,
		    ETH_ALEN);
}

void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn,
				   u8 *num_vlan_filters)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
}

/* @DPDK */
void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn,
				  u32 *num_mac)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_mac = p_vf->acquire_resp.resc.num_mac_filters;
}

void ecore_vf_get_num_sbs(struct ecore_hwfn *p_hwfn,
			  u32 *num_sbs)
{
	struct ecore_vf_iov *p_vf;

	p_vf = p_hwfn->vf_iov_info;
	*num_sbs = (u32)p_vf->acquire_resp.resc.num_sbs;
}

bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
	if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
		return true;

	/* Forbid VF from changing a MAC enforced by PF */
	if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN))
		return false;

	return true;
}

bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac,
				      u8 *p_is_forced)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
		if (p_is_forced)
			*p_is_forced = 1;
	} else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
		if (p_is_forced)
			*p_is_forced = 0;
	} else {
		return false;
	}

	OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN);

	return true;
}

bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid)
{
	struct ecore_bulletin_content *bulletin;

	bulletin = &hwfn->vf_iov_info->bulletin_shadow;

	if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
		return false;

	if (dst_pvid)
		*dst_pvid = bulletin->pvid;

	return true;
}

bool ecore_vf_get_pre_fp_hsi(struct ecore_hwfn *p_hwfn)
{
	return p_hwfn->vf_iov_info->b_pre_fp_hsi;
}

void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn,
			     u16 *fw_major, u16 *fw_minor, u16 *fw_rev,
			     u16 *fw_eng)
{
	struct pf_vf_pfdev_info *info;

	info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;

	*fw_major = info->fw_major;
	*fw_minor = info->fw_minor;
	*fw_rev = info->fw_rev;
	*fw_eng = info->fw_eng;
}
1464