i40e_ethdev_vf.c revision 93f15e30
1/*-
2 *   BSD LICENSE
3 *
4 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5 *   All rights reserved.
6 *
7 *   Redistribution and use in source and binary forms, with or without
8 *   modification, are permitted provided that the following conditions
9 *   are met:
10 *
11 *     * Redistributions of source code must retain the above copyright
12 *       notice, this list of conditions and the following disclaimer.
13 *     * Redistributions in binary form must reproduce the above copyright
14 *       notice, this list of conditions and the following disclaimer in
15 *       the documentation and/or other materials provided with the
16 *       distribution.
17 *     * Neither the name of Intel Corporation nor the names of its
18 *       contributors may be used to endorse or promote products derived
19 *       from this software without specific prior written permission.
20 *
21 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <sys/queue.h>
35#include <stdio.h>
36#include <errno.h>
37#include <stdint.h>
38#include <string.h>
39#include <unistd.h>
40#include <stdarg.h>
41#include <inttypes.h>
42#include <rte_byteorder.h>
43#include <rte_common.h>
44#include <rte_cycles.h>
45
46#include <rte_interrupts.h>
47#include <rte_log.h>
48#include <rte_debug.h>
49#include <rte_pci.h>
50#include <rte_atomic.h>
51#include <rte_branch_prediction.h>
52#include <rte_memory.h>
53#include <rte_memzone.h>
54#include <rte_eal.h>
55#include <rte_alarm.h>
56#include <rte_ether.h>
57#include <rte_ethdev.h>
58#include <rte_atomic.h>
59#include <rte_malloc.h>
60#include <rte_dev.h>
61
62#include "i40e_logs.h"
63#include "base/i40e_prototype.h"
64#include "base/i40e_adminq_cmd.h"
65#include "base/i40e_type.h"
66
67#include "i40e_rxtx.h"
68#include "i40e_ethdev.h"
69#include "i40e_pf.h"
70#define I40EVF_VSI_DEFAULT_MSIX_INTR     1
71#define I40EVF_VSI_DEFAULT_MSIX_INTR_LNX 0
72
73/* busy wait delay in msec */
74#define I40EVF_BUSY_WAIT_DELAY 10
75#define I40EVF_BUSY_WAIT_COUNT 50
76#define MAX_RESET_WAIT_CNT     20
77
78struct i40evf_arq_msg_info {
79	enum i40e_virtchnl_ops ops;
80	enum i40e_status_code result;
81	uint16_t buf_len;
82	uint16_t msg_len;
83	uint8_t *msg;
84};
85
86struct vf_cmd_info {
87	enum i40e_virtchnl_ops ops;
88	uint8_t *in_args;
89	uint32_t in_args_size;
90	uint8_t *out_buffer;
91	/* Input & output type. pass in buffer size and pass out
92	 * actual return result
93	 */
94	uint32_t out_size;
95};
96
97enum i40evf_aq_result {
98	I40EVF_MSG_ERR = -1, /* Meet error when accessing admin queue */
99	I40EVF_MSG_NON,      /* Read nothing from admin queue */
100	I40EVF_MSG_SYS,      /* Read system msg from admin queue */
101	I40EVF_MSG_CMD,      /* Read async command result */
102};
103
104static int i40evf_dev_configure(struct rte_eth_dev *dev);
105static int i40evf_dev_start(struct rte_eth_dev *dev);
106static void i40evf_dev_stop(struct rte_eth_dev *dev);
107static void i40evf_dev_info_get(struct rte_eth_dev *dev,
108				struct rte_eth_dev_info *dev_info);
109static int i40evf_dev_link_update(struct rte_eth_dev *dev,
110				  __rte_unused int wait_to_complete);
111static void i40evf_dev_stats_get(struct rte_eth_dev *dev,
112				struct rte_eth_stats *stats);
113static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
114				 struct rte_eth_xstat *xstats, unsigned n);
115static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev,
116				       struct rte_eth_xstat_name *xstats_names,
117				       unsigned limit);
118static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev);
119static int i40evf_vlan_filter_set(struct rte_eth_dev *dev,
120				  uint16_t vlan_id, int on);
121static void i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
122static int i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid,
123				int on);
124static void i40evf_dev_close(struct rte_eth_dev *dev);
125static void i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev);
126static void i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev);
127static void i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev);
128static void i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev);
129static int i40evf_init_vlan(struct rte_eth_dev *dev);
130static int i40evf_dev_rx_queue_start(struct rte_eth_dev *dev,
131				     uint16_t rx_queue_id);
132static int i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev,
133				    uint16_t rx_queue_id);
134static int i40evf_dev_tx_queue_start(struct rte_eth_dev *dev,
135				     uint16_t tx_queue_id);
136static int i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev,
137				    uint16_t tx_queue_id);
138static void i40evf_add_mac_addr(struct rte_eth_dev *dev,
139				struct ether_addr *addr,
140				uint32_t index,
141				uint32_t pool);
142static void i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index);
143static int i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
144			struct rte_eth_rss_reta_entry64 *reta_conf,
145			uint16_t reta_size);
146static int i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
147			struct rte_eth_rss_reta_entry64 *reta_conf,
148			uint16_t reta_size);
149static int i40evf_config_rss(struct i40e_vf *vf);
150static int i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
151				      struct rte_eth_rss_conf *rss_conf);
152static int i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
153					struct rte_eth_rss_conf *rss_conf);
154static int i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
155static void i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
156					struct ether_addr *mac_addr);
157static int
158i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
159static int
160i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
161static void i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
162				   uint8_t *msg,
163				   uint16_t msglen);
164
165/* Default hash key buffer for RSS */
166static uint32_t rss_key_default[I40E_VFQF_HKEY_MAX_INDEX + 1];
167
168struct rte_i40evf_xstats_name_off {
169	char name[RTE_ETH_XSTATS_NAME_SIZE];
170	unsigned offset;
171};
172
173static const struct rte_i40evf_xstats_name_off rte_i40evf_stats_strings[] = {
174	{"rx_bytes", offsetof(struct i40e_eth_stats, rx_bytes)},
175	{"rx_unicast_packets", offsetof(struct i40e_eth_stats, rx_unicast)},
176	{"rx_multicast_packets", offsetof(struct i40e_eth_stats, rx_multicast)},
177	{"rx_broadcast_packets", offsetof(struct i40e_eth_stats, rx_broadcast)},
178	{"rx_dropped_packets", offsetof(struct i40e_eth_stats, rx_discards)},
179	{"rx_unknown_protocol_packets", offsetof(struct i40e_eth_stats,
180		rx_unknown_protocol)},
181	{"tx_bytes", offsetof(struct i40e_eth_stats, tx_bytes)},
182	{"tx_unicast_packets", offsetof(struct i40e_eth_stats, tx_unicast)},
183	{"tx_multicast_packets", offsetof(struct i40e_eth_stats, tx_multicast)},
184	{"tx_broadcast_packets", offsetof(struct i40e_eth_stats, tx_broadcast)},
185	{"tx_dropped_packets", offsetof(struct i40e_eth_stats, tx_discards)},
186	{"tx_error_packets", offsetof(struct i40e_eth_stats, tx_errors)},
187};
188
189#define I40EVF_NB_XSTATS (sizeof(rte_i40evf_stats_strings) / \
190		sizeof(rte_i40evf_stats_strings[0]))
191
192static const struct eth_dev_ops i40evf_eth_dev_ops = {
193	.dev_configure        = i40evf_dev_configure,
194	.dev_start            = i40evf_dev_start,
195	.dev_stop             = i40evf_dev_stop,
196	.promiscuous_enable   = i40evf_dev_promiscuous_enable,
197	.promiscuous_disable  = i40evf_dev_promiscuous_disable,
198	.allmulticast_enable  = i40evf_dev_allmulticast_enable,
199	.allmulticast_disable = i40evf_dev_allmulticast_disable,
200	.link_update          = i40evf_dev_link_update,
201	.stats_get            = i40evf_dev_stats_get,
202	.xstats_get           = i40evf_dev_xstats_get,
203	.xstats_get_names     = i40evf_dev_xstats_get_names,
204	.xstats_reset         = i40evf_dev_xstats_reset,
205	.dev_close            = i40evf_dev_close,
206	.dev_infos_get        = i40evf_dev_info_get,
207	.dev_supported_ptypes_get = i40e_dev_supported_ptypes_get,
208	.vlan_filter_set      = i40evf_vlan_filter_set,
209	.vlan_offload_set     = i40evf_vlan_offload_set,
210	.vlan_pvid_set        = i40evf_vlan_pvid_set,
211	.rx_queue_start       = i40evf_dev_rx_queue_start,
212	.rx_queue_stop        = i40evf_dev_rx_queue_stop,
213	.tx_queue_start       = i40evf_dev_tx_queue_start,
214	.tx_queue_stop        = i40evf_dev_tx_queue_stop,
215	.rx_queue_setup       = i40e_dev_rx_queue_setup,
216	.rx_queue_release     = i40e_dev_rx_queue_release,
217	.rx_queue_intr_enable = i40evf_dev_rx_queue_intr_enable,
218	.rx_queue_intr_disable = i40evf_dev_rx_queue_intr_disable,
219	.rx_descriptor_done   = i40e_dev_rx_descriptor_done,
220	.tx_queue_setup       = i40e_dev_tx_queue_setup,
221	.tx_queue_release     = i40e_dev_tx_queue_release,
222	.rx_queue_count       = i40e_dev_rx_queue_count,
223	.rxq_info_get         = i40e_rxq_info_get,
224	.txq_info_get         = i40e_txq_info_get,
225	.mac_addr_add	      = i40evf_add_mac_addr,
226	.mac_addr_remove      = i40evf_del_mac_addr,
227	.reta_update          = i40evf_dev_rss_reta_update,
228	.reta_query           = i40evf_dev_rss_reta_query,
229	.rss_hash_update      = i40evf_dev_rss_hash_update,
230	.rss_hash_conf_get    = i40evf_dev_rss_hash_conf_get,
231	.mtu_set              = i40evf_dev_mtu_set,
232	.mac_addr_set         = i40evf_set_default_mac_addr,
233};
234
235/*
236 * Read data in admin queue to get msg from pf driver
237 */
238static enum i40evf_aq_result
239i40evf_read_pfmsg(struct rte_eth_dev *dev, struct i40evf_arq_msg_info *data)
240{
241	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
242	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
243	struct i40e_arq_event_info event;
244	enum i40e_virtchnl_ops opcode;
245	enum i40e_status_code retval;
246	int ret;
247	enum i40evf_aq_result result = I40EVF_MSG_NON;
248
249	event.buf_len = data->buf_len;
250	event.msg_buf = data->msg;
251	ret = i40e_clean_arq_element(hw, &event, NULL);
252	/* Can't read any msg from adminQ */
253	if (ret) {
254		if (ret != I40E_ERR_ADMIN_QUEUE_NO_WORK)
255			result = I40EVF_MSG_ERR;
256		return result;
257	}
258
259	opcode = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
260	retval = (enum i40e_status_code)rte_le_to_cpu_32(event.desc.cookie_low);
261	/* pf sys event */
262	if (opcode == I40E_VIRTCHNL_OP_EVENT) {
263		struct i40e_virtchnl_pf_event *vpe =
264			(struct i40e_virtchnl_pf_event *)event.msg_buf;
265
266		result = I40EVF_MSG_SYS;
267		switch (vpe->event) {
268		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
269			vf->link_up =
270				vpe->event_data.link_event.link_status;
271			vf->link_speed =
272				vpe->event_data.link_event.link_speed;
273			vf->pend_msg |= PFMSG_LINK_CHANGE;
274			PMD_DRV_LOG(INFO, "Link status update:%s",
275				    vf->link_up ? "up" : "down");
276			break;
277		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
278			vf->vf_reset = true;
279			vf->pend_msg |= PFMSG_RESET_IMPENDING;
280			PMD_DRV_LOG(INFO, "vf is reseting");
281			break;
282		case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
283			vf->dev_closed = true;
284			vf->pend_msg |= PFMSG_DRIVER_CLOSE;
285			PMD_DRV_LOG(INFO, "PF driver closed");
286			break;
287		default:
288			PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf",
289				    __func__, vpe->event);
290		}
291	} else {
292		/* async reply msg on command issued by vf previously */
293		result = I40EVF_MSG_CMD;
294		/* Actual data length read from PF */
295		data->msg_len = event.msg_len;
296	}
297
298	data->result = retval;
299	data->ops = opcode;
300
301	return result;
302}
303
304/**
305 * clear current command. Only call in case execute
306 * _atomic_set_cmd successfully.
307 */
308static inline void
309_clear_cmd(struct i40e_vf *vf)
310{
311	rte_wmb();
312	vf->pend_cmd = I40E_VIRTCHNL_OP_UNKNOWN;
313}
314
315/*
316 * Check there is pending cmd in execution. If none, set new command.
317 */
318static inline int
319_atomic_set_cmd(struct i40e_vf *vf, enum i40e_virtchnl_ops ops)
320{
321	int ret = rte_atomic32_cmpset(&vf->pend_cmd,
322			I40E_VIRTCHNL_OP_UNKNOWN, ops);
323
324	if (!ret)
325		PMD_DRV_LOG(ERR, "There is incomplete cmd %d", vf->pend_cmd);
326
327	return !ret;
328}
329
330#define MAX_TRY_TIMES 200
331#define ASQ_DELAY_MS  10
332
333static int
334i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args)
335{
336	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
337	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
338	struct i40evf_arq_msg_info info;
339	enum i40evf_aq_result ret;
340	int err, i = 0;
341
342	if (_atomic_set_cmd(vf, args->ops))
343		return -1;
344
345	info.msg = args->out_buffer;
346	info.buf_len = args->out_size;
347	info.ops = I40E_VIRTCHNL_OP_UNKNOWN;
348	info.result = I40E_SUCCESS;
349
350	err = i40e_aq_send_msg_to_pf(hw, args->ops, I40E_SUCCESS,
351		     args->in_args, args->in_args_size, NULL);
352	if (err) {
353		PMD_DRV_LOG(ERR, "fail to send cmd %d", args->ops);
354		_clear_cmd(vf);
355		return err;
356	}
357
358	switch (args->ops) {
359	case I40E_VIRTCHNL_OP_RESET_VF:
360		/*no need to process in this function */
361		err = 0;
362		break;
363	case I40E_VIRTCHNL_OP_VERSION:
364	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
365		/* for init adminq commands, need to poll the response */
366		err = -1;
367		do {
368			ret = i40evf_read_pfmsg(dev, &info);
369			vf->cmd_retval = info.result;
370			if (ret == I40EVF_MSG_CMD) {
371				err = 0;
372				break;
373			} else if (ret == I40EVF_MSG_ERR)
374				break;
375			rte_delay_ms(ASQ_DELAY_MS);
376			/* If don't read msg or read sys event, continue */
377		} while (i++ < MAX_TRY_TIMES);
378		_clear_cmd(vf);
379		break;
380
381	default:
382		/* for other adminq in running time, waiting the cmd done flag */
383		err = -1;
384		do {
385			if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) {
386				err = 0;
387				break;
388			}
389			rte_delay_ms(ASQ_DELAY_MS);
390			/* If don't read msg or read sys event, continue */
391		} while (i++ < MAX_TRY_TIMES);
392		break;
393	}
394
395	return err | vf->cmd_retval;
396}
397
398/*
399 * Check API version with sync wait until version read or fail from admin queue
400 */
401static int
402i40evf_check_api_version(struct rte_eth_dev *dev)
403{
404	struct i40e_virtchnl_version_info version, *pver;
405	int err;
406	struct vf_cmd_info args;
407	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
408
409	version.major = I40E_VIRTCHNL_VERSION_MAJOR;
410	version.minor = I40E_VIRTCHNL_VERSION_MINOR;
411
412	args.ops = I40E_VIRTCHNL_OP_VERSION;
413	args.in_args = (uint8_t *)&version;
414	args.in_args_size = sizeof(version);
415	args.out_buffer = vf->aq_resp;
416	args.out_size = I40E_AQ_BUF_SZ;
417
418	err = i40evf_execute_vf_cmd(dev, &args);
419	if (err) {
420		PMD_INIT_LOG(ERR, "fail to execute command OP_VERSION");
421		return err;
422	}
423
424	pver = (struct i40e_virtchnl_version_info *)args.out_buffer;
425	vf->version_major = pver->major;
426	vf->version_minor = pver->minor;
427	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
428		PMD_DRV_LOG(INFO, "Peer is DPDK PF host");
429	else if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
430		(vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR))
431		PMD_DRV_LOG(INFO, "Peer is Linux PF host");
432	else {
433		PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
434					vf->version_major, vf->version_minor,
435						I40E_VIRTCHNL_VERSION_MAJOR,
436						I40E_VIRTCHNL_VERSION_MINOR);
437		return -1;
438	}
439
440	return 0;
441}
442
443static int
444i40evf_get_vf_resource(struct rte_eth_dev *dev)
445{
446	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
447	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
448	int err;
449	struct vf_cmd_info args;
450	uint32_t caps, len;
451
452	args.ops = I40E_VIRTCHNL_OP_GET_VF_RESOURCES;
453	args.out_buffer = vf->aq_resp;
454	args.out_size = I40E_AQ_BUF_SZ;
455	if (PF_IS_V11(vf)) {
456		caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
457		       I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ |
458		       I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
459		       I40E_VIRTCHNL_VF_OFFLOAD_VLAN |
460		       I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
461		args.in_args = (uint8_t *)&caps;
462		args.in_args_size = sizeof(caps);
463	} else {
464		args.in_args = NULL;
465		args.in_args_size = 0;
466	}
467	err = i40evf_execute_vf_cmd(dev, &args);
468
469	if (err) {
470		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE");
471		return err;
472	}
473
474	len =  sizeof(struct i40e_virtchnl_vf_resource) +
475		I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
476
477	(void)rte_memcpy(vf->vf_res, args.out_buffer,
478			RTE_MIN(args.out_size, len));
479	i40e_vf_parse_hw_config(hw, vf->vf_res);
480
481	return 0;
482}
483
484static int
485i40evf_config_promisc(struct rte_eth_dev *dev,
486		      bool enable_unicast,
487		      bool enable_multicast)
488{
489	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
490	int err;
491	struct vf_cmd_info args;
492	struct i40e_virtchnl_promisc_info promisc;
493
494	promisc.flags = 0;
495	promisc.vsi_id = vf->vsi_res->vsi_id;
496
497	if (enable_unicast)
498		promisc.flags |= I40E_FLAG_VF_UNICAST_PROMISC;
499
500	if (enable_multicast)
501		promisc.flags |= I40E_FLAG_VF_MULTICAST_PROMISC;
502
503	args.ops = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
504	args.in_args = (uint8_t *)&promisc;
505	args.in_args_size = sizeof(promisc);
506	args.out_buffer = vf->aq_resp;
507	args.out_size = I40E_AQ_BUF_SZ;
508
509	err = i40evf_execute_vf_cmd(dev, &args);
510
511	if (err)
512		PMD_DRV_LOG(ERR, "fail to execute command "
513			    "CONFIG_PROMISCUOUS_MODE");
514	return err;
515}
516
517/* Configure vlan and double vlan offload. Use flag to specify which part to configure */
518static int
519i40evf_config_vlan_offload(struct rte_eth_dev *dev,
520				bool enable_vlan_strip)
521{
522	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
523	int err;
524	struct vf_cmd_info args;
525	struct i40e_virtchnl_vlan_offload_info offload;
526
527	offload.vsi_id = vf->vsi_res->vsi_id;
528	offload.enable_vlan_strip = enable_vlan_strip;
529
530	args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_OFFLOAD;
531	args.in_args = (uint8_t *)&offload;
532	args.in_args_size = sizeof(offload);
533	args.out_buffer = vf->aq_resp;
534	args.out_size = I40E_AQ_BUF_SZ;
535
536	err = i40evf_execute_vf_cmd(dev, &args);
537	if (err)
538		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_OFFLOAD");
539
540	return err;
541}
542
543static int
544i40evf_config_vlan_pvid(struct rte_eth_dev *dev,
545				struct i40e_vsi_vlan_pvid_info *info)
546{
547	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
548	int err;
549	struct vf_cmd_info args;
550	struct i40e_virtchnl_pvid_info tpid_info;
551
552	if (info == NULL) {
553		PMD_DRV_LOG(ERR, "invalid parameters");
554		return I40E_ERR_PARAM;
555	}
556
557	memset(&tpid_info, 0, sizeof(tpid_info));
558	tpid_info.vsi_id = vf->vsi_res->vsi_id;
559	(void)rte_memcpy(&tpid_info.info, info, sizeof(*info));
560
561	args.ops = (enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CFG_VLAN_PVID;
562	args.in_args = (uint8_t *)&tpid_info;
563	args.in_args_size = sizeof(tpid_info);
564	args.out_buffer = vf->aq_resp;
565	args.out_size = I40E_AQ_BUF_SZ;
566
567	err = i40evf_execute_vf_cmd(dev, &args);
568	if (err)
569		PMD_DRV_LOG(ERR, "fail to execute command CFG_VLAN_PVID");
570
571	return err;
572}
573
574static void
575i40evf_fill_virtchnl_vsi_txq_info(struct i40e_virtchnl_txq_info *txq_info,
576				  uint16_t vsi_id,
577				  uint16_t queue_id,
578				  uint16_t nb_txq,
579				  struct i40e_tx_queue *txq)
580{
581	txq_info->vsi_id = vsi_id;
582	txq_info->queue_id = queue_id;
583	if (queue_id < nb_txq) {
584		txq_info->ring_len = txq->nb_tx_desc;
585		txq_info->dma_ring_addr = txq->tx_ring_phys_addr;
586	}
587}
588
589static void
590i40evf_fill_virtchnl_vsi_rxq_info(struct i40e_virtchnl_rxq_info *rxq_info,
591				  uint16_t vsi_id,
592				  uint16_t queue_id,
593				  uint16_t nb_rxq,
594				  uint32_t max_pkt_size,
595				  struct i40e_rx_queue *rxq)
596{
597	rxq_info->vsi_id = vsi_id;
598	rxq_info->queue_id = queue_id;
599	rxq_info->max_pkt_size = max_pkt_size;
600	if (queue_id < nb_rxq) {
601		rxq_info->ring_len = rxq->nb_rx_desc;
602		rxq_info->dma_ring_addr = rxq->rx_ring_phys_addr;
603		rxq_info->databuffer_size =
604			(rte_pktmbuf_data_room_size(rxq->mp) -
605				RTE_PKTMBUF_HEADROOM);
606	}
607}
608
609/* It configures VSI queues to co-work with Linux PF host */
610static int
611i40evf_configure_vsi_queues(struct rte_eth_dev *dev)
612{
613	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
614	struct i40e_rx_queue **rxq =
615		(struct i40e_rx_queue **)dev->data->rx_queues;
616	struct i40e_tx_queue **txq =
617		(struct i40e_tx_queue **)dev->data->tx_queues;
618	struct i40e_virtchnl_vsi_queue_config_info *vc_vqci;
619	struct i40e_virtchnl_queue_pair_info *vc_qpi;
620	struct vf_cmd_info args;
621	uint16_t i, nb_qp = vf->num_queue_pairs;
622	const uint32_t size =
623		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqci, nb_qp);
624	uint8_t buff[size];
625	int ret;
626
627	memset(buff, 0, sizeof(buff));
628	vc_vqci = (struct i40e_virtchnl_vsi_queue_config_info *)buff;
629	vc_vqci->vsi_id = vf->vsi_res->vsi_id;
630	vc_vqci->num_queue_pairs = nb_qp;
631
632	for (i = 0, vc_qpi = vc_vqci->qpair; i < nb_qp; i++, vc_qpi++) {
633		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpi->txq,
634			vc_vqci->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
635		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpi->rxq,
636			vc_vqci->vsi_id, i, dev->data->nb_rx_queues,
637					vf->max_pkt_len, rxq[i]);
638	}
639	memset(&args, 0, sizeof(args));
640	args.ops = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
641	args.in_args = (uint8_t *)vc_vqci;
642	args.in_args_size = size;
643	args.out_buffer = vf->aq_resp;
644	args.out_size = I40E_AQ_BUF_SZ;
645	ret = i40evf_execute_vf_cmd(dev, &args);
646	if (ret)
647		PMD_DRV_LOG(ERR, "Failed to execute command of "
648			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES");
649
650	return ret;
651}
652
653/* It configures VSI queues to co-work with DPDK PF host */
654static int
655i40evf_configure_vsi_queues_ext(struct rte_eth_dev *dev)
656{
657	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
658	struct i40e_rx_queue **rxq =
659		(struct i40e_rx_queue **)dev->data->rx_queues;
660	struct i40e_tx_queue **txq =
661		(struct i40e_tx_queue **)dev->data->tx_queues;
662	struct i40e_virtchnl_vsi_queue_config_ext_info *vc_vqcei;
663	struct i40e_virtchnl_queue_pair_ext_info *vc_qpei;
664	struct vf_cmd_info args;
665	uint16_t i, nb_qp = vf->num_queue_pairs;
666	const uint32_t size =
667		I40E_VIRTCHNL_CONFIG_VSI_QUEUES_SIZE(vc_vqcei, nb_qp);
668	uint8_t buff[size];
669	int ret;
670
671	memset(buff, 0, sizeof(buff));
672	vc_vqcei = (struct i40e_virtchnl_vsi_queue_config_ext_info *)buff;
673	vc_vqcei->vsi_id = vf->vsi_res->vsi_id;
674	vc_vqcei->num_queue_pairs = nb_qp;
675	vc_qpei = vc_vqcei->qpair;
676	for (i = 0; i < nb_qp; i++, vc_qpei++) {
677		i40evf_fill_virtchnl_vsi_txq_info(&vc_qpei->txq,
678			vc_vqcei->vsi_id, i, dev->data->nb_tx_queues, txq[i]);
679		i40evf_fill_virtchnl_vsi_rxq_info(&vc_qpei->rxq,
680			vc_vqcei->vsi_id, i, dev->data->nb_rx_queues,
681					vf->max_pkt_len, rxq[i]);
682		if (i < dev->data->nb_rx_queues)
683			/*
684			 * It adds extra info for configuring VSI queues, which
685			 * is needed to enable the configurable crc stripping
686			 * in VF.
687			 */
688			vc_qpei->rxq_ext.crcstrip =
689				dev->data->dev_conf.rxmode.hw_strip_crc;
690	}
691	memset(&args, 0, sizeof(args));
692	args.ops =
693		(enum i40e_virtchnl_ops)I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT;
694	args.in_args = (uint8_t *)vc_vqcei;
695	args.in_args_size = size;
696	args.out_buffer = vf->aq_resp;
697	args.out_size = I40E_AQ_BUF_SZ;
698	ret = i40evf_execute_vf_cmd(dev, &args);
699	if (ret)
700		PMD_DRV_LOG(ERR, "Failed to execute command of "
701			"I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES_EXT");
702
703	return ret;
704}
705
706static int
707i40evf_configure_queues(struct rte_eth_dev *dev)
708{
709	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
710
711	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
712		/* To support DPDK PF host */
713		return i40evf_configure_vsi_queues_ext(dev);
714	else
715		/* To support Linux PF host */
716		return i40evf_configure_vsi_queues(dev);
717}
718
719static int
720i40evf_config_irq_map(struct rte_eth_dev *dev)
721{
722	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
723	struct vf_cmd_info args;
724	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_irq_map_info) + \
725		sizeof(struct i40e_virtchnl_vector_map)];
726	struct i40e_virtchnl_irq_map_info *map_info;
727	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
728	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
729	uint32_t vector_id;
730	int i, err;
731
732	if (rte_intr_allow_others(intr_handle)) {
733		if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
734			vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR;
735		else
736			vector_id = I40EVF_VSI_DEFAULT_MSIX_INTR_LNX;
737	} else {
738		vector_id = I40E_MISC_VEC_ID;
739	}
740
741	map_info = (struct i40e_virtchnl_irq_map_info *)cmd_buffer;
742	map_info->num_vectors = 1;
743	map_info->vecmap[0].rxitr_idx = I40E_ITR_INDEX_DEFAULT;
744	map_info->vecmap[0].vsi_id = vf->vsi_res->vsi_id;
745	/* Alway use default dynamic MSIX interrupt */
746	map_info->vecmap[0].vector_id = vector_id;
747	/* Don't map any tx queue */
748	map_info->vecmap[0].txq_map = 0;
749	map_info->vecmap[0].rxq_map = 0;
750	for (i = 0; i < dev->data->nb_rx_queues; i++) {
751		map_info->vecmap[0].rxq_map |= 1 << i;
752		if (rte_intr_dp_is_en(intr_handle))
753			intr_handle->intr_vec[i] = vector_id;
754	}
755
756	args.ops = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;
757	args.in_args = (u8 *)cmd_buffer;
758	args.in_args_size = sizeof(cmd_buffer);
759	args.out_buffer = vf->aq_resp;
760	args.out_size = I40E_AQ_BUF_SZ;
761	err = i40evf_execute_vf_cmd(dev, &args);
762	if (err)
763		PMD_DRV_LOG(ERR, "fail to execute command OP_ENABLE_QUEUES");
764
765	return err;
766}
767
768static int
769i40evf_switch_queue(struct rte_eth_dev *dev, bool isrx, uint16_t qid,
770				bool on)
771{
772	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
773	struct i40e_virtchnl_queue_select queue_select;
774	int err;
775	struct vf_cmd_info args;
776	memset(&queue_select, 0, sizeof(queue_select));
777	queue_select.vsi_id = vf->vsi_res->vsi_id;
778
779	if (isrx)
780		queue_select.rx_queues |= 1 << qid;
781	else
782		queue_select.tx_queues |= 1 << qid;
783
784	if (on)
785		args.ops = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
786	else
787		args.ops = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
788	args.in_args = (u8 *)&queue_select;
789	args.in_args_size = sizeof(queue_select);
790	args.out_buffer = vf->aq_resp;
791	args.out_size = I40E_AQ_BUF_SZ;
792	err = i40evf_execute_vf_cmd(dev, &args);
793	if (err)
794		PMD_DRV_LOG(ERR, "fail to switch %s %u %s",
795			    isrx ? "RX" : "TX", qid, on ? "on" : "off");
796
797	return err;
798}
799
800static int
801i40evf_start_queues(struct rte_eth_dev *dev)
802{
803	struct rte_eth_dev_data *dev_data = dev->data;
804	int i;
805	struct i40e_rx_queue *rxq;
806	struct i40e_tx_queue *txq;
807
808	for (i = 0; i < dev->data->nb_rx_queues; i++) {
809		rxq = dev_data->rx_queues[i];
810		if (rxq->rx_deferred_start)
811			continue;
812		if (i40evf_dev_rx_queue_start(dev, i) != 0) {
813			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
814			return -1;
815		}
816	}
817
818	for (i = 0; i < dev->data->nb_tx_queues; i++) {
819		txq = dev_data->tx_queues[i];
820		if (txq->tx_deferred_start)
821			continue;
822		if (i40evf_dev_tx_queue_start(dev, i) != 0) {
823			PMD_DRV_LOG(ERR, "Fail to start queue %u", i);
824			return -1;
825		}
826	}
827
828	return 0;
829}
830
831static int
832i40evf_stop_queues(struct rte_eth_dev *dev)
833{
834	int i;
835
836	/* Stop TX queues first */
837	for (i = 0; i < dev->data->nb_tx_queues; i++) {
838		if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
839			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
840			return -1;
841		}
842	}
843
844	/* Then stop RX queues */
845	for (i = 0; i < dev->data->nb_rx_queues; i++) {
846		if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
847			PMD_DRV_LOG(ERR, "Fail to stop queue %u", i);
848			return -1;
849		}
850	}
851
852	return 0;
853}
854
855static void
856i40evf_add_mac_addr(struct rte_eth_dev *dev,
857		    struct ether_addr *addr,
858		    __rte_unused uint32_t index,
859		    __rte_unused uint32_t pool)
860{
861	struct i40e_virtchnl_ether_addr_list *list;
862	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
863	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
864			sizeof(struct i40e_virtchnl_ether_addr)];
865	int err;
866	struct vf_cmd_info args;
867
868	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
869		PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x",
870			    addr->addr_bytes[0], addr->addr_bytes[1],
871			    addr->addr_bytes[2], addr->addr_bytes[3],
872			    addr->addr_bytes[4], addr->addr_bytes[5]);
873		return;
874	}
875
876	list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
877	list->vsi_id = vf->vsi_res->vsi_id;
878	list->num_elements = 1;
879	(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
880					sizeof(addr->addr_bytes));
881
882	args.ops = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;
883	args.in_args = cmd_buffer;
884	args.in_args_size = sizeof(cmd_buffer);
885	args.out_buffer = vf->aq_resp;
886	args.out_size = I40E_AQ_BUF_SZ;
887	err = i40evf_execute_vf_cmd(dev, &args);
888	if (err)
889		PMD_DRV_LOG(ERR, "fail to execute command "
890			    "OP_ADD_ETHER_ADDRESS");
891
892	return;
893}
894
895static void
896i40evf_del_mac_addr_by_addr(struct rte_eth_dev *dev,
897			    struct ether_addr *addr)
898{
899	struct i40e_virtchnl_ether_addr_list *list;
900	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
901	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_ether_addr_list) + \
902			sizeof(struct i40e_virtchnl_ether_addr)];
903	int err;
904	struct vf_cmd_info args;
905
906	if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
907		PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x",
908			    addr->addr_bytes[0], addr->addr_bytes[1],
909			    addr->addr_bytes[2], addr->addr_bytes[3],
910			    addr->addr_bytes[4], addr->addr_bytes[5]);
911		return;
912	}
913
914	list = (struct i40e_virtchnl_ether_addr_list *)cmd_buffer;
915	list->vsi_id = vf->vsi_res->vsi_id;
916	list->num_elements = 1;
917	(void)rte_memcpy(list->list[0].addr, addr->addr_bytes,
918			sizeof(addr->addr_bytes));
919
920	args.ops = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
921	args.in_args = cmd_buffer;
922	args.in_args_size = sizeof(cmd_buffer);
923	args.out_buffer = vf->aq_resp;
924	args.out_size = I40E_AQ_BUF_SZ;
925	err = i40evf_execute_vf_cmd(dev, &args);
926	if (err)
927		PMD_DRV_LOG(ERR, "fail to execute command "
928			    "OP_DEL_ETHER_ADDRESS");
929	return;
930}
931
932static void
933i40evf_del_mac_addr(struct rte_eth_dev *dev, uint32_t index)
934{
935	struct rte_eth_dev_data *data = dev->data;
936	struct ether_addr *addr;
937
938	addr = &data->mac_addrs[index];
939
940	i40evf_del_mac_addr_by_addr(dev, addr);
941}
942
943static int
944i40evf_update_stats(struct rte_eth_dev *dev, struct i40e_eth_stats **pstats)
945{
946	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
947	struct i40e_virtchnl_queue_select q_stats;
948	int err;
949	struct vf_cmd_info args;
950
951	memset(&q_stats, 0, sizeof(q_stats));
952	q_stats.vsi_id = vf->vsi_res->vsi_id;
953	args.ops = I40E_VIRTCHNL_OP_GET_STATS;
954	args.in_args = (u8 *)&q_stats;
955	args.in_args_size = sizeof(q_stats);
956	args.out_buffer = vf->aq_resp;
957	args.out_size = I40E_AQ_BUF_SZ;
958
959	err = i40evf_execute_vf_cmd(dev, &args);
960	if (err) {
961		PMD_DRV_LOG(ERR, "fail to execute command OP_GET_STATS");
962		*pstats = NULL;
963		return err;
964	}
965	*pstats = (struct i40e_eth_stats *)args.out_buffer;
966	return 0;
967}
968
969static int
970i40evf_get_statistics(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
971{
972	int ret;
973	struct i40e_eth_stats *pstats = NULL;
974
975	ret = i40evf_update_stats(dev, &pstats);
976	if (ret != 0)
977		return 0;
978
979	stats->ipackets = pstats->rx_unicast + pstats->rx_multicast +
980						pstats->rx_broadcast;
981	stats->opackets = pstats->tx_broadcast + pstats->tx_multicast +
982						pstats->tx_unicast;
983	stats->imissed = pstats->rx_discards;
984	stats->oerrors = pstats->tx_errors + pstats->tx_discards;
985	stats->ibytes = pstats->rx_bytes;
986	stats->obytes = pstats->tx_bytes;
987
988	return 0;
989}
990
991static void
992i40evf_dev_xstats_reset(struct rte_eth_dev *dev)
993{
994	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
995	struct i40e_eth_stats *pstats = NULL;
996
997	/* read stat values to clear hardware registers */
998	i40evf_update_stats(dev, &pstats);
999
1000	/* set stats offset base on current values */
1001	vf->vsi.eth_stats_offset = vf->vsi.eth_stats;
1002}
1003
1004static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1005				      struct rte_eth_xstat_name *xstats_names,
1006				      __rte_unused unsigned limit)
1007{
1008	unsigned i;
1009
1010	if (xstats_names != NULL)
1011		for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1012			snprintf(xstats_names[i].name,
1013				sizeof(xstats_names[i].name),
1014				"%s", rte_i40evf_stats_strings[i].name);
1015		}
1016	return I40EVF_NB_XSTATS;
1017}
1018
1019static int i40evf_dev_xstats_get(struct rte_eth_dev *dev,
1020				 struct rte_eth_xstat *xstats, unsigned n)
1021{
1022	int ret;
1023	unsigned i;
1024	struct i40e_eth_stats *pstats = NULL;
1025
1026	if (n < I40EVF_NB_XSTATS)
1027		return I40EVF_NB_XSTATS;
1028
1029	ret = i40evf_update_stats(dev, &pstats);
1030	if (ret != 0)
1031		return 0;
1032
1033	if (!xstats)
1034		return 0;
1035
1036	/* loop over xstats array and values from pstats */
1037	for (i = 0; i < I40EVF_NB_XSTATS; i++) {
1038		xstats[i].id = i;
1039		xstats[i].value = *(uint64_t *)(((char *)pstats) +
1040			rte_i40evf_stats_strings[i].offset);
1041	}
1042
1043	return I40EVF_NB_XSTATS;
1044}
1045
1046static int
1047i40evf_add_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1048{
1049	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1050	struct i40e_virtchnl_vlan_filter_list *vlan_list;
1051	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1052							sizeof(uint16_t)];
1053	int err;
1054	struct vf_cmd_info args;
1055
1056	vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1057	vlan_list->vsi_id = vf->vsi_res->vsi_id;
1058	vlan_list->num_elements = 1;
1059	vlan_list->vlan_id[0] = vlanid;
1060
1061	args.ops = I40E_VIRTCHNL_OP_ADD_VLAN;
1062	args.in_args = (u8 *)&cmd_buffer;
1063	args.in_args_size = sizeof(cmd_buffer);
1064	args.out_buffer = vf->aq_resp;
1065	args.out_size = I40E_AQ_BUF_SZ;
1066	err = i40evf_execute_vf_cmd(dev, &args);
1067	if (err)
1068		PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_VLAN");
1069
1070	return err;
1071}
1072
1073static int
1074i40evf_del_vlan(struct rte_eth_dev *dev, uint16_t vlanid)
1075{
1076	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1077	struct i40e_virtchnl_vlan_filter_list *vlan_list;
1078	uint8_t cmd_buffer[sizeof(struct i40e_virtchnl_vlan_filter_list) +
1079							sizeof(uint16_t)];
1080	int err;
1081	struct vf_cmd_info args;
1082
1083	vlan_list = (struct i40e_virtchnl_vlan_filter_list *)cmd_buffer;
1084	vlan_list->vsi_id = vf->vsi_res->vsi_id;
1085	vlan_list->num_elements = 1;
1086	vlan_list->vlan_id[0] = vlanid;
1087
1088	args.ops = I40E_VIRTCHNL_OP_DEL_VLAN;
1089	args.in_args = (u8 *)&cmd_buffer;
1090	args.in_args_size = sizeof(cmd_buffer);
1091	args.out_buffer = vf->aq_resp;
1092	args.out_size = I40E_AQ_BUF_SZ;
1093	err = i40evf_execute_vf_cmd(dev, &args);
1094	if (err)
1095		PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_VLAN");
1096
1097	return err;
1098}
1099
1100static const struct rte_pci_id pci_id_i40evf_map[] = {
1101	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF) },
1102	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV) },
1103	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF) },
1104	{ RTE_PCI_DEVICE(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF) },
1105	{ .vendor_id = 0, /* sentinel */ },
1106};
1107
1108static inline int
1109i40evf_dev_atomic_write_link_status(struct rte_eth_dev *dev,
1110				    struct rte_eth_link *link)
1111{
1112	struct rte_eth_link *dst = &(dev->data->dev_link);
1113	struct rte_eth_link *src = link;
1114
1115	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
1116					*(uint64_t *)src) == 0)
1117		return -1;
1118
1119	return 0;
1120}
1121
1122/* Disable IRQ0 */
1123static inline void
1124i40evf_disable_irq0(struct i40e_hw *hw)
1125{
1126	/* Disable all interrupt types */
1127	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, 0);
1128	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1129		       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1130	I40EVF_WRITE_FLUSH(hw);
1131}
1132
1133/* Enable IRQ0 */
1134static inline void
1135i40evf_enable_irq0(struct i40e_hw *hw)
1136{
1137	/* Enable admin queue interrupt trigger */
1138	uint32_t val;
1139
1140	i40evf_disable_irq0(hw);
1141	val = I40E_READ_REG(hw, I40E_VFINT_ICR0_ENA1);
1142	val |= I40E_VFINT_ICR0_ENA1_ADMINQ_MASK |
1143		I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK;
1144	I40E_WRITE_REG(hw, I40E_VFINT_ICR0_ENA1, val);
1145
1146	I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1147		I40E_VFINT_DYN_CTL01_INTENA_MASK |
1148		I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1149		I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1150
1151	I40EVF_WRITE_FLUSH(hw);
1152}
1153
1154static int
1155i40evf_reset_vf(struct i40e_hw *hw)
1156{
1157	int i, reset;
1158
1159	if (i40e_vf_reset(hw) != I40E_SUCCESS) {
1160		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1161		return -1;
1162	}
1163	/**
1164	  * After issuing vf reset command to pf, pf won't necessarily
1165	  * reset vf, it depends on what state it exactly is. If it's not
1166	  * initialized yet, it won't have vf reset since it's in a certain
1167	  * state. If not, it will try to reset. Even vf is reset, pf will
1168	  * set I40E_VFGEN_RSTAT to COMPLETE first, then wait 10ms and set
1169	  * it to ACTIVE. In this duration, vf may not catch the moment that
1170	  * COMPLETE is set. So, for vf, we'll try to wait a long time.
1171	  */
1172	rte_delay_ms(200);
1173
1174	for (i = 0; i < MAX_RESET_WAIT_CNT; i++) {
1175		reset = rd32(hw, I40E_VFGEN_RSTAT) &
1176			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1177		reset = reset >> I40E_VFGEN_RSTAT_VFR_STATE_SHIFT;
1178		if (I40E_VFR_COMPLETED == reset || I40E_VFR_VFACTIVE == reset)
1179			break;
1180		else
1181			rte_delay_ms(50);
1182	}
1183
1184	if (i >= MAX_RESET_WAIT_CNT) {
1185		PMD_INIT_LOG(ERR, "Reset VF NIC failed");
1186		return -1;
1187	}
1188
1189	return 0;
1190}
1191
1192static int
1193i40evf_init_vf(struct rte_eth_dev *dev)
1194{
1195	int i, err, bufsz;
1196	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1197	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1198	uint16_t interval =
1199		i40e_calc_itr_interval(I40E_QUEUE_ITR_INTERVAL_MAX);
1200
1201	vf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1202	vf->dev_data = dev->data;
1203	err = i40e_set_mac_type(hw);
1204	if (err) {
1205		PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
1206		goto err;
1207	}
1208
1209	i40e_init_adminq_parameter(hw);
1210	err = i40e_init_adminq(hw);
1211	if (err) {
1212		PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
1213		goto err;
1214	}
1215
1216	/* Reset VF and wait until it's complete */
1217	if (i40evf_reset_vf(hw)) {
1218		PMD_INIT_LOG(ERR, "reset NIC failed");
1219		goto err_aq;
1220	}
1221
1222	/* VF reset, shutdown admin queue and initialize again */
1223	if (i40e_shutdown_adminq(hw) != I40E_SUCCESS) {
1224		PMD_INIT_LOG(ERR, "i40e_shutdown_adminq failed");
1225		return -1;
1226	}
1227
1228	i40e_init_adminq_parameter(hw);
1229	if (i40e_init_adminq(hw) != I40E_SUCCESS) {
1230		PMD_INIT_LOG(ERR, "init_adminq failed");
1231		return -1;
1232	}
1233	vf->aq_resp = rte_zmalloc("vf_aq_resp", I40E_AQ_BUF_SZ, 0);
1234	if (!vf->aq_resp) {
1235		PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
1236			goto err_aq;
1237	}
1238	if (i40evf_check_api_version(dev) != 0) {
1239		PMD_INIT_LOG(ERR, "check_api version failed");
1240		goto err_aq;
1241	}
1242	bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
1243		(I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource));
1244	vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
1245	if (!vf->vf_res) {
1246		PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
1247			goto err_aq;
1248	}
1249
1250	if (i40evf_get_vf_resource(dev) != 0) {
1251		PMD_INIT_LOG(ERR, "i40evf_get_vf_config failed");
1252		goto err_alloc;
1253	}
1254
1255	/* got VF config message back from PF, now we can parse it */
1256	for (i = 0; i < vf->vf_res->num_vsis; i++) {
1257		if (vf->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
1258			vf->vsi_res = &vf->vf_res->vsi_res[i];
1259	}
1260
1261	if (!vf->vsi_res) {
1262		PMD_INIT_LOG(ERR, "no LAN VSI found");
1263		goto err_alloc;
1264	}
1265
1266	if (hw->mac.type == I40E_MAC_X722_VF)
1267		vf->flags = I40E_FLAG_RSS_AQ_CAPABLE;
1268	vf->vsi.vsi_id = vf->vsi_res->vsi_id;
1269	vf->vsi.type = vf->vsi_res->vsi_type;
1270	vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
1271	vf->vsi.adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1272
1273	/* Store the MAC address configured by host, or generate random one */
1274	if (is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
1275		vf->flags |= I40E_FLAG_VF_MAC_BY_PF;
1276	else
1277		eth_random_addr(hw->mac.addr); /* Generate a random one */
1278
1279	/* If the PF host is not DPDK, set the interval of ITR0 to max*/
1280	if (vf->version_major != I40E_DPDK_VERSION_MAJOR) {
1281		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1282			       (I40E_ITR_INDEX_DEFAULT <<
1283				I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT) |
1284			       (interval <<
1285				I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT));
1286		I40EVF_WRITE_FLUSH(hw);
1287	}
1288
1289	return 0;
1290
1291err_alloc:
1292	rte_free(vf->vf_res);
1293err_aq:
1294	i40e_shutdown_adminq(hw); /* ignore error */
1295err:
1296	return -1;
1297}
1298
1299static int
1300i40evf_uninit_vf(struct rte_eth_dev *dev)
1301{
1302	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1303	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1304
1305	PMD_INIT_FUNC_TRACE();
1306
1307	if (hw->adapter_stopped == 0)
1308		i40evf_dev_close(dev);
1309	rte_free(vf->vf_res);
1310	vf->vf_res = NULL;
1311	rte_free(vf->aq_resp);
1312	vf->aq_resp = NULL;
1313
1314	return 0;
1315}
1316
1317static void
1318i40evf_handle_pf_event(__rte_unused struct rte_eth_dev *dev,
1319			   uint8_t *msg,
1320			   __rte_unused uint16_t msglen)
1321{
1322	struct i40e_virtchnl_pf_event *pf_msg =
1323			(struct i40e_virtchnl_pf_event *)msg;
1324	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1325
1326	switch (pf_msg->event) {
1327	case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
1328		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_RESET_IMPENDING event");
1329		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET, NULL);
1330		break;
1331	case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
1332		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_LINK_CHANGE event");
1333		vf->link_up = pf_msg->event_data.link_event.link_status;
1334		vf->link_speed = pf_msg->event_data.link_event.link_speed;
1335		break;
1336	case I40E_VIRTCHNL_EVENT_PF_DRIVER_CLOSE:
1337		PMD_DRV_LOG(DEBUG, "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
1338		break;
1339	default:
1340		PMD_DRV_LOG(ERR, " unknown event received %u", pf_msg->event);
1341		break;
1342	}
1343}
1344
1345static void
1346i40evf_handle_aq_msg(struct rte_eth_dev *dev)
1347{
1348	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1349	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1350	struct i40e_arq_event_info info;
1351	uint16_t pending, aq_opc;
1352	enum i40e_virtchnl_ops msg_opc;
1353	enum i40e_status_code msg_ret;
1354	int ret;
1355
1356	info.buf_len = I40E_AQ_BUF_SZ;
1357	if (!vf->aq_resp) {
1358		PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
1359		return;
1360	}
1361	info.msg_buf = vf->aq_resp;
1362
1363	pending = 1;
1364	while (pending) {
1365		ret = i40e_clean_arq_element(hw, &info, &pending);
1366
1367		if (ret != I40E_SUCCESS) {
1368			PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ,"
1369				    "ret: %d", ret);
1370			break;
1371		}
1372		aq_opc = rte_le_to_cpu_16(info.desc.opcode);
1373		/* For the message sent from pf to vf, opcode is stored in
1374		 * cookie_high of struct i40e_aq_desc, while return error code
1375		 * are stored in cookie_low, Which is done by
1376		 * i40e_aq_send_msg_to_vf in PF driver.*/
1377		msg_opc = (enum i40e_virtchnl_ops)rte_le_to_cpu_32(
1378						  info.desc.cookie_high);
1379		msg_ret = (enum i40e_status_code)rte_le_to_cpu_32(
1380						  info.desc.cookie_low);
1381		switch (aq_opc) {
1382		case i40e_aqc_opc_send_msg_to_vf:
1383			if (msg_opc == I40E_VIRTCHNL_OP_EVENT)
1384				/* process event*/
1385				i40evf_handle_pf_event(dev, info.msg_buf,
1386						       info.msg_len);
1387			else {
1388				/* read message and it's expected one */
1389				if (msg_opc == vf->pend_cmd) {
1390					vf->cmd_retval = msg_ret;
1391					/* prevent compiler reordering */
1392					rte_compiler_barrier();
1393					_clear_cmd(vf);
1394				} else
1395					PMD_DRV_LOG(ERR, "command mismatch,"
1396						"expect %u, get %u",
1397						vf->pend_cmd, msg_opc);
1398				PMD_DRV_LOG(DEBUG, "adminq response is received,"
1399					     " opcode = %d", msg_opc);
1400			}
1401			break;
1402		default:
1403			PMD_DRV_LOG(ERR, "Request %u is not supported yet",
1404				    aq_opc);
1405			break;
1406		}
1407	}
1408}
1409
1410/**
1411 * Interrupt handler triggered by NIC  for handling
1412 * specific interrupt. Only adminq interrupt is processed in VF.
1413 *
1414 * @param handle
1415 *  Pointer to interrupt handle.
1416 * @param param
1417 *  The address of parameter (struct rte_eth_dev *) regsitered before.
1418 *
1419 * @return
1420 *  void
1421 */
1422static void
1423i40evf_dev_interrupt_handler(struct rte_intr_handle *intr_handle,
1424			     void *param)
1425{
1426	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1427	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1428	uint32_t icr0;
1429
1430	i40evf_disable_irq0(hw);
1431
1432	/* read out interrupt causes */
1433	icr0 = I40E_READ_REG(hw, I40E_VFINT_ICR01);
1434
1435	/* No interrupt event indicated */
1436	if (!(icr0 & I40E_VFINT_ICR01_INTEVENT_MASK)) {
1437		PMD_DRV_LOG(DEBUG, "No interrupt event, nothing to do");
1438		goto done;
1439	}
1440
1441	if (icr0 & I40E_VFINT_ICR01_ADMINQ_MASK) {
1442		PMD_DRV_LOG(DEBUG, "ICR01_ADMINQ is reported");
1443		i40evf_handle_aq_msg(dev);
1444	}
1445
1446	/* Link Status Change interrupt */
1447	if (icr0 & I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK)
1448		PMD_DRV_LOG(DEBUG, "LINK_STAT_CHANGE is reported,"
1449				   " do nothing");
1450
1451done:
1452	i40evf_enable_irq0(hw);
1453	rte_intr_enable(intr_handle);
1454}
1455
1456static int
1457i40evf_dev_init(struct rte_eth_dev *eth_dev)
1458{
1459	struct i40e_hw *hw
1460		= I40E_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
1461	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(eth_dev);
1462
1463	PMD_INIT_FUNC_TRACE();
1464
1465	/* assign ops func pointer */
1466	eth_dev->dev_ops = &i40evf_eth_dev_ops;
1467	eth_dev->rx_pkt_burst = &i40e_recv_pkts;
1468	eth_dev->tx_pkt_burst = &i40e_xmit_pkts;
1469
1470	/*
1471	 * For secondary processes, we don't initialise any further as primary
1472	 * has already done this work.
1473	 */
1474	if (rte_eal_process_type() != RTE_PROC_PRIMARY){
1475		i40e_set_rx_function(eth_dev);
1476		i40e_set_tx_function(eth_dev);
1477		return 0;
1478	}
1479
1480	rte_eth_copy_pci_info(eth_dev, pci_dev);
1481	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
1482
1483	hw->vendor_id = pci_dev->id.vendor_id;
1484	hw->device_id = pci_dev->id.device_id;
1485	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1486	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1487	hw->bus.device = pci_dev->addr.devid;
1488	hw->bus.func = pci_dev->addr.function;
1489	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1490	hw->adapter_stopped = 0;
1491
1492	if(i40evf_init_vf(eth_dev) != 0) {
1493		PMD_INIT_LOG(ERR, "Init vf failed");
1494		return -1;
1495	}
1496
1497	/* register callback func to eal lib */
1498	rte_intr_callback_register(&pci_dev->intr_handle,
1499		i40evf_dev_interrupt_handler, (void *)eth_dev);
1500
1501	/* enable uio intr after callback register */
1502	rte_intr_enable(&pci_dev->intr_handle);
1503
1504	/* configure and enable device interrupt */
1505	i40evf_enable_irq0(hw);
1506
1507	/* copy mac addr */
1508	eth_dev->data->mac_addrs = rte_zmalloc("i40evf_mac",
1509					ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX,
1510					0);
1511	if (eth_dev->data->mac_addrs == NULL) {
1512		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
1513				" store MAC addresses",
1514				ETHER_ADDR_LEN * I40E_NUM_MACADDR_MAX);
1515		return -ENOMEM;
1516	}
1517	ether_addr_copy((struct ether_addr *)hw->mac.addr,
1518			&eth_dev->data->mac_addrs[0]);
1519
1520	return 0;
1521}
1522
1523static int
1524i40evf_dev_uninit(struct rte_eth_dev *eth_dev)
1525{
1526	PMD_INIT_FUNC_TRACE();
1527
1528	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1529		return -EPERM;
1530
1531	eth_dev->dev_ops = NULL;
1532	eth_dev->rx_pkt_burst = NULL;
1533	eth_dev->tx_pkt_burst = NULL;
1534
1535	if (i40evf_uninit_vf(eth_dev) != 0) {
1536		PMD_INIT_LOG(ERR, "i40evf_uninit_vf failed");
1537		return -1;
1538	}
1539
1540	rte_free(eth_dev->data->mac_addrs);
1541	eth_dev->data->mac_addrs = NULL;
1542
1543	return 0;
1544}
1545/*
1546 * virtual function driver struct
1547 */
1548static struct eth_driver rte_i40evf_pmd = {
1549	.pci_drv = {
1550		.id_table = pci_id_i40evf_map,
1551		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
1552		.probe = rte_eth_dev_pci_probe,
1553		.remove = rte_eth_dev_pci_remove,
1554	},
1555	.eth_dev_init = i40evf_dev_init,
1556	.eth_dev_uninit = i40evf_dev_uninit,
1557	.dev_private_size = sizeof(struct i40e_adapter),
1558};
1559
1560RTE_PMD_REGISTER_PCI(net_i40e_vf, rte_i40evf_pmd.pci_drv);
1561RTE_PMD_REGISTER_PCI_TABLE(net_i40e_vf, pci_id_i40evf_map);
1562RTE_PMD_REGISTER_KMOD_DEP(net_i40e_vf, "* igb_uio | vfio");
1563
1564static int
1565i40evf_dev_configure(struct rte_eth_dev *dev)
1566{
1567	struct i40e_adapter *ad =
1568		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
1569	struct rte_eth_conf *conf = &dev->data->dev_conf;
1570	struct i40e_vf *vf;
1571
1572	/* Initialize to TRUE. If any of Rx queues doesn't meet the bulk
1573	 * allocation or vector Rx preconditions we will reset it.
1574	 */
1575	ad->rx_bulk_alloc_allowed = true;
1576	ad->rx_vec_allowed = true;
1577	ad->tx_simple_allowed = true;
1578	ad->tx_vec_allowed = true;
1579
1580	/* For non-DPDK PF drivers, VF has no ability to disable HW
1581	 * CRC strip, and is implicitly enabled by the PF.
1582	 */
1583	if (!conf->rxmode.hw_strip_crc) {
1584		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1585		if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) &&
1586		    (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) {
1587			/* Peer is running non-DPDK PF driver. */
1588			PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip");
1589			return -EINVAL;
1590		}
1591	}
1592
1593	return i40evf_init_vlan(dev);
1594}
1595
1596static int
1597i40evf_init_vlan(struct rte_eth_dev *dev)
1598{
1599	struct rte_eth_dev_data *data = dev->data;
1600	int ret;
1601
1602	/* Apply vlan offload setting */
1603	i40evf_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK);
1604
1605	/* Apply pvid setting */
1606	ret = i40evf_vlan_pvid_set(dev, data->dev_conf.txmode.pvid,
1607				data->dev_conf.txmode.hw_vlan_insert_pvid);
1608	return ret;
1609}
1610
1611static void
1612i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1613{
1614	bool enable_vlan_strip = 0;
1615	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1616	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1617
1618	/* Linux pf host doesn't support vlan offload yet */
1619	if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1620		/* Vlan stripping setting */
1621		if (mask & ETH_VLAN_STRIP_MASK) {
1622			/* Enable or disable VLAN stripping */
1623			if (dev_conf->rxmode.hw_vlan_strip)
1624				enable_vlan_strip = 1;
1625			else
1626				enable_vlan_strip = 0;
1627
1628			i40evf_config_vlan_offload(dev, enable_vlan_strip);
1629		}
1630	}
1631}
1632
1633static int
1634i40evf_vlan_pvid_set(struct rte_eth_dev *dev, uint16_t pvid, int on)
1635{
1636	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
1637	struct i40e_vsi_vlan_pvid_info info;
1638	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1639
1640	memset(&info, 0, sizeof(info));
1641	info.on = on;
1642
1643	/* Linux pf host don't support vlan offload yet */
1644	if (vf->version_major == I40E_DPDK_VERSION_MAJOR) {
1645		if (info.on)
1646			info.config.pvid = pvid;
1647		else {
1648			info.config.reject.tagged =
1649				dev_conf->txmode.hw_vlan_reject_tagged;
1650			info.config.reject.untagged =
1651				dev_conf->txmode.hw_vlan_reject_untagged;
1652		}
1653		return i40evf_config_vlan_pvid(dev, &info);
1654	}
1655
1656	return 0;
1657}
1658
1659static int
1660i40evf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1661{
1662	struct i40e_rx_queue *rxq;
1663	int err = 0;
1664	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1665
1666	PMD_INIT_FUNC_TRACE();
1667
1668	if (rx_queue_id < dev->data->nb_rx_queues) {
1669		rxq = dev->data->rx_queues[rx_queue_id];
1670
1671		err = i40e_alloc_rx_queue_mbufs(rxq);
1672		if (err) {
1673			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
1674			return err;
1675		}
1676
1677		rte_wmb();
1678
1679		/* Init the RX tail register. */
1680		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1681		I40EVF_WRITE_FLUSH(hw);
1682
1683		/* Ready to switch the queue on */
1684		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, TRUE);
1685
1686		if (err)
1687			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
1688				    rx_queue_id);
1689		else
1690			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1691	}
1692
1693	return err;
1694}
1695
1696static int
1697i40evf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1698{
1699	struct i40e_rx_queue *rxq;
1700	int err;
1701
1702	if (rx_queue_id < dev->data->nb_rx_queues) {
1703		rxq = dev->data->rx_queues[rx_queue_id];
1704
1705		err = i40evf_switch_queue(dev, TRUE, rx_queue_id, FALSE);
1706
1707		if (err) {
1708			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
1709				    rx_queue_id);
1710			return err;
1711		}
1712
1713		i40e_rx_queue_release_mbufs(rxq);
1714		i40e_reset_rx_queue(rxq);
1715		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1716	}
1717
1718	return 0;
1719}
1720
1721static int
1722i40evf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1723{
1724	int err = 0;
1725
1726	PMD_INIT_FUNC_TRACE();
1727
1728	if (tx_queue_id < dev->data->nb_tx_queues) {
1729
1730		/* Ready to switch the queue on */
1731		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, TRUE);
1732
1733		if (err)
1734			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
1735				    tx_queue_id);
1736		else
1737			dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1738	}
1739
1740	return err;
1741}
1742
1743static int
1744i40evf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1745{
1746	struct i40e_tx_queue *txq;
1747	int err;
1748
1749	if (tx_queue_id < dev->data->nb_tx_queues) {
1750		txq = dev->data->tx_queues[tx_queue_id];
1751
1752		err = i40evf_switch_queue(dev, FALSE, tx_queue_id, FALSE);
1753
1754		if (err) {
1755			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
1756				    tx_queue_id);
1757			return err;
1758		}
1759
1760		i40e_tx_queue_release_mbufs(txq);
1761		i40e_reset_tx_queue(txq);
1762		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1763	}
1764
1765	return 0;
1766}
1767
1768static int
1769i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1770{
1771	int ret;
1772
1773	if (on)
1774		ret = i40evf_add_vlan(dev, vlan_id);
1775	else
1776		ret = i40evf_del_vlan(dev,vlan_id);
1777
1778	return ret;
1779}
1780
1781static int
1782i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
1783{
1784	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1785	struct rte_eth_dev_data *dev_data = dev->data;
1786	struct rte_pktmbuf_pool_private *mbp_priv;
1787	uint16_t buf_size, len;
1788
1789	rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL1(rxq->queue_id);
1790	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
1791	I40EVF_WRITE_FLUSH(hw);
1792
1793	/* Calculate the maximum packet length allowed */
1794	mbp_priv = rte_mempool_get_priv(rxq->mp);
1795	buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
1796					RTE_PKTMBUF_HEADROOM);
1797	rxq->hs_mode = i40e_header_split_none;
1798	rxq->rx_hdr_len = 0;
1799	rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << I40E_RXQ_CTX_DBUFF_SHIFT));
1800	len = rxq->rx_buf_len * I40E_MAX_CHAINED_RX_BUFFERS;
1801	rxq->max_pkt_len = RTE_MIN(len,
1802		dev_data->dev_conf.rxmode.max_rx_pkt_len);
1803
1804	/**
1805	 * Check if the jumbo frame and maximum packet length are set correctly
1806	 */
1807	if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
1808		if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
1809		    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
1810			PMD_DRV_LOG(ERR, "maximum packet length must be "
1811				"larger than %u and smaller than %u, as jumbo "
1812				"frame is enabled", (uint32_t)ETHER_MAX_LEN,
1813					(uint32_t)I40E_FRAME_SIZE_MAX);
1814			return I40E_ERR_CONFIG;
1815		}
1816	} else {
1817		if (rxq->max_pkt_len < ETHER_MIN_LEN ||
1818		    rxq->max_pkt_len > ETHER_MAX_LEN) {
1819			PMD_DRV_LOG(ERR, "maximum packet length must be "
1820				"larger than %u and smaller than %u, as jumbo "
1821				"frame is disabled", (uint32_t)ETHER_MIN_LEN,
1822						(uint32_t)ETHER_MAX_LEN);
1823			return I40E_ERR_CONFIG;
1824		}
1825	}
1826
1827	if (dev_data->dev_conf.rxmode.enable_scatter ||
1828	    (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
1829		dev_data->scattered_rx = 1;
1830	}
1831
1832	return 0;
1833}
1834
1835static int
1836i40evf_rx_init(struct rte_eth_dev *dev)
1837{
1838	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1839	uint16_t i;
1840	int ret = I40E_SUCCESS;
1841	struct i40e_rx_queue **rxq =
1842		(struct i40e_rx_queue **)dev->data->rx_queues;
1843
1844	i40evf_config_rss(vf);
1845	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1846		if (!rxq[i] || !rxq[i]->q_set)
1847			continue;
1848		ret = i40evf_rxq_init(dev, rxq[i]);
1849		if (ret != I40E_SUCCESS)
1850			break;
1851	}
1852	if (ret == I40E_SUCCESS)
1853		i40e_set_rx_function(dev);
1854
1855	return ret;
1856}
1857
1858static void
1859i40evf_tx_init(struct rte_eth_dev *dev)
1860{
1861	uint16_t i;
1862	struct i40e_tx_queue **txq =
1863		(struct i40e_tx_queue **)dev->data->tx_queues;
1864	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1865
1866	for (i = 0; i < dev->data->nb_tx_queues; i++)
1867		txq[i]->qtx_tail = hw->hw_addr + I40E_QTX_TAIL1(i);
1868
1869	i40e_set_tx_function(dev);
1870}
1871
1872static inline void
1873i40evf_enable_queues_intr(struct rte_eth_dev *dev)
1874{
1875	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1876	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1877	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
1878	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1879
1880	if (!rte_intr_allow_others(intr_handle)) {
1881		I40E_WRITE_REG(hw,
1882			       I40E_VFINT_DYN_CTL01,
1883			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
1884			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1885			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1886		I40EVF_WRITE_FLUSH(hw);
1887		return;
1888	}
1889
1890	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1891		/* To support DPDK PF host */
1892		I40E_WRITE_REG(hw,
1893			I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR - 1),
1894			I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1895			I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
1896	/* If host driver is kernel driver, do nothing.
1897	 * Interrupt 0 is used for rx packets, but don't set
1898	 * I40E_VFINT_DYN_CTL01,
1899	 * because it is already done in i40evf_enable_irq0.
1900	 */
1901
1902	I40EVF_WRITE_FLUSH(hw);
1903}
1904
1905static inline void
1906i40evf_disable_queues_intr(struct rte_eth_dev *dev)
1907{
1908	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1909	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1910	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
1911	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1912
1913	if (!rte_intr_allow_others(intr_handle)) {
1914		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1915			       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
1916		I40EVF_WRITE_FLUSH(hw);
1917		return;
1918	}
1919
1920	if (vf->version_major == I40E_DPDK_VERSION_MAJOR)
1921		I40E_WRITE_REG(hw,
1922			       I40E_VFINT_DYN_CTLN1(I40EVF_VSI_DEFAULT_MSIX_INTR
1923						    - 1),
1924			       0);
1925	/* If host driver is kernel driver, do nothing.
1926	 * Interrupt 0 is used for rx packets, but don't zero
1927	 * I40E_VFINT_DYN_CTL01,
1928	 * because interrupt 0 is also used for adminq processing.
1929	 */
1930
1931	I40EVF_WRITE_FLUSH(hw);
1932}
1933
1934static int
1935i40evf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1936{
1937	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
1938	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1939	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1940	uint16_t interval =
1941		i40e_calc_itr_interval(RTE_LIBRTE_I40E_ITR_INTERVAL);
1942	uint16_t msix_intr;
1943
1944	msix_intr = intr_handle->intr_vec[queue_id];
1945	if (msix_intr == I40E_MISC_VEC_ID)
1946		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01,
1947			       I40E_VFINT_DYN_CTL01_INTENA_MASK |
1948			       I40E_VFINT_DYN_CTL01_CLEARPBA_MASK |
1949			       (0 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) |
1950			       (interval <<
1951				I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT));
1952	else
1953		I40E_WRITE_REG(hw,
1954			       I40E_VFINT_DYN_CTLN1(msix_intr -
1955						    I40E_RX_VEC_START),
1956			       I40E_VFINT_DYN_CTLN1_INTENA_MASK |
1957			       I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
1958			       (0 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
1959			       (interval <<
1960				I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT));
1961
1962	I40EVF_WRITE_FLUSH(hw);
1963
1964	rte_intr_enable(&pci_dev->intr_handle);
1965
1966	return 0;
1967}
1968
1969static int
1970i40evf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1971{
1972	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
1973	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1974	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1975	uint16_t msix_intr;
1976
1977	msix_intr = intr_handle->intr_vec[queue_id];
1978	if (msix_intr == I40E_MISC_VEC_ID)
1979		I40E_WRITE_REG(hw, I40E_VFINT_DYN_CTL01, 0);
1980	else
1981		I40E_WRITE_REG(hw,
1982			       I40E_VFINT_DYN_CTLN1(msix_intr -
1983						    I40E_RX_VEC_START),
1984			       0);
1985
1986	I40EVF_WRITE_FLUSH(hw);
1987
1988	return 0;
1989}
1990
1991static void
1992i40evf_add_del_all_mac_addr(struct rte_eth_dev *dev, bool add)
1993{
1994	struct i40e_virtchnl_ether_addr_list *list;
1995	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
1996	int err, i, j;
1997	int next_begin = 0;
1998	int begin = 0;
1999	uint32_t len;
2000	struct ether_addr *addr;
2001	struct vf_cmd_info args;
2002
2003	do {
2004		j = 0;
2005		len = sizeof(struct i40e_virtchnl_ether_addr_list);
2006		for (i = begin; i < I40E_NUM_MACADDR_MAX; i++, next_begin++) {
2007			if (is_zero_ether_addr(&dev->data->mac_addrs[i]))
2008				continue;
2009			len += sizeof(struct i40e_virtchnl_ether_addr);
2010			if (len >= I40E_AQ_BUF_SZ) {
2011				next_begin = i + 1;
2012				break;
2013			}
2014		}
2015
2016		list = rte_zmalloc("i40evf_del_mac_buffer", len, 0);
2017
2018		for (i = begin; i < next_begin; i++) {
2019			addr = &dev->data->mac_addrs[i];
2020			if (is_zero_ether_addr(addr))
2021				continue;
2022			(void)rte_memcpy(list->list[j].addr, addr->addr_bytes,
2023					 sizeof(addr->addr_bytes));
2024			PMD_DRV_LOG(DEBUG, "add/rm mac:%x:%x:%x:%x:%x:%x",
2025				    addr->addr_bytes[0], addr->addr_bytes[1],
2026				    addr->addr_bytes[2], addr->addr_bytes[3],
2027				    addr->addr_bytes[4], addr->addr_bytes[5]);
2028			j++;
2029		}
2030		list->vsi_id = vf->vsi_res->vsi_id;
2031		list->num_elements = j;
2032		args.ops = add ? I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS :
2033			   I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;
2034		args.in_args = (uint8_t *)list;
2035		args.in_args_size = len;
2036		args.out_buffer = vf->aq_resp;
2037		args.out_size = I40E_AQ_BUF_SZ;
2038		err = i40evf_execute_vf_cmd(dev, &args);
2039		if (err)
2040			PMD_DRV_LOG(ERR, "fail to execute command %s",
2041				    add ? "OP_ADD_ETHER_ADDRESS" :
2042				    "OP_DEL_ETHER_ADDRESS");
2043		rte_free(list);
2044		begin = next_begin;
2045	} while (begin < I40E_NUM_MACADDR_MAX);
2046}
2047
2048static int
2049i40evf_dev_start(struct rte_eth_dev *dev)
2050{
2051	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2052	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2053	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
2054	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2055	uint32_t intr_vector = 0;
2056
2057	PMD_INIT_FUNC_TRACE();
2058
2059	hw->adapter_stopped = 0;
2060
2061	vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
2062	vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
2063					dev->data->nb_tx_queues);
2064
2065	/* check and configure queue intr-vector mapping */
2066	if (dev->data->dev_conf.intr_conf.rxq != 0) {
2067		intr_vector = dev->data->nb_rx_queues;
2068		if (rte_intr_efd_enable(intr_handle, intr_vector))
2069			return -1;
2070	}
2071
2072	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
2073		intr_handle->intr_vec =
2074			rte_zmalloc("intr_vec",
2075				    dev->data->nb_rx_queues * sizeof(int), 0);
2076		if (!intr_handle->intr_vec) {
2077			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
2078				     " intr_vec", dev->data->nb_rx_queues);
2079			return -ENOMEM;
2080		}
2081	}
2082
2083	if (i40evf_rx_init(dev) != 0){
2084		PMD_DRV_LOG(ERR, "failed to do RX init");
2085		return -1;
2086	}
2087
2088	i40evf_tx_init(dev);
2089
2090	if (i40evf_configure_queues(dev) != 0) {
2091		PMD_DRV_LOG(ERR, "configure queues failed");
2092		goto err_queue;
2093	}
2094	if (i40evf_config_irq_map(dev)) {
2095		PMD_DRV_LOG(ERR, "config_irq_map failed");
2096		goto err_queue;
2097	}
2098
2099	/* Set all mac addrs */
2100	i40evf_add_del_all_mac_addr(dev, TRUE);
2101
2102	if (i40evf_start_queues(dev) != 0) {
2103		PMD_DRV_LOG(ERR, "enable queues failed");
2104		goto err_mac;
2105	}
2106
2107	i40evf_enable_queues_intr(dev);
2108	return 0;
2109
2110err_mac:
2111	i40evf_add_del_all_mac_addr(dev, FALSE);
2112err_queue:
2113	return -1;
2114}
2115
2116static void
2117i40evf_dev_stop(struct rte_eth_dev *dev)
2118{
2119	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
2120	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2121
2122	PMD_INIT_FUNC_TRACE();
2123
2124	i40evf_stop_queues(dev);
2125	i40evf_disable_queues_intr(dev);
2126	i40e_dev_clear_queues(dev);
2127
2128	/* Clean datapath event and queue/vec mapping */
2129	rte_intr_efd_disable(intr_handle);
2130	if (intr_handle->intr_vec) {
2131		rte_free(intr_handle->intr_vec);
2132		intr_handle->intr_vec = NULL;
2133	}
2134	/* remove all mac addrs */
2135	i40evf_add_del_all_mac_addr(dev, FALSE);
2136
2137}
2138
2139static int
2140i40evf_dev_link_update(struct rte_eth_dev *dev,
2141		       __rte_unused int wait_to_complete)
2142{
2143	struct rte_eth_link new_link;
2144	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2145	/*
2146	 * DPDK pf host provide interfacet to acquire link status
2147	 * while Linux driver does not
2148	 */
2149
2150	/* Linux driver PF host */
2151	switch (vf->link_speed) {
2152	case I40E_LINK_SPEED_100MB:
2153		new_link.link_speed = ETH_SPEED_NUM_100M;
2154		break;
2155	case I40E_LINK_SPEED_1GB:
2156		new_link.link_speed = ETH_SPEED_NUM_1G;
2157		break;
2158	case I40E_LINK_SPEED_10GB:
2159		new_link.link_speed = ETH_SPEED_NUM_10G;
2160		break;
2161	case I40E_LINK_SPEED_20GB:
2162		new_link.link_speed = ETH_SPEED_NUM_20G;
2163		break;
2164	case I40E_LINK_SPEED_40GB:
2165		new_link.link_speed = ETH_SPEED_NUM_40G;
2166		break;
2167	default:
2168		new_link.link_speed = ETH_SPEED_NUM_100M;
2169		break;
2170	}
2171	/* full duplex only */
2172	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
2173	new_link.link_status = vf->link_up ? ETH_LINK_UP :
2174					     ETH_LINK_DOWN;
2175
2176	i40evf_dev_atomic_write_link_status(dev, &new_link);
2177
2178	return 0;
2179}
2180
2181static void
2182i40evf_dev_promiscuous_enable(struct rte_eth_dev *dev)
2183{
2184	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2185	int ret;
2186
2187	/* If enabled, just return */
2188	if (vf->promisc_unicast_enabled)
2189		return;
2190
2191	ret = i40evf_config_promisc(dev, 1, vf->promisc_multicast_enabled);
2192	if (ret == 0)
2193		vf->promisc_unicast_enabled = TRUE;
2194}
2195
2196static void
2197i40evf_dev_promiscuous_disable(struct rte_eth_dev *dev)
2198{
2199	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2200	int ret;
2201
2202	/* If disabled, just return */
2203	if (!vf->promisc_unicast_enabled)
2204		return;
2205
2206	ret = i40evf_config_promisc(dev, 0, vf->promisc_multicast_enabled);
2207	if (ret == 0)
2208		vf->promisc_unicast_enabled = FALSE;
2209}
2210
2211static void
2212i40evf_dev_allmulticast_enable(struct rte_eth_dev *dev)
2213{
2214	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2215	int ret;
2216
2217	/* If enabled, just return */
2218	if (vf->promisc_multicast_enabled)
2219		return;
2220
2221	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 1);
2222	if (ret == 0)
2223		vf->promisc_multicast_enabled = TRUE;
2224}
2225
2226static void
2227i40evf_dev_allmulticast_disable(struct rte_eth_dev *dev)
2228{
2229	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2230	int ret;
2231
2232	/* If enabled, just return */
2233	if (!vf->promisc_multicast_enabled)
2234		return;
2235
2236	ret = i40evf_config_promisc(dev, vf->promisc_unicast_enabled, 0);
2237	if (ret == 0)
2238		vf->promisc_multicast_enabled = FALSE;
2239}
2240
2241static void
2242i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
2243{
2244	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2245
2246	memset(dev_info, 0, sizeof(*dev_info));
2247	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
2248	dev_info->max_rx_queues = vf->vsi_res->num_queue_pairs;
2249	dev_info->max_tx_queues = vf->vsi_res->num_queue_pairs;
2250	dev_info->min_rx_bufsize = I40E_BUF_SIZE_MIN;
2251	dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
2252	dev_info->hash_key_size = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2253	dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
2254	dev_info->flow_type_rss_offloads = I40E_RSS_OFFLOAD_ALL;
2255	dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
2256	dev_info->rx_offload_capa =
2257		DEV_RX_OFFLOAD_VLAN_STRIP |
2258		DEV_RX_OFFLOAD_QINQ_STRIP |
2259		DEV_RX_OFFLOAD_IPV4_CKSUM |
2260		DEV_RX_OFFLOAD_UDP_CKSUM |
2261		DEV_RX_OFFLOAD_TCP_CKSUM;
2262	dev_info->tx_offload_capa =
2263		DEV_TX_OFFLOAD_VLAN_INSERT |
2264		DEV_TX_OFFLOAD_QINQ_INSERT |
2265		DEV_TX_OFFLOAD_IPV4_CKSUM |
2266		DEV_TX_OFFLOAD_UDP_CKSUM |
2267		DEV_TX_OFFLOAD_TCP_CKSUM |
2268		DEV_TX_OFFLOAD_SCTP_CKSUM;
2269
2270	dev_info->default_rxconf = (struct rte_eth_rxconf) {
2271		.rx_thresh = {
2272			.pthresh = I40E_DEFAULT_RX_PTHRESH,
2273			.hthresh = I40E_DEFAULT_RX_HTHRESH,
2274			.wthresh = I40E_DEFAULT_RX_WTHRESH,
2275		},
2276		.rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
2277		.rx_drop_en = 0,
2278	};
2279
2280	dev_info->default_txconf = (struct rte_eth_txconf) {
2281		.tx_thresh = {
2282			.pthresh = I40E_DEFAULT_TX_PTHRESH,
2283			.hthresh = I40E_DEFAULT_TX_HTHRESH,
2284			.wthresh = I40E_DEFAULT_TX_WTHRESH,
2285		},
2286		.tx_free_thresh = I40E_DEFAULT_TX_FREE_THRESH,
2287		.tx_rs_thresh = I40E_DEFAULT_TX_RSBIT_THRESH,
2288		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
2289				ETH_TXQ_FLAGS_NOOFFLOADS,
2290	};
2291
2292	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
2293		.nb_max = I40E_MAX_RING_DESC,
2294		.nb_min = I40E_MIN_RING_DESC,
2295		.nb_align = I40E_ALIGN_RING_DESC,
2296	};
2297
2298	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
2299		.nb_max = I40E_MAX_RING_DESC,
2300		.nb_min = I40E_MIN_RING_DESC,
2301		.nb_align = I40E_ALIGN_RING_DESC,
2302	};
2303}
2304
2305static void
2306i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
2307{
2308	if (i40evf_get_statistics(dev, stats))
2309		PMD_DRV_LOG(ERR, "Get statistics failed");
2310}
2311
2312static void
2313i40evf_dev_close(struct rte_eth_dev *dev)
2314{
2315	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2316	struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev);
2317	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2318
2319	i40evf_dev_stop(dev);
2320	hw->adapter_stopped = 1;
2321	i40e_dev_free_queues(dev);
2322	i40evf_reset_vf(hw);
2323	i40e_shutdown_adminq(hw);
2324	/* disable uio intr before callback unregister */
2325	rte_intr_disable(intr_handle);
2326
2327	/* unregister callback func from eal lib */
2328	rte_intr_callback_unregister(intr_handle,
2329				     i40evf_dev_interrupt_handler, dev);
2330	i40evf_disable_irq0(hw);
2331}
2332
2333static int
2334i40evf_get_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2335{
2336	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2337	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2338	int ret;
2339
2340	if (!lut)
2341		return -EINVAL;
2342
2343	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2344		ret = i40e_aq_get_rss_lut(hw, vsi->vsi_id, FALSE,
2345					  lut, lut_size);
2346		if (ret) {
2347			PMD_DRV_LOG(ERR, "Failed to get RSS lookup table");
2348			return ret;
2349		}
2350	} else {
2351		uint32_t *lut_dw = (uint32_t *)lut;
2352		uint16_t i, lut_size_dw = lut_size / 4;
2353
2354		for (i = 0; i < lut_size_dw; i++)
2355			lut_dw[i] = I40E_READ_REG(hw, I40E_VFQF_HLUT(i));
2356	}
2357
2358	return 0;
2359}
2360
2361static int
2362i40evf_set_rss_lut(struct i40e_vsi *vsi, uint8_t *lut, uint16_t lut_size)
2363{
2364	struct i40e_vf *vf;
2365	struct i40e_hw *hw;
2366	int ret;
2367
2368	if (!vsi || !lut)
2369		return -EINVAL;
2370
2371	vf = I40E_VSI_TO_VF(vsi);
2372	hw = I40E_VSI_TO_HW(vsi);
2373
2374	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2375		ret = i40e_aq_set_rss_lut(hw, vsi->vsi_id, FALSE,
2376					  lut, lut_size);
2377		if (ret) {
2378			PMD_DRV_LOG(ERR, "Failed to set RSS lookup table");
2379			return ret;
2380		}
2381	} else {
2382		uint32_t *lut_dw = (uint32_t *)lut;
2383		uint16_t i, lut_size_dw = lut_size / 4;
2384
2385		for (i = 0; i < lut_size_dw; i++)
2386			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
2387		I40EVF_WRITE_FLUSH(hw);
2388	}
2389
2390	return 0;
2391}
2392
2393static int
2394i40evf_dev_rss_reta_update(struct rte_eth_dev *dev,
2395			   struct rte_eth_rss_reta_entry64 *reta_conf,
2396			   uint16_t reta_size)
2397{
2398	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2399	uint8_t *lut;
2400	uint16_t i, idx, shift;
2401	int ret;
2402
2403	if (reta_size != ETH_RSS_RETA_SIZE_64) {
2404		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2405			"(%d) doesn't match the number of hardware can "
2406			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2407		return -EINVAL;
2408	}
2409
2410	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2411	if (!lut) {
2412		PMD_DRV_LOG(ERR, "No memory can be allocated");
2413		return -ENOMEM;
2414	}
2415	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2416	if (ret)
2417		goto out;
2418	for (i = 0; i < reta_size; i++) {
2419		idx = i / RTE_RETA_GROUP_SIZE;
2420		shift = i % RTE_RETA_GROUP_SIZE;
2421		if (reta_conf[idx].mask & (1ULL << shift))
2422			lut[i] = reta_conf[idx].reta[shift];
2423	}
2424	ret = i40evf_set_rss_lut(&vf->vsi, lut, reta_size);
2425
2426out:
2427	rte_free(lut);
2428
2429	return ret;
2430}
2431
2432static int
2433i40evf_dev_rss_reta_query(struct rte_eth_dev *dev,
2434			  struct rte_eth_rss_reta_entry64 *reta_conf,
2435			  uint16_t reta_size)
2436{
2437	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2438	uint16_t i, idx, shift;
2439	uint8_t *lut;
2440	int ret;
2441
2442	if (reta_size != ETH_RSS_RETA_SIZE_64) {
2443		PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2444			"(%d) doesn't match the number of hardware can "
2445			"support (%d)", reta_size, ETH_RSS_RETA_SIZE_64);
2446		return -EINVAL;
2447	}
2448
2449	lut = rte_zmalloc("i40e_rss_lut", reta_size, 0);
2450	if (!lut) {
2451		PMD_DRV_LOG(ERR, "No memory can be allocated");
2452		return -ENOMEM;
2453	}
2454
2455	ret = i40evf_get_rss_lut(&vf->vsi, lut, reta_size);
2456	if (ret)
2457		goto out;
2458	for (i = 0; i < reta_size; i++) {
2459		idx = i / RTE_RETA_GROUP_SIZE;
2460		shift = i % RTE_RETA_GROUP_SIZE;
2461		if (reta_conf[idx].mask & (1ULL << shift))
2462			reta_conf[idx].reta[shift] = lut[i];
2463	}
2464
2465out:
2466	rte_free(lut);
2467
2468	return ret;
2469}
2470
2471static int
2472i40evf_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len)
2473{
2474	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2475	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2476	int ret = 0;
2477
2478	if (!key || key_len == 0) {
2479		PMD_DRV_LOG(DEBUG, "No key to be configured");
2480		return 0;
2481	} else if (key_len != (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2482		sizeof(uint32_t)) {
2483		PMD_DRV_LOG(ERR, "Invalid key length %u", key_len);
2484		return -EINVAL;
2485	}
2486
2487	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2488		struct i40e_aqc_get_set_rss_key_data *key_dw =
2489			(struct i40e_aqc_get_set_rss_key_data *)key;
2490
2491		ret = i40e_aq_set_rss_key(hw, vsi->vsi_id, key_dw);
2492		if (ret)
2493			PMD_INIT_LOG(ERR, "Failed to configure RSS key "
2494				     "via AQ");
2495	} else {
2496		uint32_t *hash_key = (uint32_t *)key;
2497		uint16_t i;
2498
2499		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2500			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY(i), hash_key[i]);
2501		I40EVF_WRITE_FLUSH(hw);
2502	}
2503
2504	return ret;
2505}
2506
2507static int
2508i40evf_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len)
2509{
2510	struct i40e_vf *vf = I40E_VSI_TO_VF(vsi);
2511	struct i40e_hw *hw = I40E_VSI_TO_HW(vsi);
2512	int ret;
2513
2514	if (!key || !key_len)
2515		return -EINVAL;
2516
2517	if (vf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
2518		ret = i40e_aq_get_rss_key(hw, vsi->vsi_id,
2519			(struct i40e_aqc_get_set_rss_key_data *)key);
2520		if (ret) {
2521			PMD_INIT_LOG(ERR, "Failed to get RSS key via AQ");
2522			return ret;
2523		}
2524	} else {
2525		uint32_t *key_dw = (uint32_t *)key;
2526		uint16_t i;
2527
2528		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2529			key_dw[i] = i40e_read_rx_ctl(hw, I40E_VFQF_HKEY(i));
2530	}
2531	*key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t);
2532
2533	return 0;
2534}
2535
2536static int
2537i40evf_hw_rss_hash_set(struct i40e_vf *vf, struct rte_eth_rss_conf *rss_conf)
2538{
2539	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2540	uint64_t rss_hf, hena;
2541	int ret;
2542
2543	ret = i40evf_set_rss_key(&vf->vsi, rss_conf->rss_key,
2544				 rss_conf->rss_key_len);
2545	if (ret)
2546		return ret;
2547
2548	rss_hf = rss_conf->rss_hf;
2549	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2550	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2551	if (hw->mac.type == I40E_MAC_X722)
2552		hena &= ~I40E_RSS_HENA_ALL_X722;
2553	else
2554		hena &= ~I40E_RSS_HENA_ALL;
2555	hena |= i40e_config_hena(rss_hf, hw->mac.type);
2556	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2557	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2558	I40EVF_WRITE_FLUSH(hw);
2559
2560	return 0;
2561}
2562
2563static void
2564i40evf_disable_rss(struct i40e_vf *vf)
2565{
2566	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2567	uint64_t hena;
2568
2569	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2570	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2571	if (hw->mac.type == I40E_MAC_X722)
2572		hena &= ~I40E_RSS_HENA_ALL_X722;
2573	else
2574		hena &= ~I40E_RSS_HENA_ALL;
2575	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(0), (uint32_t)hena);
2576	i40e_write_rx_ctl(hw, I40E_VFQF_HENA(1), (uint32_t)(hena >> 32));
2577	I40EVF_WRITE_FLUSH(hw);
2578}
2579
2580static int
2581i40evf_config_rss(struct i40e_vf *vf)
2582{
2583	struct i40e_hw *hw = I40E_VF_TO_HW(vf);
2584	struct rte_eth_rss_conf rss_conf;
2585	uint32_t i, j, lut = 0, nb_q = (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
2586	uint16_t num;
2587
2588	if (vf->dev_data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
2589		i40evf_disable_rss(vf);
2590		PMD_DRV_LOG(DEBUG, "RSS not configured");
2591		return 0;
2592	}
2593
2594	num = RTE_MIN(vf->dev_data->nb_rx_queues, I40E_MAX_QP_NUM_PER_VF);
2595	/* Fill out the look up table */
2596	for (i = 0, j = 0; i < nb_q; i++, j++) {
2597		if (j >= num)
2598			j = 0;
2599		lut = (lut << 8) | j;
2600		if ((i & 3) == 3)
2601			I40E_WRITE_REG(hw, I40E_VFQF_HLUT(i >> 2), lut);
2602	}
2603
2604	rss_conf = vf->dev_data->dev_conf.rx_adv_conf.rss_conf;
2605	if ((rss_conf.rss_hf & I40E_RSS_OFFLOAD_ALL) == 0) {
2606		i40evf_disable_rss(vf);
2607		PMD_DRV_LOG(DEBUG, "No hash flag is set");
2608		return 0;
2609	}
2610
2611	if (rss_conf.rss_key == NULL || rss_conf.rss_key_len <
2612		(I40E_VFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t)) {
2613		/* Calculate the default hash key */
2614		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
2615			rss_key_default[i] = (uint32_t)rte_rand();
2616		rss_conf.rss_key = (uint8_t *)rss_key_default;
2617		rss_conf.rss_key_len = (I40E_VFQF_HKEY_MAX_INDEX + 1) *
2618			sizeof(uint32_t);
2619	}
2620
2621	return i40evf_hw_rss_hash_set(vf, &rss_conf);
2622}
2623
2624static int
2625i40evf_dev_rss_hash_update(struct rte_eth_dev *dev,
2626			   struct rte_eth_rss_conf *rss_conf)
2627{
2628	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2629	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2630	uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL;
2631	uint64_t hena;
2632
2633	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2634	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2635	if (!(hena & ((hw->mac.type == I40E_MAC_X722)
2636		 ? I40E_RSS_HENA_ALL_X722
2637		 : I40E_RSS_HENA_ALL))) { /* RSS disabled */
2638		if (rss_hf != 0) /* Enable RSS */
2639			return -EINVAL;
2640		return 0;
2641	}
2642
2643	/* RSS enabled */
2644	if (rss_hf == 0) /* Disable RSS */
2645		return -EINVAL;
2646
2647	return i40evf_hw_rss_hash_set(vf, rss_conf);
2648}
2649
2650static int
2651i40evf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2652			     struct rte_eth_rss_conf *rss_conf)
2653{
2654	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2655	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2656	uint64_t hena;
2657
2658	i40evf_get_rss_key(&vf->vsi, rss_conf->rss_key,
2659			   &rss_conf->rss_key_len);
2660
2661	hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(0));
2662	hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_VFQF_HENA(1))) << 32;
2663	rss_conf->rss_hf = i40e_parse_hena(hena);
2664
2665	return 0;
2666}
2667
2668static int
2669i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
2670{
2671	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2672	struct rte_eth_dev_data *dev_data = vf->dev_data;
2673	uint32_t frame_size = mtu + ETHER_HDR_LEN
2674			      + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE;
2675	int ret = 0;
2676
2677	/* check if mtu is within the allowed range */
2678	if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX))
2679		return -EINVAL;
2680
2681	/* mtu setting is forbidden if port is start */
2682	if (dev_data->dev_started) {
2683		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
2684			    dev_data->port_id);
2685		return -EBUSY;
2686	}
2687
2688	if (frame_size > ETHER_MAX_LEN)
2689		dev_data->dev_conf.rxmode.jumbo_frame = 1;
2690	else
2691		dev_data->dev_conf.rxmode.jumbo_frame = 0;
2692
2693	dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
2694
2695	return ret;
2696}
2697
2698static void
2699i40evf_set_default_mac_addr(struct rte_eth_dev *dev,
2700			    struct ether_addr *mac_addr)
2701{
2702	struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
2703
2704	if (!is_valid_assigned_ether_addr(mac_addr)) {
2705		PMD_DRV_LOG(ERR, "Tried to set invalid MAC address.");
2706		return;
2707	}
2708
2709	if (is_same_ether_addr(mac_addr, dev->data->mac_addrs))
2710		return;
2711
2712	if (vf->flags & I40E_FLAG_VF_MAC_BY_PF)
2713		return;
2714
2715	i40evf_del_mac_addr_by_addr(dev, dev->data->mac_addrs);
2716
2717	i40evf_add_mac_addr(dev, mac_addr, 0, 0);
2718}
2719