nicvf_ethdev.c revision 39157ec0
1/*
2 *   BSD LICENSE
3 *
4 *   Copyright (C) Cavium networks Ltd. 2016.
5 *
6 *   Redistribution and use in source and binary forms, with or without
7 *   modification, are permitted provided that the following conditions
8 *   are met:
9 *
10 *     * Redistributions of source code must retain the above copyright
11 *       notice, this list of conditions and the following disclaimer.
12 *     * Redistributions in binary form must reproduce the above copyright
13 *       notice, this list of conditions and the following disclaimer in
14 *       the documentation and/or other materials provided with the
15 *       distribution.
16 *     * Neither the name of Cavium networks nor the names of its
17 *       contributors may be used to endorse or promote products derived
18 *       from this software without specific prior written permission.
19 *
20 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */
32
33#include <assert.h>
34#include <stdio.h>
35#include <stdbool.h>
36#include <errno.h>
37#include <stdint.h>
38#include <string.h>
39#include <unistd.h>
40#include <stdarg.h>
41#include <inttypes.h>
42#include <netinet/in.h>
43#include <sys/queue.h>
44
45#include <rte_alarm.h>
46#include <rte_atomic.h>
47#include <rte_branch_prediction.h>
48#include <rte_byteorder.h>
49#include <rte_common.h>
50#include <rte_cycles.h>
51#include <rte_debug.h>
52#include <rte_dev.h>
53#include <rte_eal.h>
54#include <rte_ether.h>
55#include <rte_ethdev.h>
56#include <rte_interrupts.h>
57#include <rte_log.h>
58#include <rte_memory.h>
59#include <rte_memzone.h>
60#include <rte_malloc.h>
61#include <rte_random.h>
62#include <rte_pci.h>
63#include <rte_tailq.h>
64
65#include "base/nicvf_plat.h"
66
67#include "nicvf_ethdev.h"
68#include "nicvf_rxtx.h"
69#include "nicvf_svf.h"
70#include "nicvf_logs.h"
71
72static void nicvf_dev_stop(struct rte_eth_dev *dev);
73static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
74static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
75			  bool cleanup);
76
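/*
 * Atomically publish the new link state into dev->data->dev_link. The casts
 * below rely on struct rte_eth_link fitting into a single 64-bit word, so one
 * compare-and-set updates status, speed and duplex together. Returns 0 on
 * success and -1 if the link info was modified concurrently.
 */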
77static inline int
78nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
79			       struct rte_eth_link *link)
80{
81	struct rte_eth_link *dst = &dev->data->dev_link;
82	struct rte_eth_link *src = link;
83
84	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
85		*(uint64_t *)src) == 0)
86		return -1;
87
88	return 0;
89}
90
91static inline void
92nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
93{
94	link->link_status = nic->link_up;
95	link->link_duplex = ETH_LINK_AUTONEG;
96	if (nic->duplex == NICVF_HALF_DUPLEX)
97		link->link_duplex = ETH_LINK_HALF_DUPLEX;
98	else if (nic->duplex == NICVF_FULL_DUPLEX)
99		link->link_duplex = ETH_LINK_FULL_DUPLEX;
100	link->link_speed = nic->speed;
101	link->link_autoneg = ETH_LINK_AUTONEG;
102}
103
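/*
 * Periodic alarm handler for the primary VF: poll the mailbox/interrupt
 * registers, and on a BGX link change event refresh the cached link status
 * and notify LSC callbacks, then re-arm the alarm for the next poll interval.
 */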
104static void
105nicvf_interrupt(void *arg)
106{
107	struct rte_eth_dev *dev = arg;
108	struct nicvf *nic = nicvf_pmd_priv(dev);
109
110	if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) {
111		if (dev->data->dev_conf.intr_conf.lsc)
112			nicvf_set_eth_link_status(nic, &dev->data->dev_link);
113		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
114	}
115
116	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
117				nicvf_interrupt, dev);
118}
119
120static void
121nicvf_vf_interrupt(void *arg)
122{
123	struct nicvf *nic = arg;
124
125	nicvf_reg_poll_interrupts(nic);
126
127	rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
128				nicvf_vf_interrupt, nic);
129}
130
131static int
132nicvf_periodic_alarm_start(void (fn)(void *), void *arg)
133{
134	return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, fn, arg);
135}
136
137static int
138nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
139{
140	return rte_eal_alarm_cancel(fn, arg);
141}
142
143/*
144 * Return 0 means link status changed, -1 means not changed
145 */
146static int
147nicvf_dev_link_update(struct rte_eth_dev *dev,
148		      int wait_to_complete __rte_unused)
149{
150	struct rte_eth_link link;
151	struct nicvf *nic = nicvf_pmd_priv(dev);
152
153	PMD_INIT_FUNC_TRACE();
154
155	memset(&link, 0, sizeof(link));
156	nicvf_set_eth_link_status(nic, &link);
157	return nicvf_atomic_write_link_status(dev, &link);
158}
159
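/*
 * MTU update: translate the MTU into a maximum frame size (MTU plus Ethernet
 * header and CRC), validate it against the hardware frame-size limits and the
 * RX buffer size (times the maximum segment count when scatter RX is active),
 * program it in the PF via mailbox and propagate it to any secondary VFs.
 */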
160static int
161nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
162{
163	struct nicvf *nic = nicvf_pmd_priv(dev);
164	uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
165	size_t i;
166
167	PMD_INIT_FUNC_TRACE();
168
169	if (frame_size > NIC_HW_MAX_FRS)
170		return -EINVAL;
171
172	if (frame_size < NIC_HW_MIN_FRS)
173		return -EINVAL;
174
175	buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
176
177	/*
178	 * Refuse mtu that requires the support of scattered packets
179	 * when this feature has not been enabled before.
180	 */
181	if (!dev->data->scattered_rx &&
182		(frame_size + 2 * VLAN_TAG_SIZE > buffsz))
183		return -EINVAL;
184
185	/* Check <seg size> * <max_seg> >= max_frame */
186	if (dev->data->scattered_rx &&
187		(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
188		return -EINVAL;
189
190	if (frame_size > ETHER_MAX_LEN)
191		dev->data->dev_conf.rxmode.jumbo_frame = 1;
192	else
193		dev->data->dev_conf.rxmode.jumbo_frame = 0;
194
195	if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
196		return -EINVAL;
197
198	/* Update max frame size */
199	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
200	nic->mtu = mtu;
201
202	for (i = 0; i < nic->sqs_count; i++)
203		nic->snicvf[i]->mtu = mtu;
204
205	return 0;
206}
207
208static int
209nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
210{
211	uint64_t *data = regs->data;
212	struct nicvf *nic = nicvf_pmd_priv(dev);
213
214	if (data == NULL) {
215		regs->length = nicvf_reg_get_count();
216		regs->width = THUNDERX_REG_BYTES;
217		return 0;
218	}
219
220	/* Support only full register dump */
221	if ((regs->length == 0) ||
222		(regs->length == (uint32_t)nicvf_reg_get_count())) {
223		regs->version = nic->vendor_id << 16 | nic->device_id;
224		nicvf_reg_dump(nic, data);
225		return 0;
226	}
227	return -ENOTSUP;
228}
229
230static void
231nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
232{
233	uint16_t qidx;
234	struct nicvf_hw_rx_qstats rx_qstats;
235	struct nicvf_hw_tx_qstats tx_qstats;
236	struct nicvf_hw_stats port_stats;
237	struct nicvf *nic = nicvf_pmd_priv(dev);
238	uint16_t rx_start, rx_end;
239	uint16_t tx_start, tx_end;
240	size_t i;
241
242	/* RX queue indices for the first VF */
243	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
244
245	/* Reading per RX ring stats */
246	for (qidx = rx_start; qidx <= rx_end; qidx++) {
247		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
248			break;
249
250		nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
251		stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
252		stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
253	}
254
255	/* TX queue indices for the first VF */
256	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
257
258	/* Reading per TX ring stats */
259	for (qidx = tx_start; qidx <= tx_end; qidx++) {
260		if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
261			break;
262
263		nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
264		stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
265		stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
266	}
267
268	for (i = 0; i < nic->sqs_count; i++) {
269		struct nicvf *snic = nic->snicvf[i];
270
271		if (snic == NULL)
272			break;
273
274		/* RX queue indices for a secondary VF */
275		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
276
277		/* Reading per RX ring stats */
278		for (qidx = rx_start; qidx <= rx_end; qidx++) {
279			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
280				break;
281
282			nicvf_hw_get_rx_qstats(snic, &rx_qstats,
283					       qidx % MAX_RCV_QUEUES_PER_QS);
284			stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes;
285			stats->q_ipackets[qidx] = rx_qstats.q_rx_packets;
286		}
287
288		/* TX queue indices for a secondary VF */
289		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
290		/* Reading per TX ring stats */
291		for (qidx = tx_start; qidx <= tx_end; qidx++) {
292			if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
293				break;
294
295			nicvf_hw_get_tx_qstats(snic, &tx_qstats,
296					       qidx % MAX_SND_QUEUES_PER_QS);
297			stats->q_obytes[qidx] = tx_qstats.q_tx_bytes;
298			stats->q_opackets[qidx] = tx_qstats.q_tx_packets;
299		}
300	}
301
302	nicvf_hw_get_stats(nic, &port_stats);
303	stats->ibytes = port_stats.rx_bytes;
304	stats->ipackets = port_stats.rx_ucast_frames;
305	stats->ipackets += port_stats.rx_bcast_frames;
306	stats->ipackets += port_stats.rx_mcast_frames;
307	stats->ierrors = port_stats.rx_l2_errors;
308	stats->imissed = port_stats.rx_drop_red;
309	stats->imissed += port_stats.rx_drop_overrun;
310	stats->imissed += port_stats.rx_drop_bcast;
311	stats->imissed += port_stats.rx_drop_mcast;
312	stats->imissed += port_stats.rx_drop_l3_bcast;
313	stats->imissed += port_stats.rx_drop_l3_mcast;
314
315	stats->obytes = port_stats.tx_bytes_ok;
316	stats->opackets = port_stats.tx_ucast_frames_ok;
317	stats->opackets += port_stats.tx_bcast_frames_ok;
318	stats->opackets += port_stats.tx_mcast_frames_ok;
319	stats->oerrors = port_stats.tx_drops;
320}
321
322static const uint32_t *
323nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev)
324{
325	size_t copied;
326	static uint32_t ptypes[32];
327	struct nicvf *nic = nicvf_pmd_priv(dev);
328	static const uint32_t ptypes_common[] = {
329		RTE_PTYPE_L3_IPV4,
330		RTE_PTYPE_L3_IPV4_EXT,
331		RTE_PTYPE_L3_IPV6,
332		RTE_PTYPE_L3_IPV6_EXT,
333		RTE_PTYPE_L4_TCP,
334		RTE_PTYPE_L4_UDP,
335		RTE_PTYPE_L4_FRAG,
336	};
337	static const uint32_t ptypes_tunnel[] = {
338		RTE_PTYPE_TUNNEL_GRE,
339		RTE_PTYPE_TUNNEL_GENEVE,
340		RTE_PTYPE_TUNNEL_VXLAN,
341		RTE_PTYPE_TUNNEL_NVGRE,
342	};
343	static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN;
344
345	copied = sizeof(ptypes_common);
346	memcpy(ptypes, ptypes_common, copied);
347	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
348		memcpy((char *)ptypes + copied, ptypes_tunnel,
349			sizeof(ptypes_tunnel));
350		copied += sizeof(ptypes_tunnel);
351	}
352
353	memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end));
354	if (dev->rx_pkt_burst == nicvf_recv_pkts ||
355		dev->rx_pkt_burst == nicvf_recv_pkts_multiseg)
356		return ptypes;
357
358	return NULL;
359}
360
361static void
362nicvf_dev_stats_reset(struct rte_eth_dev *dev)
363{
364	int i, j;
365	uint16_t rxqs = 0, txqs = 0;
366	struct nicvf *nic = nicvf_pmd_priv(dev);
367	uint16_t rx_start, rx_end;
368	uint16_t tx_start, tx_end;
369
370	/* Reset all primary nic counters */
371	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
372	for (i = rx_start; i <= rx_end; i++)
373		rxqs |= (0x3 << (i * 2));
374
375	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
376	for (i = tx_start; i <= tx_end; i++)
377		txqs |= (0x3 << (i * 2));
378
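	/*
	 * Each RQ/SQ exposes two per-queue counters (octets and packets), hence
	 * the two mask bits per queue index above. The 0x3FFF and 0x1F masks are
	 * assumed to cover all port-level RX and TX counters in the mailbox
	 * stat-counter layout.
	 */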
379	nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs);
380
381	/* Reset secondary nic queue counters */
382	for (i = 0; i < nic->sqs_count; i++) {
383		struct nicvf *snic = nic->snicvf[i];
384		if (snic == NULL)
385			break;
386
387		nicvf_rx_range(dev, snic, &rx_start, &rx_end);
388		for (j = rx_start; j <= rx_end; j++)
389			rxqs |= (0x3 << ((j % MAX_CMP_QUEUES_PER_QS) * 2));
390
391		nicvf_tx_range(dev, snic, &tx_start, &tx_end);
392		for (j = tx_start; j <= tx_end; j++)
393			txqs |= (0x3 << ((j % MAX_SND_QUEUES_PER_QS) * 2));
394
395		nicvf_mbox_reset_stat_counters(snic, 0, 0, rxqs, txqs);
396	}
397}
398
399/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */
400static void
401nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused)
402{
403}
404
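/*
 * Translate ethdev ETH_RSS_* hash flags into the nicvf RSS configuration bits
 * (and back again in the helper that follows). Tunnel hash bits are honoured
 * only when the hardware advertises tunnel parsing support.
 */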
405static inline uint64_t
406nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss)
407{
408	uint64_t nic_rss = 0;
409
410	if (ethdev_rss & ETH_RSS_IPV4)
411		nic_rss |= RSS_IP_ENA;
412
413	if (ethdev_rss & ETH_RSS_IPV6)
414		nic_rss |= RSS_IP_ENA;
415
416	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP)
417		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
418
419	if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP)
420		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
421
422	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP)
423		nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA);
424
425	if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP)
426		nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA);
427
428	if (ethdev_rss & ETH_RSS_PORT)
429		nic_rss |= RSS_L2_EXTENDED_HASH_ENA;
430
431	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
432		if (ethdev_rss & ETH_RSS_VXLAN)
433			nic_rss |= RSS_TUN_VXLAN_ENA;
434
435		if (ethdev_rss & ETH_RSS_GENEVE)
436			nic_rss |= RSS_TUN_GENEVE_ENA;
437
438		if (ethdev_rss & ETH_RSS_NVGRE)
439			nic_rss |= RSS_TUN_NVGRE_ENA;
440	}
441
442	return nic_rss;
443}
444
445static inline uint64_t
446nicvf_rss_nic_to_ethdev(struct nicvf *nic,  uint64_t nic_rss)
447{
448	uint64_t ethdev_rss = 0;
449
450	if (nic_rss & RSS_IP_ENA)
451		ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6);
452
453	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA))
454		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP |
455				ETH_RSS_NONFRAG_IPV6_TCP);
456
457	if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA))
458		ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP |
459				ETH_RSS_NONFRAG_IPV6_UDP);
460
461	if (nic_rss & RSS_L2_EXTENDED_HASH_ENA)
462		ethdev_rss |= ETH_RSS_PORT;
463
464	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) {
465		if (nic_rss & RSS_TUN_VXLAN_ENA)
466			ethdev_rss |= ETH_RSS_VXLAN;
467
468		if (nic_rss & RSS_TUN_GENEVE_ENA)
469			ethdev_rss |= ETH_RSS_GENEVE;
470
471		if (nic_rss & RSS_TUN_NVGRE_ENA)
472			ethdev_rss |= ETH_RSS_NVGRE;
473	}
474	return ethdev_rss;
475}
476
477static int
478nicvf_dev_reta_query(struct rte_eth_dev *dev,
479		     struct rte_eth_rss_reta_entry64 *reta_conf,
480		     uint16_t reta_size)
481{
482	struct nicvf *nic = nicvf_pmd_priv(dev);
483	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
484	int ret, i, j;
485
486	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
487		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
488			"(%d) doesn't match the number of entries the hardware "
489			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
490		return -EINVAL;
491	}
492
493	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
494	if (ret)
495		return ret;
496
497	/* Copy RETA table */
498	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
499		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
500			if ((reta_conf[i].mask >> j) & 0x01)
501				reta_conf[i].reta[j] = tbl[(i * RTE_RETA_GROUP_SIZE) + j];
502	}
503
504	return 0;
505}
506
507static int
508nicvf_dev_reta_update(struct rte_eth_dev *dev,
509		      struct rte_eth_rss_reta_entry64 *reta_conf,
510		      uint16_t reta_size)
511{
512	struct nicvf *nic = nicvf_pmd_priv(dev);
513	uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
514	int ret, i, j;
515
516	if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) {
517		RTE_LOG(ERR, PMD, "The size of the configured hash lookup table "
518			"(%d) doesn't match the number of entries the hardware "
519			"supports (%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE);
520		return -EINVAL;
521	}
522
523	ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
524	if (ret)
525		return ret;
526
527	/* Copy RETA table */
528	for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) {
529		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
530			if ((reta_conf[i].mask >> j) & 0x01)
531				tbl[(i * RTE_RETA_GROUP_SIZE) + j] = reta_conf[i].reta[j];
532	}
533
534	return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE);
535}
536
537static int
538nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
539			    struct rte_eth_rss_conf *rss_conf)
540{
541	struct nicvf *nic = nicvf_pmd_priv(dev);
542
543	if (rss_conf->rss_key)
544		nicvf_rss_get_key(nic, rss_conf->rss_key);
545
546	rss_conf->rss_key_len =  RSS_HASH_KEY_BYTE_SIZE;
547	rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic));
548	return 0;
549}
550
551static int
552nicvf_dev_rss_hash_update(struct rte_eth_dev *dev,
553			  struct rte_eth_rss_conf *rss_conf)
554{
555	struct nicvf *nic = nicvf_pmd_priv(dev);
556	uint64_t nic_rss;
557
558	if (rss_conf->rss_key &&
559		rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) {
560		RTE_LOG(ERR, PMD, "Hash key size mismatch %d",
561				rss_conf->rss_key_len);
562		return -EINVAL;
563	}
564
565	if (rss_conf->rss_key)
566		nicvf_rss_set_key(nic, rss_conf->rss_key);
567
568	nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf);
569	nicvf_rss_set_cfg(nic, nic_rss);
570	return 0;
571}
572
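/*
 * The CQ and SQ rings below are reserved at their maximum hardware size while
 * qlen_mask tracks the requested depth; presumably this lets a later queue
 * re-setup with a larger descriptor count reuse the same DMA memzone.
 */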
573static int
574nicvf_qset_cq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
575		    struct nicvf_rxq *rxq, uint16_t qidx, uint32_t desc_cnt)
576{
577	const struct rte_memzone *rz;
578	uint32_t ring_size = CMP_QUEUE_SZ_MAX * sizeof(union cq_entry_t);
579
580	rz = rte_eth_dma_zone_reserve(dev, "cq_ring",
581				      nicvf_netdev_qidx(nic, qidx), ring_size,
582				      NICVF_CQ_BASE_ALIGN_BYTES, nic->node);
583	if (rz == NULL) {
584		PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring");
585		return -ENOMEM;
586	}
587
588	memset(rz->addr, 0, ring_size);
589
590	rxq->phys = rz->phys_addr;
591	rxq->desc = rz->addr;
592	rxq->qlen_mask = desc_cnt - 1;
593
594	return 0;
595}
596
597static int
598nicvf_qset_sq_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
599		    struct nicvf_txq *sq, uint16_t qidx, uint32_t desc_cnt)
600{
601	const struct rte_memzone *rz;
602	uint32_t ring_size = SND_QUEUE_SZ_MAX * sizeof(union sq_entry_t);
603
604	rz = rte_eth_dma_zone_reserve(dev, "sq",
605				      nicvf_netdev_qidx(nic, qidx), ring_size,
606				      NICVF_SQ_BASE_ALIGN_BYTES, nic->node);
607	if (rz == NULL) {
608		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq hw ring");
609		return -ENOMEM;
610	}
611
612	memset(rz->addr, 0, ring_size);
613
614	sq->phys = rz->phys_addr;
615	sq->desc = rz->addr;
616	sq->qlen_mask = desc_cnt - 1;
617
618	return 0;
619}
620
621static int
622nicvf_qset_rbdr_alloc(struct rte_eth_dev *dev, struct nicvf *nic,
623		      uint32_t desc_cnt, uint32_t buffsz)
624{
625	struct nicvf_rbdr *rbdr;
626	const struct rte_memzone *rz;
627	uint32_t ring_size;
628
629	assert(nic->rbdr == NULL);
630	rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr),
631				  RTE_CACHE_LINE_SIZE, nic->node);
632	if (rbdr == NULL) {
633		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr");
634		return -ENOMEM;
635	}
636
637	ring_size = sizeof(struct rbdr_entry_t) * RBDR_QUEUE_SZ_MAX;
638	rz = rte_eth_dma_zone_reserve(dev, "rbdr",
639				      nicvf_netdev_qidx(nic, 0), ring_size,
640				      NICVF_RBDR_BASE_ALIGN_BYTES, nic->node);
641	if (rz == NULL) {
642		PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring");
643		return -ENOMEM;
644	}
645
646	memset(rz->addr, 0, ring_size);
647
648	rbdr->phys = rz->phys_addr;
649	rbdr->tail = 0;
650	rbdr->next_tail = 0;
651	rbdr->desc = rz->addr;
652	rbdr->buffsz = buffsz;
653	rbdr->qlen_mask = desc_cnt - 1;
654	rbdr->rbdr_status =
655		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0;
656	rbdr->rbdr_door =
657		nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR;
658
659	nic->rbdr = rbdr;
660	return 0;
661}
662
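/*
 * Return one receive buffer, identified by its physical address, to a mempool.
 * The first RX queue of this VF that still has an outstanding precharge count
 * takes the credit; since all pools on a port must share the same mbuf layout,
 * any queue's physical offset can convert the address back to an mbuf.
 */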
663static void
664nicvf_rbdr_release_mbuf(struct rte_eth_dev *dev, struct nicvf *nic,
665			nicvf_phys_addr_t phy)
666{
667	uint16_t qidx;
668	void *obj;
669	struct nicvf_rxq *rxq;
670	uint16_t rx_start, rx_end;
671
672	/* Get queue ranges for this VF */
673	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
674
675	for (qidx = rx_start; qidx <= rx_end; qidx++) {
676		rxq = dev->data->rx_queues[qidx];
677		if (rxq->precharge_cnt) {
678			obj = (void *)nicvf_mbuff_phy2virt(phy,
679							   rxq->mbuf_phys_off);
680			rte_mempool_put(rxq->pool, obj);
681			rxq->precharge_cnt--;
682			break;
683		}
684	}
685}
686
687static inline void
688nicvf_rbdr_release_mbufs(struct rte_eth_dev *dev, struct nicvf *nic)
689{
690	uint32_t qlen_mask, head;
691	struct rbdr_entry_t *entry;
692	struct nicvf_rbdr *rbdr = nic->rbdr;
693
694	qlen_mask = rbdr->qlen_mask;
695	head = rbdr->head;
696	while (head != rbdr->tail) {
697		entry = rbdr->desc + head;
698		nicvf_rbdr_release_mbuf(dev, nic, entry->full_addr);
699		head++;
700		head = head & qlen_mask;
701	}
702}
703
704static inline void
705nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq)
706{
707	uint32_t head;
708
709	head = txq->head;
710	while (head != txq->tail) {
711		if (txq->txbuffs[head]) {
712			rte_pktmbuf_free_seg(txq->txbuffs[head]);
713			txq->txbuffs[head] = NULL;
714		}
715		head++;
716		head = head & txq->qlen_mask;
717	}
718}
719
720static void
721nicvf_tx_queue_reset(struct nicvf_txq *txq)
722{
723	uint32_t txq_desc_cnt = txq->qlen_mask + 1;
724
725	memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt);
726	memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt);
727	txq->tail = 0;
728	txq->head = 0;
729	txq->xmit_bufs = 0;
730}
731
732static inline int
733nicvf_vf_start_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
734			uint16_t qidx)
735{
736	struct nicvf_txq *txq;
737	int ret;
738
739	assert(qidx < MAX_SND_QUEUES_PER_QS);
740
741	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
742		RTE_ETH_QUEUE_STATE_STARTED)
743		return 0;
744
745	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
746	txq->pool = NULL;
747	ret = nicvf_qset_sq_config(nic, qidx, txq);
748	if (ret) {
749		PMD_INIT_LOG(ERR, "Failed to configure sq VF%d %d %d",
750			     nic->vf_id, qidx, ret);
751		goto config_sq_error;
752	}
753
754	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
755		RTE_ETH_QUEUE_STATE_STARTED;
756	return ret;
757
758config_sq_error:
759	nicvf_qset_sq_reclaim(nic, qidx);
760	return ret;
761}
762
763static inline int
764nicvf_vf_stop_tx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
765		       uint16_t qidx)
766{
767	struct nicvf_txq *txq;
768	int ret;
769
770	assert(qidx < MAX_SND_QUEUES_PER_QS);
771
772	if (dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
773		RTE_ETH_QUEUE_STATE_STOPPED)
774		return 0;
775
776	ret = nicvf_qset_sq_reclaim(nic, qidx);
777	if (ret)
778		PMD_INIT_LOG(ERR, "Failed to reclaim sq VF%d %d %d",
779			     nic->vf_id, qidx, ret);
780
781	txq = dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)];
782	nicvf_tx_queue_release_mbufs(txq);
783	nicvf_tx_queue_reset(txq);
784
785	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
786		RTE_ETH_QUEUE_STATE_STOPPED;
787	return ret;
788}
789
790static inline int
791nicvf_configure_cpi(struct rte_eth_dev *dev)
792{
793	struct nicvf *nic = nicvf_pmd_priv(dev);
794	uint16_t qidx, qcnt;
795	int ret;
796
797	/* Count started rx queues */
798	for (qidx = qcnt = 0; qidx < dev->data->nb_rx_queues; qidx++)
799		if (dev->data->rx_queue_state[qidx] ==
800		    RTE_ETH_QUEUE_STATE_STARTED)
801			qcnt++;
802
803	nic->cpi_alg = CPI_ALG_NONE;
804	ret = nicvf_mbox_config_cpi(nic, qcnt);
805	if (ret)
806		PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret);
807
808	return ret;
809}
810
811static inline int
812nicvf_configure_rss(struct rte_eth_dev *dev)
813{
814	struct nicvf *nic = nicvf_pmd_priv(dev);
815	uint64_t rsshf;
816	int ret = -EINVAL;
817
818	rsshf = nicvf_rss_ethdev_to_nic(nic,
819			dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf);
820	PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64,
821		    dev->data->dev_conf.rxmode.mq_mode,
822		    dev->data->nb_rx_queues,
823		    dev->data->dev_conf.lpbk_mode, rsshf);
824
825	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
826		ret = nicvf_rss_term(nic);
827	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
828		ret = nicvf_rss_config(nic, dev->data->nb_rx_queues, rsshf);
829	if (ret)
830		PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret);
831
832	return ret;
833}
834
835static int
836nicvf_configure_rss_reta(struct rte_eth_dev *dev)
837{
838	struct nicvf *nic = nicvf_pmd_priv(dev);
839	unsigned int idx, qmap_size;
840	uint8_t qmap[RTE_MAX_QUEUES_PER_PORT];
841	uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE];
842
843	if (nic->cpi_alg != CPI_ALG_NONE)
844		return -EINVAL;
845
846	/* Prepare queue map */
847	for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) {
848		if (dev->data->rx_queue_state[idx] ==
849				RTE_ETH_QUEUE_STATE_STARTED)
850			qmap[qmap_size++] = idx;
851	}
852
853	/* Update default RSS RETA */
854	for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++)
855		default_reta[idx] = qmap[idx % qmap_size];
856
857	return nicvf_rss_reta_update(nic, default_reta,
858				     NIC_MAX_RSS_IDR_TBL_SIZE);
859}
860
861static void
862nicvf_dev_tx_queue_release(void *sq)
863{
864	struct nicvf_txq *txq;
865
866	PMD_INIT_FUNC_TRACE();
867
868	txq = (struct nicvf_txq *)sq;
869	if (txq) {
870		if (txq->txbuffs != NULL) {
871			nicvf_tx_queue_release_mbufs(txq);
872			rte_free(txq->txbuffs);
873			txq->txbuffs = NULL;
874		}
875		rte_free(txq);
876	}
877}
878
879static void
880nicvf_set_tx_function(struct rte_eth_dev *dev)
881{
882	struct nicvf_txq *txq;
883	size_t i;
884	bool multiseg = false;
885
886	for (i = 0; i < dev->data->nb_tx_queues; i++) {
887		txq = dev->data->tx_queues[i];
888		if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
889			multiseg = true;
890			break;
891		}
892	}
893
894	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
895	if (multiseg) {
896		PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback");
897		dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg;
898	} else {
899		PMD_DRV_LOG(DEBUG, "Using single-segment tx callback");
900		dev->tx_pkt_burst = nicvf_xmit_pkts;
901	}
902
903	if (txq->pool_free == nicvf_single_pool_free_xmited_buffers)
904		PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method");
905	else
906		PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method");
907}
908
909static void
910nicvf_set_rx_function(struct rte_eth_dev *dev)
911{
912	if (dev->data->scattered_rx) {
913		PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback");
914		dev->rx_pkt_burst = nicvf_recv_pkts_multiseg;
915	} else {
916		PMD_DRV_LOG(DEBUG, "Using single-segment rx callback");
917		dev->rx_pkt_burst = nicvf_recv_pkts;
918	}
919}
920
921static int
922nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
923			 uint16_t nb_desc, unsigned int socket_id,
924			 const struct rte_eth_txconf *tx_conf)
925{
926	uint16_t tx_free_thresh;
927	uint8_t is_single_pool;
928	struct nicvf_txq *txq;
929	struct nicvf *nic = nicvf_pmd_priv(dev);
930
931	PMD_INIT_FUNC_TRACE();
932
933	if (qidx >= MAX_SND_QUEUES_PER_QS)
934		nic = nic->snicvf[qidx / MAX_SND_QUEUES_PER_QS - 1];
935
936	qidx = qidx % MAX_SND_QUEUES_PER_QS;
937
938	/* Socket id check */
939	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
940		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
941		socket_id, nic->node);
942
943	/* Tx deferred start is not supported */
944	if (tx_conf->tx_deferred_start) {
945		PMD_INIT_LOG(ERR, "Tx deferred start not supported");
946		return -EINVAL;
947	}
948
949	/* Roundup nb_desc to available qsize and validate max number of desc */
950	nb_desc = nicvf_qsize_sq_roundup(nb_desc);
951	if (nb_desc == 0) {
952		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize");
953		return -EINVAL;
954	}
955
956	/* Validate tx_free_thresh */
957	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
958				tx_conf->tx_free_thresh :
959				NICVF_DEFAULT_TX_FREE_THRESH);
960
961	if (tx_free_thresh > (nb_desc) ||
962		tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) {
963		PMD_INIT_LOG(ERR,
964			"tx_free_thresh must be less than the number of TX "
965			"descriptors. (tx_free_thresh=%u port=%d "
966			"queue=%d)", (unsigned int)tx_free_thresh,
967			(int)dev->data->port_id, (int)qidx);
968		return -EINVAL;
969	}
970
971	/* Free memory prior to re-allocation if needed. */
972	if (dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
973		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
974				nicvf_netdev_qidx(nic, qidx));
975		nicvf_dev_tx_queue_release(
976			dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)]);
977		dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
978	}
979
980	/* Allocating tx queue data structure */
981	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq),
982					RTE_CACHE_LINE_SIZE, nic->node);
983	if (txq == NULL) {
984		PMD_INIT_LOG(ERR, "Failed to allocate txq=%d",
985			     nicvf_netdev_qidx(nic, qidx));
986		return -ENOMEM;
987	}
988
989	txq->nic = nic;
990	txq->queue_id = qidx;
991	txq->tx_free_thresh = tx_free_thresh;
992	txq->txq_flags = tx_conf->txq_flags;
993	txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
994	txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
995	is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
996				txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
997
998	/* Choose optimum free threshold value for multipool case */
999	if (!is_single_pool) {
1000		txq->tx_free_thresh = (uint16_t)
1001		(tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ?
1002				NICVF_TX_FREE_MPOOL_THRESH :
1003				tx_conf->tx_free_thresh);
1004		txq->pool_free = nicvf_multi_pool_free_xmited_buffers;
1005	} else {
1006		txq->pool_free = nicvf_single_pool_free_xmited_buffers;
1007	}
1008
1009	/* Allocate software ring */
1010	txq->txbuffs = rte_zmalloc_socket("txq->txbuffs",
1011				nb_desc * sizeof(struct rte_mbuf *),
1012				RTE_CACHE_LINE_SIZE, nic->node);
1013
1014	if (txq->txbuffs == NULL) {
1015		nicvf_dev_tx_queue_release(txq);
1016		return -ENOMEM;
1017	}
1018
1019	if (nicvf_qset_sq_alloc(dev, nic, txq, qidx, nb_desc)) {
1020		PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx);
1021		nicvf_dev_tx_queue_release(txq);
1022		return -ENOMEM;
1023	}
1024
1025	nicvf_tx_queue_reset(txq);
1026
1027	PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
1028			nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
1029			txq->phys);
1030
1031	dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
1032	dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1033		RTE_ETH_QUEUE_STATE_STOPPED;
1034	return 0;
1035}
1036
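/*
 * Drain a stopped RX queue: keep calling the active rx burst handler until the
 * completion queue is empty, freeing every returned mbuf, then refill the RBDR
 * to account for the buffers consumed by the drained descriptors.
 */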
1037static inline void
1038nicvf_rx_queue_release_mbufs(struct rte_eth_dev *dev, struct nicvf_rxq *rxq)
1039{
1040	uint32_t rxq_cnt;
1041	uint32_t nb_pkts, released_pkts = 0;
1042	uint32_t refill_cnt = 0;
1043	struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH];
1044
1045	if (dev->rx_pkt_burst == NULL)
1046		return;
1047
1048	while ((rxq_cnt = nicvf_dev_rx_queue_count(dev,
1049				nicvf_netdev_qidx(rxq->nic, rxq->queue_id)))) {
1050		nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts,
1051					NICVF_MAX_RX_FREE_THRESH);
1052		PMD_DRV_LOG(INFO, "nb_pkts=%d  rxq_cnt=%d", nb_pkts, rxq_cnt);
1053		while (nb_pkts) {
1054			rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]);
1055			released_pkts++;
1056		}
1057	}
1058
1059
1060	refill_cnt += nicvf_dev_rbdr_refill(dev,
1061			nicvf_netdev_qidx(rxq->nic, rxq->queue_id));
1062
1063	PMD_DRV_LOG(INFO, "free_cnt=%d  refill_cnt=%d",
1064		    released_pkts, refill_cnt);
1065}
1066
1067static void
1068nicvf_rx_queue_reset(struct nicvf_rxq *rxq)
1069{
1070	rxq->head = 0;
1071	rxq->available_space = 0;
1072	rxq->recv_buffers = 0;
1073}
1074
1075static inline int
1076nicvf_vf_start_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1077			uint16_t qidx)
1078{
1079	struct nicvf_rxq *rxq;
1080	int ret;
1081
1082	assert(qidx < MAX_RCV_QUEUES_PER_QS);
1083
1084	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1085		RTE_ETH_QUEUE_STATE_STARTED)
1086		return 0;
1087
1088	/* Update rbdr pointer to all rxq */
1089	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1090	rxq->shared_rbdr = nic->rbdr;
1091
1092	ret = nicvf_qset_rq_config(nic, qidx, rxq);
1093	if (ret) {
1094		PMD_INIT_LOG(ERR, "Failed to configure rq VF%d %d %d",
1095			     nic->vf_id, qidx, ret);
1096		goto config_rq_error;
1097	}
1098	ret = nicvf_qset_cq_config(nic, qidx, rxq);
1099	if (ret) {
1100		PMD_INIT_LOG(ERR, "Failed to configure cq VF%d %d %d",
1101			     nic->vf_id, qidx, ret);
1102		goto config_cq_error;
1103	}
1104
1105	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1106		RTE_ETH_QUEUE_STATE_STARTED;
1107	return 0;
1108
1109config_cq_error:
1110	nicvf_qset_cq_reclaim(nic, qidx);
1111config_rq_error:
1112	nicvf_qset_rq_reclaim(nic, qidx);
1113	return ret;
1114}
1115
1116static inline int
1117nicvf_vf_stop_rx_queue(struct rte_eth_dev *dev, struct nicvf *nic,
1118		       uint16_t qidx)
1119{
1120	struct nicvf_rxq *rxq;
1121	int ret, other_error;
1122
1123	if (dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] ==
1124		RTE_ETH_QUEUE_STATE_STOPPED)
1125		return 0;
1126
1127	ret = nicvf_qset_rq_reclaim(nic, qidx);
1128	if (ret)
1129		PMD_INIT_LOG(ERR, "Failed to reclaim rq VF%d %d %d",
1130			     nic->vf_id, qidx, ret);
1131
1132	other_error = ret;
1133	rxq = dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)];
1134	nicvf_rx_queue_release_mbufs(dev, rxq);
1135	nicvf_rx_queue_reset(rxq);
1136
1137	ret = nicvf_qset_cq_reclaim(nic, qidx);
1138	if (ret)
1139		PMD_INIT_LOG(ERR, "Failed to reclaim cq VF%d %d %d",
1140			     nic->vf_id, qidx, ret);
1141
1142	other_error |= ret;
1143	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1144		RTE_ETH_QUEUE_STATE_STOPPED;
1145	return other_error;
1146}
1147
1148static void
1149nicvf_dev_rx_queue_release(void *rx_queue)
1150{
1151	PMD_INIT_FUNC_TRACE();
1152
1153	rte_free(rx_queue);
1154}
1155
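/*
 * Port-level queue indices span multiple queue sets. For example, assuming
 * MAX_RCV_QUEUES_PER_QS is 8, port queue 10 maps to queue 2 of the first
 * secondary VF. The start/stop handlers below perform exactly this
 * translation before delegating to the per-VF helpers.
 */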
1156static int
1157nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1158{
1159	struct nicvf *nic = nicvf_pmd_priv(dev);
1160	int ret;
1161
1162	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1163		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1164
1165	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1166
1167	ret = nicvf_vf_start_rx_queue(dev, nic, qidx);
1168	if (ret)
1169		return ret;
1170
1171	ret = nicvf_configure_cpi(dev);
1172	if (ret)
1173		return ret;
1174
1175	return nicvf_configure_rss_reta(dev);
1176}
1177
1178static int
1179nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1180{
1181	int ret;
1182	struct nicvf *nic = nicvf_pmd_priv(dev);
1183
1184	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1185		nic = nic->snicvf[(qidx / MAX_RCV_QUEUES_PER_QS - 1)];
1186
1187	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1188
1189	ret = nicvf_vf_stop_rx_queue(dev, nic, qidx);
1190	ret |= nicvf_configure_cpi(dev);
1191	ret |= nicvf_configure_rss_reta(dev);
1192	return ret;
1193}
1194
1195static int
1196nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
1197{
1198	struct nicvf *nic = nicvf_pmd_priv(dev);
1199
1200	if (qidx >= MAX_SND_QUEUES_PER_QS)
1201		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1202
1203	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1204
1205	return nicvf_vf_start_tx_queue(dev, nic, qidx);
1206}
1207
1208static int
1209nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
1210{
1211	struct nicvf *nic = nicvf_pmd_priv(dev);
1212
1213	if (qidx >= MAX_SND_QUEUES_PER_QS)
1214		nic = nic->snicvf[(qidx / MAX_SND_QUEUES_PER_QS - 1)];
1215
1216	qidx = qidx % MAX_SND_QUEUES_PER_QS;
1217
1218	return nicvf_vf_stop_tx_queue(dev, nic, qidx);
1219}
1220
1221
1222static int
1223nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
1224			 uint16_t nb_desc, unsigned int socket_id,
1225			 const struct rte_eth_rxconf *rx_conf,
1226			 struct rte_mempool *mp)
1227{
1228	uint16_t rx_free_thresh;
1229	struct nicvf_rxq *rxq;
1230	struct nicvf *nic = nicvf_pmd_priv(dev);
1231
1232	PMD_INIT_FUNC_TRACE();
1233
1234	if (qidx >= MAX_RCV_QUEUES_PER_QS)
1235		nic = nic->snicvf[qidx / MAX_RCV_QUEUES_PER_QS - 1];
1236
1237	qidx = qidx % MAX_RCV_QUEUES_PER_QS;
1238
1239	/* Socket id check */
1240	if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node)
1241		PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
1242		socket_id, nic->node);
1243
1244	/* Mempool memory must be contiguous, so it must consist of a single memory segment */
1245	if (mp->nb_mem_chunks != 1) {
1246		PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
1247		return -EINVAL;
1248	}
1249
1250	/* Mempool memory must be physically contiguous */
1251	if (mp->flags & MEMPOOL_F_NO_PHYS_CONTIG) {
1252		PMD_INIT_LOG(ERR, "Mempool memory must be physically contiguous");
1253		return -EINVAL;
1254	}
1255
1256	/* Rx deferred start is not supported */
1257	if (rx_conf->rx_deferred_start) {
1258		PMD_INIT_LOG(ERR, "Rx deferred start not supported");
1259		return -EINVAL;
1260	}
1261
1262	/* Roundup nb_desc to available qsize and validate max number of desc */
1263	nb_desc = nicvf_qsize_cq_roundup(nb_desc);
1264	if (nb_desc == 0) {
1265		PMD_INIT_LOG(ERR, "Value of nb_desc beyond available hw cq qsize");
1266		return -EINVAL;
1267	}
1268
1269	/* Check rx_free_thresh upper bound */
1270	rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ?
1271				rx_conf->rx_free_thresh :
1272				NICVF_DEFAULT_RX_FREE_THRESH);
1273	if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH ||
1274		rx_free_thresh >= nb_desc * .75) {
1275		PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d",
1276				rx_free_thresh);
1277		return -EINVAL;
1278	}
1279
1280	/* Free memory prior to re-allocation if needed */
1281	if (dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] != NULL) {
1282		PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
1283				nicvf_netdev_qidx(nic, qidx));
1284		nicvf_dev_rx_queue_release(
1285			dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)]);
1286		dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = NULL;
1287	}
1288
1289	/* Allocate rxq memory */
1290	rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq),
1291					RTE_CACHE_LINE_SIZE, nic->node);
1292	if (rxq == NULL) {
1293		PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d",
1294			     nicvf_netdev_qidx(nic, qidx));
1295		return -ENOMEM;
1296	}
1297
1298	rxq->nic = nic;
1299	rxq->pool = mp;
1300	rxq->queue_id = qidx;
1301	rxq->port_id = dev->data->port_id;
1302	rxq->rx_free_thresh = rx_free_thresh;
1303	rxq->rx_drop_en = rx_conf->rx_drop_en;
1304	rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS;
1305	rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR;
1306	rxq->precharge_cnt = 0;
1307
1308	if (nicvf_hw_cap(nic) & NICVF_CAP_CQE_RX2)
1309		rxq->rbptr_offset = NICVF_CQE_RX2_RBPTR_WORD;
1310	else
1311		rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
1312
1313
1314	/* Alloc completion queue */
1315	if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
1316		PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id);
1317		nicvf_dev_rx_queue_release(rxq);
1318		return -ENOMEM;
1319	}
1320
1321	nicvf_rx_queue_reset(rxq);
1322
1323	PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
1324			nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
1325			rte_mempool_avail_count(mp), rxq->phys);
1326
1327	dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
1328	dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
1329		RTE_ETH_QUEUE_STATE_STOPPED;
1330	return 0;
1331}
1332
1333static void
1334nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1335{
1336	struct nicvf *nic = nicvf_pmd_priv(dev);
1337
1338	PMD_INIT_FUNC_TRACE();
1339
1340	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
1341	dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
1342	dev_info->max_rx_queues =
1343			(uint16_t)MAX_RCV_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1344	dev_info->max_tx_queues =
1345			(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
1346	dev_info->max_mac_addrs = 1;
1347	dev_info->max_vfs = dev->pci_dev->max_vfs;
1348
1349	dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
1350	dev_info->tx_offload_capa =
1351		DEV_TX_OFFLOAD_IPV4_CKSUM  |
1352		DEV_TX_OFFLOAD_UDP_CKSUM   |
1353		DEV_TX_OFFLOAD_TCP_CKSUM   |
1354		DEV_TX_OFFLOAD_TCP_TSO     |
1355		DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1356
1357	dev_info->reta_size = nic->rss_info.rss_size;
1358	dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
1359	dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1;
1360	if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING)
1361		dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL;
1362
1363	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1364		.rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
1365		.rx_drop_en = 0,
1366	};
1367
1368	dev_info->default_txconf = (struct rte_eth_txconf) {
1369		.tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH,
1370		.txq_flags =
1371			ETH_TXQ_FLAGS_NOMULTSEGS  |
1372			ETH_TXQ_FLAGS_NOREFCOUNT  |
1373			ETH_TXQ_FLAGS_NOMULTMEMP  |
1374			ETH_TXQ_FLAGS_NOVLANOFFL  |
1375			ETH_TXQ_FLAGS_NOXSUMSCTP,
1376	};
1377}
1378
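/*
 * RBDR precharge callback: allocate one mbuf from one of this VF's RX queue
 * mempools and hand back the physical address of its data buffer. The
 * per-queue precharge counter caps each queue's contribution at its ring
 * size, so buffers are drawn from every pool of the port.
 */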
1379static nicvf_phys_addr_t
1380rbdr_rte_mempool_get(void *dev, void *opaque)
1381{
1382	uint16_t qidx;
1383	uintptr_t mbuf;
1384	struct nicvf_rxq *rxq;
1385	struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)dev;
1386	struct nicvf *nic = (struct nicvf *)opaque;
1387	uint16_t rx_start, rx_end;
1388
1389	/* Get queue ranges for this VF */
1390	nicvf_rx_range(eth_dev, nic, &rx_start, &rx_end);
1391
1392	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1393		rxq = eth_dev->data->rx_queues[qidx];
1394		/* Maintain equal buffer count across all pools */
1395		if (rxq->precharge_cnt >= rxq->qlen_mask)
1396			continue;
1397		rxq->precharge_cnt++;
1398		mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool);
1399		if (mbuf)
1400			return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off);
1401	}
1402	return 0;
1403}
1404
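/*
 * Bring up a single queue set (the primary VF or one of its secondary VFs):
 * validate that every mempool on the port shares the same buffer layout,
 * allocate and precharge the RBDR, start the TX and RX queues owned by this
 * VF and, for the primary VF only, program CPI and RSS before telling the PF
 * that configuration is complete.
 */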
1405static int
1406nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
1407{
1408	int ret;
1409	uint16_t qidx;
1410	uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
1411	uint64_t mbuf_phys_off = 0;
1412	struct nicvf_rxq *rxq;
1413	struct rte_mbuf *mbuf;
1414	uint16_t rx_start, rx_end;
1415	uint16_t tx_start, tx_end;
1416
1417	PMD_INIT_FUNC_TRACE();
1418
1419	/* Userspace process exited without proper shutdown in last run */
1420	if (nicvf_qset_rbdr_active(nic, 0))
1421		nicvf_vf_stop(dev, nic, false);
1422
1423	/* Get queue ranges for this VF */
1424	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1425
1426	/*
1427	 * The Thunderx nicvf PMD can support more than one pool per port only when
1428	 * 1) the data payload size is the same across all the pools on a given port
1429	 * AND
1430	 * 2) all mbufs in the pools come from the same hugepage
1431	 * AND
1432	 * 3) the mbuf metadata size is the same across all the pools on a given port.
1433	 *
1434	 * This is to support existing applications that use multiple pools per port.
1435	 * However, using multiple pools for QoS purposes is not addressed.
1436	 *
1437	 */
1438
1439	/* Validate mempool attributes */
1440	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1441		rxq = dev->data->rx_queues[qidx];
1442		rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool);
1443		mbuf = rte_pktmbuf_alloc(rxq->pool);
1444		if (mbuf == NULL) {
1445			PMD_INIT_LOG(ERR, "Failed to allocate mbuf VF%d qid=%d "
1446				     "pool=%s",
1447				     nic->vf_id, qidx, rxq->pool->name);
1448			return -ENOMEM;
1449		}
1450		rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
1451		rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
1452		rte_pktmbuf_free(mbuf);
1453
1454		if (mbuf_phys_off == 0)
1455			mbuf_phys_off = rxq->mbuf_phys_off;
1456		if (mbuf_phys_off != rxq->mbuf_phys_off) {
1457			PMD_INIT_LOG(ERR, "pool params not same, %s VF%d %"
1458				     PRIx64, rxq->pool->name, nic->vf_id,
1459				     mbuf_phys_off);
1460			return -EINVAL;
1461		}
1462	}
1463
1464	/* Check the level of buffers in the pool */
1465	total_rxq_desc = 0;
1466	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1467		rxq = dev->data->rx_queues[qidx];
1468		/* Count total numbers of rxq descs */
1469		total_rxq_desc += rxq->qlen_mask + 1;
1470		exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh;
1471		exp_buffs *= dev->data->nb_rx_queues;
1472		if (rte_mempool_avail_count(rxq->pool) < exp_buffs) {
1473			PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)",
1474				     rxq->pool->name,
1475				     rte_mempool_avail_count(rxq->pool),
1476				     exp_buffs);
1477			return -ENOENT;
1478		}
1479	}
1480
1481	/* Check RBDR desc overflow */
1482	ret = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1483	if (ret == 0) {
1484		PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc "
1485			     "VF%d", nic->vf_id);
1486		return -ENOMEM;
1487	}
1488
1489	/* Enable qset */
1490	ret = nicvf_qset_config(nic);
1491	if (ret) {
1492		PMD_INIT_LOG(ERR, "Failed to enable qset %d VF%d", ret,
1493			     nic->vf_id);
1494		return ret;
1495	}
1496
1497	/* Allocate RBDR and RBDR ring desc */
1498	nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc);
1499	ret = nicvf_qset_rbdr_alloc(dev, nic, nb_rbdr_desc, rbdrsz);
1500	if (ret) {
1501		PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc "
1502			     "VF%d", nic->vf_id);
1503		goto qset_reclaim;
1504	}
1505
1506	/* Enable and configure RBDR registers */
1507	ret = nicvf_qset_rbdr_config(nic, 0);
1508	if (ret) {
1509		PMD_INIT_LOG(ERR, "Failed to configure rbdr %d VF%d", ret,
1510			     nic->vf_id);
1511		goto qset_rbdr_free;
1512	}
1513
1514	/* Fill rte_mempool buffers in RBDR pool and precharge it */
1515	ret = nicvf_qset_rbdr_precharge(dev, nic, 0, rbdr_rte_mempool_get,
1516					total_rxq_desc);
1517	if (ret) {
1518		PMD_INIT_LOG(ERR, "Failed to fill rbdr %d VF%d", ret,
1519			     nic->vf_id);
1520		goto qset_rbdr_reclaim;
1521	}
1522
1523	PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR VF%d",
1524		     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
1525
1526	/* Configure VLAN Strip */
1527	nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
1528
1529	/* Based on the packet type (IPv4 or IPv6), the nicvf HW aligns L3 data
1530	 * to a 64bit memory address. The alignment creates a hole in the mbuf
1531	 * (between the end of the headroom and the start of packet data). The
1532	 * new revision of the HW provides an option to disable the L3 alignment
1533	 * feature and make the mbuf layout look more like that of other NICs.
1534	 * For better application compatibility, disable the L3 alignment
1535	 * feature on the hardware revisions that support it.
1536	 */
1537	nicvf_apad_config(nic, false);
1538
1539	/* Get queue ranges for this VF */
1540	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1541
1542	/* Configure TX queues */
1543	for (qidx = tx_start; qidx <= tx_end; qidx++) {
1544		ret = nicvf_vf_start_tx_queue(dev, nic,
1545			qidx % MAX_SND_QUEUES_PER_QS);
1546		if (ret)
1547			goto start_txq_error;
1548	}
1549
1550	/* Configure RX queues */
1551	for (qidx = rx_start; qidx <= rx_end; qidx++) {
1552		ret = nicvf_vf_start_rx_queue(dev, nic,
1553			qidx % MAX_RCV_QUEUES_PER_QS);
1554		if (ret)
1555			goto start_rxq_error;
1556	}
1557
1558	if (!nic->sqs_mode) {
1559		/* Configure CPI algorithm */
1560		ret = nicvf_configure_cpi(dev);
1561		if (ret)
1562			goto start_txq_error;
1563
1564		ret = nicvf_mbox_get_rss_size(nic);
1565		if (ret) {
1566			PMD_INIT_LOG(ERR, "Failed to get rss table size");
1567			goto qset_rss_error;
1568		}
1569
1570		/* Configure RSS */
1571		ret = nicvf_configure_rss(dev);
1572		if (ret)
1573			goto qset_rss_error;
1574	}
1575
1576	/* Done; Let PF make the BGX's RX and TX switches to ON position */
1577	nicvf_mbox_cfg_done(nic);
1578	return 0;
1579
1580qset_rss_error:
1581	nicvf_rss_term(nic);
1582start_rxq_error:
1583	for (qidx = rx_start; qidx <= rx_end; qidx++)
1584		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1585start_txq_error:
1586	for (qidx = tx_start; qidx <= tx_end; qidx++)
1587		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1588qset_rbdr_reclaim:
1589	nicvf_qset_rbdr_reclaim(nic, 0);
1590	nicvf_rbdr_release_mbufs(dev, nic);
1591qset_rbdr_free:
1592	if (nic->rbdr) {
1593		rte_free(nic->rbdr);
1594		nic->rbdr = NULL;
1595	}
1596qset_reclaim:
1597	nicvf_qset_reclaim(nic);
1598	return ret;
1599}
1600
1601static int
1602nicvf_dev_start(struct rte_eth_dev *dev)
1603{
1604	uint16_t qidx;
1605	int ret;
1606	size_t i;
1607	struct nicvf *nic = nicvf_pmd_priv(dev);
1608	struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1609	uint16_t mtu;
1610	uint32_t buffsz = 0, rbdrsz = 0;
1611	struct rte_pktmbuf_pool_private *mbp_priv;
1612	struct nicvf_rxq *rxq;
1613
1614	PMD_INIT_FUNC_TRACE();
1615
1616	/* This function must be called for a primary device */
1617	assert_primary(nic);
1618
1619	/* Validate RBDR buff size */
1620	for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) {
1621		rxq = dev->data->rx_queues[qidx];
1622		mbp_priv = rte_mempool_get_priv(rxq->pool);
1623		buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1624		if (buffsz % 128) {
1625			PMD_INIT_LOG(ERR, "rxbuf size must be a multiple of 128");
1626			return -EINVAL;
1627		}
1628		if (rbdrsz == 0)
1629			rbdrsz = buffsz;
1630		if (rbdrsz != buffsz) {
1631			PMD_INIT_LOG(ERR, "buffsz not same, qidx=%d (%d/%d)",
1632				     qidx, rbdrsz, buffsz);
1633			return -EINVAL;
1634		}
1635	}
1636
1637	/* Configure loopback */
1638	ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode);
1639	if (ret) {
1640		PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret);
1641		return ret;
1642	}
1643
1644	/* Reset all statistics counters attached to this port */
1645	ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF);
1646	if (ret) {
1647		PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret);
1648		return ret;
1649	}
1650
1651	/* Setup scatter mode if needed by jumbo */
1652	if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1653					    2 * VLAN_TAG_SIZE > buffsz)
1654		dev->data->scattered_rx = 1;
1655	if (rx_conf->enable_scatter)
1656		dev->data->scattered_rx = 1;
1657
1658	/* Setup MTU based on max_rx_pkt_len or default */
1659	mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
1660		dev->data->dev_conf.rxmode.max_rx_pkt_len
1661			-  ETHER_HDR_LEN - ETHER_CRC_LEN
1662		: ETHER_MTU;
1663
1664	if (nicvf_dev_set_mtu(dev, mtu)) {
1665		PMD_INIT_LOG(ERR, "Failed to set default mtu size");
1666		return -EBUSY;
1667	}
1668
1669	ret = nicvf_vf_start(dev, nic, rbdrsz);
1670	if (ret != 0)
1671		return ret;
1672
1673	for (i = 0; i < nic->sqs_count; i++) {
1674		assert(nic->snicvf[i]);
1675
1676		ret = nicvf_vf_start(dev, nic->snicvf[i], rbdrsz);
1677		if (ret != 0)
1678			return ret;
1679	}
1680
1681	/* Configure callbacks based on scatter mode */
1682	nicvf_set_tx_function(dev);
1683	nicvf_set_rx_function(dev);
1684
1685	return 0;
1686}
1687
1688static void
1689nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup)
1690{
1691	size_t i;
1692	int ret;
1693	struct nicvf *nic = nicvf_pmd_priv(dev);
1694
1695	PMD_INIT_FUNC_TRACE();
1696
1697	/* Teardown secondary vf first */
1698	for (i = 0; i < nic->sqs_count; i++) {
1699		if (!nic->snicvf[i])
1700			continue;
1701
1702		nicvf_vf_stop(dev, nic->snicvf[i], cleanup);
1703	}
1704
1705	/* Stop the primary VF now */
1706	nicvf_vf_stop(dev, nic, cleanup);
1707
1708	/* Disable loopback */
1709	ret = nicvf_loopback_config(nic, 0);
1710	if (ret)
1711		PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret);
1712
1713	/* Reclaim CPI configuration */
1714	ret = nicvf_mbox_config_cpi(nic, 0);
1715	if (ret)
1716		PMD_INIT_LOG(ERR, "Failed to reclaim CPI config %d", ret);
1717}
1718
1719static void
1720nicvf_dev_stop(struct rte_eth_dev *dev)
1721{
1722	PMD_INIT_FUNC_TRACE();
1723
1724	nicvf_dev_stop_cleanup(dev, false);
1725}
1726
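/*
 * Tear down a single queue set: optionally ask the PF to turn the BGX RX/TX
 * switches off, stop and reclaim every SQ/RQ/CQ owned by this VF, return the
 * RBDR buffers to their mempools, disable the queue set and free the RBDR
 * bookkeeping structure.
 */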
1727static void
1728nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic, bool cleanup)
1729{
1730	int ret;
1731	uint16_t qidx;
1732	uint16_t tx_start, tx_end;
1733	uint16_t rx_start, rx_end;
1734
1735	PMD_INIT_FUNC_TRACE();
1736
1737	if (cleanup) {
1738		/* Let PF make the BGX's RX and TX switches to OFF position */
1739		nicvf_mbox_shutdown(nic);
1740	}
1741
1742	/* Disable VLAN Strip */
1743	nicvf_vlan_hw_strip(nic, 0);
1744
1745	/* Get queue ranges for this VF */
1746	nicvf_tx_range(dev, nic, &tx_start, &tx_end);
1747
1748	for (qidx = tx_start; qidx <= tx_end; qidx++)
1749		nicvf_vf_stop_tx_queue(dev, nic, qidx % MAX_SND_QUEUES_PER_QS);
1750
1751	/* Get queue ranges for this VF */
1752	nicvf_rx_range(dev, nic, &rx_start, &rx_end);
1753
1754	/* Reclaim rq */
1755	for (qidx = rx_start; qidx <= rx_end; qidx++)
1756		nicvf_vf_stop_rx_queue(dev, nic, qidx % MAX_RCV_QUEUES_PER_QS);
1757
1758	/* Reclaim RBDR */
1759	ret = nicvf_qset_rbdr_reclaim(nic, 0);
1760	if (ret)
1761		PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret);
1762
1763	/* Move all charged buffers in RBDR back to pool */
1764	if (nic->rbdr != NULL)
1765		nicvf_rbdr_release_mbufs(dev, nic);
1766
1767	/* Disable qset */
1768	ret = nicvf_qset_reclaim(nic);
1769	if (ret)
1770		PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret);
1771
1772	/* Disable all interrupts */
1773	nicvf_disable_all_interrupts(nic);
1774
1775	/* Free RBDR SW structure */
1776	if (nic->rbdr) {
1777		rte_free(nic->rbdr);
1778		nic->rbdr = NULL;
1779	}
1780}
1781
1782static void
1783nicvf_dev_close(struct rte_eth_dev *dev)
1784{
1785	size_t i;
1786	struct nicvf *nic = nicvf_pmd_priv(dev);
1787
1788	PMD_INIT_FUNC_TRACE();
1789
1790	nicvf_dev_stop_cleanup(dev, true);
1791	nicvf_periodic_alarm_stop(nicvf_interrupt, dev);
1792
1793	for (i = 0; i < nic->sqs_count; i++) {
1794		if (!nic->snicvf[i])
1795			continue;
1796
1797		nicvf_periodic_alarm_stop(nicvf_vf_interrupt, nic->snicvf[i]);
1798	}
1799}
1800
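/*
 * Attach secondary queue sets to the primary VF: pop one idle secondary VF
 * from the SVF list per required queue set, record its sqs_id, and then ask
 * the PF over the mailbox to bind them to this primary VF.
 */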
1801static int
1802nicvf_request_sqs(struct nicvf *nic)
1803{
1804	size_t i;
1805
1806	assert_primary(nic);
1807	assert(nic->sqs_count > 0);
1808	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1809
1810	/* Set no of Rx/Tx queues in each of the SQsets */
1811	for (i = 0; i < nic->sqs_count; i++) {
1812		if (nicvf_svf_empty())
1813			rte_panic("Cannot assign sufficient number of "
1814				  "secondary queues to primary VF%" PRIu8 "\n",
1815				  nic->vf_id);
1816
1817		nic->snicvf[i] = nicvf_svf_pop();
1818		nic->snicvf[i]->sqs_id = i;
1819	}
1820
1821	return nicvf_mbox_request_sqs(nic);
1822}
1823
1824static int
1825nicvf_dev_configure(struct rte_eth_dev *dev)
1826{
1827	struct rte_eth_dev_data *data = dev->data;
1828	struct rte_eth_conf *conf = &data->dev_conf;
1829	struct rte_eth_rxmode *rxmode = &conf->rxmode;
1830	struct rte_eth_txmode *txmode = &conf->txmode;
1831	struct nicvf *nic = nicvf_pmd_priv(dev);
1832	uint8_t cqcount;
1833
1834	PMD_INIT_FUNC_TRACE();
1835
1836	if (!rte_eal_has_hugepages()) {
1837		PMD_INIT_LOG(INFO, "Huge pages are not configured");
1838		return -EINVAL;
1839	}
1840
1841	if (txmode->mq_mode) {
1842		PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
1843		return -EINVAL;
1844	}
1845
1846	if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
1847		rxmode->mq_mode != ETH_MQ_RX_RSS) {
1848		PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
1849		return -EINVAL;
1850	}
1851
1852	if (!rxmode->hw_strip_crc) {
1853		PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
1854		rxmode->hw_strip_crc = 1;
1855	}
1856
1857	if (rxmode->hw_ip_checksum) {
1858		PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
1859		rxmode->hw_ip_checksum = 0;
1860	}
1861
1862	if (rxmode->split_hdr_size) {
1863		PMD_INIT_LOG(INFO, "Rxmode does not support split header");
1864		return -EINVAL;
1865	}
1866
1867	if (rxmode->hw_vlan_filter) {
1868		PMD_INIT_LOG(INFO, "VLAN filter not supported");
1869		return -EINVAL;
1870	}
1871
1872	if (rxmode->hw_vlan_extend) {
1873		PMD_INIT_LOG(INFO, "VLAN extended not supported");
1874		return -EINVAL;
1875	}
1876
1877	if (rxmode->enable_lro) {
1878		PMD_INIT_LOG(INFO, "LRO not supported");
1879		return -EINVAL;
1880	}
1881
1882	if (conf->link_speeds & ETH_LINK_SPEED_FIXED) {
1883		PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported");
1884		return -EINVAL;
1885	}
1886
1887	if (conf->dcb_capability_en) {
1888		PMD_INIT_LOG(INFO, "DCB enable not supported");
1889		return -EINVAL;
1890	}
1891
1892	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
1893		PMD_INIT_LOG(INFO, "Flow director not supported");
1894		return -EINVAL;
1895	}
1896
1897	assert_primary(nic);
1898	NICVF_STATIC_ASSERT(MAX_RCV_QUEUES_PER_QS == MAX_SND_QUEUES_PER_QS);
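	/*
	 * One hardware queue set provides MAX_RCV_QUEUES_PER_QS queue pairs.
	 * If the application configures more queues than that, one additional
	 * secondary queue set is requested per extra group of queues.
	 */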
1899	cqcount = RTE_MAX(data->nb_tx_queues, data->nb_rx_queues);
1900	if (cqcount > MAX_RCV_QUEUES_PER_QS) {
1901		nic->sqs_count = RTE_ALIGN_CEIL(cqcount, MAX_RCV_QUEUES_PER_QS);
1902		nic->sqs_count = (nic->sqs_count / MAX_RCV_QUEUES_PER_QS) - 1;
1903	} else {
1904		nic->sqs_count = 0;
1905	}
1906
1907	assert(nic->sqs_count <= MAX_SQS_PER_VF);
1908
1909	if (nic->sqs_count > 0) {
1910		if (nicvf_request_sqs(nic)) {
1911			rte_panic("Cannot assign sufficient number of "
1912				  "secondary queues to PORT%d VF%" PRIu8 "\n",
1913				  dev->data->port_id, nic->vf_id);
1914		}
1915	}
1916
1917	PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64,
1918		dev->data->port_id, nicvf_hw_cap(nic));
1919
1920	return 0;
1921}
1922
1923/* Initialize and register driver with DPDK Application */
1924static const struct eth_dev_ops nicvf_eth_dev_ops = {
1925	.dev_configure            = nicvf_dev_configure,
1926	.dev_start                = nicvf_dev_start,
1927	.dev_stop                 = nicvf_dev_stop,
1928	.link_update              = nicvf_dev_link_update,
1929	.dev_close                = nicvf_dev_close,
1930	.stats_get                = nicvf_dev_stats_get,
1931	.stats_reset              = nicvf_dev_stats_reset,
1932	.promiscuous_enable       = nicvf_dev_promisc_enable,
1933	.dev_infos_get            = nicvf_dev_info_get,
1934	.dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get,
1935	.mtu_set                  = nicvf_dev_set_mtu,
1936	.reta_update              = nicvf_dev_reta_update,
1937	.reta_query               = nicvf_dev_reta_query,
1938	.rss_hash_update          = nicvf_dev_rss_hash_update,
1939	.rss_hash_conf_get        = nicvf_dev_rss_hash_conf_get,
1940	.rx_queue_start           = nicvf_dev_rx_queue_start,
1941	.rx_queue_stop            = nicvf_dev_rx_queue_stop,
1942	.tx_queue_start           = nicvf_dev_tx_queue_start,
1943	.tx_queue_stop            = nicvf_dev_tx_queue_stop,
1944	.rx_queue_setup           = nicvf_dev_rx_queue_setup,
1945	.rx_queue_release         = nicvf_dev_rx_queue_release,
1946	.rx_queue_count           = nicvf_dev_rx_queue_count,
1947	.tx_queue_setup           = nicvf_dev_tx_queue_setup,
1948	.tx_queue_release         = nicvf_dev_tx_queue_release,
1949	.get_reg                  = nicvf_dev_get_regs,
1950};
1951
1952static int
1953nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
1954{
1955	int ret;
1956	struct rte_pci_device *pci_dev;
1957	struct nicvf *nic = nicvf_pmd_priv(eth_dev);
1958
1959	PMD_INIT_FUNC_TRACE();
1960
1961	eth_dev->dev_ops = &nicvf_eth_dev_ops;
1962
1963	/* For secondary processes, the primary has done all the work */
1964	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1965		if (nic) {
1966			/* Setup callbacks for secondary process */
1967			nicvf_set_tx_function(eth_dev);
1968			nicvf_set_rx_function(eth_dev);
1969			return 0;
1970		} else {
1971			/* If nic == NULL than it is secondary function
1972			 * so ethdev need to be released by caller */
1973			return ENOTSUP;
1974		}
1975	}
1976
1977	pci_dev = eth_dev->pci_dev;
1978	rte_eth_copy_pci_info(eth_dev, pci_dev);
1979
1980	nic->device_id = pci_dev->id.device_id;
1981	nic->vendor_id = pci_dev->id.vendor_id;
1982	nic->subsystem_device_id = pci_dev->id.subsystem_device_id;
1983	nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1984
1985	PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u",
1986			pci_dev->id.vendor_id, pci_dev->id.device_id,
1987			pci_dev->addr.domain, pci_dev->addr.bus,
1988			pci_dev->addr.devid, pci_dev->addr.function);
1989
1990	nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr;
1991	if (!nic->reg_base) {
1992		PMD_INIT_LOG(ERR, "Failed to map BAR0");
1993		ret = -ENODEV;
1994		goto fail;
1995	}
1996
1997	nicvf_disable_all_interrupts(nic);
1998
1999	ret = nicvf_periodic_alarm_start(nicvf_interrupt, eth_dev);
2000	if (ret) {
2001		PMD_INIT_LOG(ERR, "Failed to start period alarm");
2002		goto fail;
2003	}
2004
2005	ret = nicvf_mbox_check_pf_ready(nic);
2006	if (ret) {
2007		PMD_INIT_LOG(ERR, "Failed to get ready message from PF");
2008		goto alarm_fail;
2009	} else {
2010		PMD_INIT_LOG(INFO,
2011			"node=%d vf=%d mode=%s sqs=%s loopback_supported=%s",
2012			nic->node, nic->vf_id,
2013			nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass",
2014			nic->sqs_mode ? "true" : "false",
2015			nic->loopback_supported ? "true" : "false"
2016			);
2017	}
2018
2019	ret = nicvf_base_init(nic);
2020	if (ret) {
2021		PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init");
2022		goto malloc_fail;
2023	}
2024
2025	if (nic->sqs_mode) {
2026		/* Push nic to stack of secondary vfs */
2027		nicvf_svf_push(nic);
2028
2029		/* Steal nic pointer from the device for further reuse */
2030		eth_dev->data->dev_private = NULL;
2031
2032		nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2033		ret = nicvf_periodic_alarm_start(nicvf_vf_interrupt, nic);
2034		if (ret) {
2035			PMD_INIT_LOG(ERR, "Failed to start period alarm");
2036			goto fail;
2037		}
2038
2039		/* Detach port by returning a positive error number */
2040		return ENOTSUP;
2041	}
2042
2043	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2044	if (eth_dev->data->mac_addrs == NULL) {
2045		PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr");
2046		ret = -ENOMEM;
2047		goto alarm_fail;
2048	}
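	/* Generate a random MAC address if none has been assigned yet */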
2049	if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr))
2050		eth_random_addr(&nic->mac_addr[0]);
2051
2052	ether_addr_copy((struct ether_addr *)nic->mac_addr,
2053			&eth_dev->data->mac_addrs[0]);
2054
2055	ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr);
2056	if (ret) {
2057		PMD_INIT_LOG(ERR, "Failed to set mac addr");
2058		goto malloc_fail;
2059	}
2060
2061	PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x",
2062		eth_dev->data->port_id, nic->vendor_id, nic->device_id,
2063		nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2],
2064		nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]);
2065
2066	return 0;
2067
2068malloc_fail:
2069	rte_free(eth_dev->data->mac_addrs);
2070alarm_fail:
2071	nicvf_periodic_alarm_stop(nicvf_interrupt, eth_dev);
2072fail:
2073	return ret;
2074}
2075
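/* PCI IDs of the ThunderX NIC VF variants (CN88xx pass1/pass2, CN81xx,
 * CN83xx) handled by this driver.
 */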
2076static const struct rte_pci_id pci_id_nicvf_map[] = {
2077	{
2078		.class_id = RTE_CLASS_ANY_ID,
2079		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2080		.device_id = PCI_DEVICE_ID_THUNDERX_CN88XX_PASS1_NICVF,
2081		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2082		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS1_NICVF,
2083	},
2084	{
2085		.class_id = RTE_CLASS_ANY_ID,
2086		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2087		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2088		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2089		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN88XX_PASS2_NICVF,
2090	},
2091	{
2092		.class_id = RTE_CLASS_ANY_ID,
2093		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2094		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2095		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2096		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN81XX_NICVF,
2097	},
2098	{
2099		.class_id = RTE_CLASS_ANY_ID,
2100		.vendor_id = PCI_VENDOR_ID_CAVIUM,
2101		.device_id = PCI_DEVICE_ID_THUNDERX_NICVF,
2102		.subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM,
2103		.subsystem_device_id = PCI_SUB_DEVICE_ID_CN83XX_NICVF,
2104	},
2105	{
2106		.vendor_id = 0,
2107	},
2108};
2109
2110static struct eth_driver rte_nicvf_pmd = {
2111	.pci_drv = {
2112		.id_table = pci_id_nicvf_map,
2113		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2114		.probe = rte_eth_dev_pci_probe,
2115		.remove = rte_eth_dev_pci_remove,
2116	},
2117	.eth_dev_init = nicvf_eth_dev_init,
2118	.dev_private_size = sizeof(struct nicvf),
2119};
2120
2121RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
2122RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
2123