1a551c94aSIdo Barnea/*-
2a551c94aSIdo Barnea *   BSD LICENSE
3a551c94aSIdo Barnea *
4a551c94aSIdo Barnea *   Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
5a551c94aSIdo Barnea *   All rights reserved.
6a551c94aSIdo Barnea *
7a551c94aSIdo Barnea *   Redistribution and use in source and binary forms, with or without
8a551c94aSIdo Barnea *   modification, are permitted provided that the following conditions
9a551c94aSIdo Barnea *   are met:
10a551c94aSIdo Barnea *
11a551c94aSIdo Barnea *     * Redistributions of source code must retain the above copyright
12a551c94aSIdo Barnea *       notice, this list of conditions and the following disclaimer.
13a551c94aSIdo Barnea *     * Redistributions in binary form must reproduce the above copyright
14a551c94aSIdo Barnea *       notice, this list of conditions and the following disclaimer in
15a551c94aSIdo Barnea *       the documentation and/or other materials provided with the
16a551c94aSIdo Barnea *       distribution.
17a551c94aSIdo Barnea *     * Neither the name of Intel Corporation nor the names of its
18a551c94aSIdo Barnea *       contributors may be used to endorse or promote products derived
19a551c94aSIdo Barnea *       from this software without specific prior written permission.
20a551c94aSIdo Barnea *
21a551c94aSIdo Barnea *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22a551c94aSIdo Barnea *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23a551c94aSIdo Barnea *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24a551c94aSIdo Barnea *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25a551c94aSIdo Barnea *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26a551c94aSIdo Barnea *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27a551c94aSIdo Barnea *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28a551c94aSIdo Barnea *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29a551c94aSIdo Barnea *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30a551c94aSIdo Barnea *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31a551c94aSIdo Barnea *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32a551c94aSIdo Barnea */
33a551c94aSIdo Barnea
34a551c94aSIdo Barnea#include <rte_ethdev.h>
35a551c94aSIdo Barnea#include <rte_malloc.h>
36a551c94aSIdo Barnea#include <rte_memzone.h>
37a551c94aSIdo Barnea#include <rte_string_fns.h>
38a551c94aSIdo Barnea#include <rte_dev.h>
39a551c94aSIdo Barnea#include <rte_spinlock.h>
40a551c94aSIdo Barnea#include <rte_kvargs.h>
41a551c94aSIdo Barnea
42a551c94aSIdo Barnea#include "fm10k.h"
43a551c94aSIdo Barnea#include "base/fm10k_api.h"
44a551c94aSIdo Barnea
/* Default delay to acquire mailbox lock */
#define FM10K_MBXLOCK_DELAY_US 20
/* Mask selecting the lower 32 bits of a 64-bit value */
#define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL

/* Pool index used for the PF main VSI when installing MAC filters */
#define MAIN_VSI_POOL_NUMBER 0

/* Max try times to acquire switch status */
#define MAX_QUERY_SWITCH_STATE_TIMES 10
/* Wait interval to get switch status */
#define WAIT_SWITCH_MSG_US    100000
/* A period of quiescence for switch */
#define FM10K_SWITCH_QUIESCE_US 10000
/* Number of chars per uint32 type */
#define CHARS_PER_UINT32 (sizeof(uint32_t))
/* NOTE(review): mask of CHARS_PER_UINT32 (= 4) low bits, i.e. 0xF */
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)

/* default 1:1 map from queue ID to interrupt vector ID */
#define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])

/* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
#define MAX_LPORT_NUM    128
/* Base logical-port (glort) index of the Flow Director queue range */
#define GLORT_FD_Q_BASE  0x40
#define GLORT_PF_MASK    0xFFC0
#define GLORT_FD_MASK    GLORT_PF_MASK
#define GLORT_FD_INDEX   GLORT_FD_Q_BASE
70a551c94aSIdo Barnea
/* Forward declarations of local helpers defined later in this file */
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
static inline int fm10k_glort_valid(struct fm10k_hw *hw);
static int
fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
	const u8 *mac, bool add, uint32_t pool);
static void fm10k_tx_queue_release(void *queue);
static void fm10k_rx_queue_release(void *queue);
static void fm10k_set_rx_function(struct rte_eth_dev *dev);
static void fm10k_set_tx_function(struct rte_eth_dev *dev);
static int fm10k_check_ftag(struct rte_devargs *devargs);
86a551c94aSIdo Barnea
/* Pairs an xstats display name with the byte offset of the matching
 * counter inside the stats structure it is read from.
 */
struct fm10k_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned offset;
};
91a551c94aSIdo Barnea
/* Device-level extended stats; offsets into struct fm10k_hw_stats */
struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
	{"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
	{"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
	{"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
	{"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
	{"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
	{"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
	{"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
	{"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
		nodesc_drop)},
};

/* Number of device-level xstats entries */
#define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
		sizeof(fm10k_hw_stats_strings[0]))

/* Per-RX-queue extended stats; offsets into struct fm10k_hw_stats_q */
struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
	{"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
};

#define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
		sizeof(fm10k_hw_stats_rx_q_strings[0]))

/* Per-TX-queue extended stats; offsets into struct fm10k_hw_stats_q */
struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
	{"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
	{"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
};

#define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
		sizeof(fm10k_hw_stats_tx_q_strings[0]))

/* Total xstats count: device-level plus per-queue entries for the
 * maximum PF queue count.
 */
#define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
		(FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
128a551c94aSIdo Barnea
/* Initialize the spinlock that serializes all mailbox access */
static void
fm10k_mbx_initlock(struct fm10k_hw *hw)
{
	rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
134a551c94aSIdo Barnea
135a551c94aSIdo Barneastatic void
136a551c94aSIdo Barneafm10k_mbx_lock(struct fm10k_hw *hw)
137a551c94aSIdo Barnea{
138a551c94aSIdo Barnea	while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
139a551c94aSIdo Barnea		rte_delay_us(FM10K_MBXLOCK_DELAY_US);
140a551c94aSIdo Barnea}
141a551c94aSIdo Barnea
/* Release the mailbox spinlock taken by fm10k_mbx_lock() */
static void
fm10k_mbx_unlock(struct fm10k_hw *hw)
{
	rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
147a551c94aSIdo Barnea
/* Stubs needed for linkage when vPMD is disabled */
int __attribute__((weak))
fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
{
	/* vector RX is never eligible in this build */
	return -1;
}
154a551c94aSIdo Barnea
/* Weak stub: vector RX unavailable, receives no packets */
uint16_t __attribute__((weak))
fm10k_recv_pkts_vec(
	__rte_unused void *rx_queue,
	__rte_unused struct rte_mbuf **rx_pkts,
	__rte_unused uint16_t nb_pkts)
{
	return 0;
}
163a551c94aSIdo Barnea
/* Weak stub: vector scattered RX unavailable, receives no packets */
uint16_t __attribute__((weak))
fm10k_recv_scattered_pkts_vec(
		__rte_unused void *rx_queue,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}
172a551c94aSIdo Barnea
/* Weak stub: vector RX queue setup always fails when vPMD is disabled */
int __attribute__((weak))
fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)

{
	return -1;
}
179a551c94aSIdo Barnea
180a551c94aSIdo Barneavoid __attribute__((weak))
181a551c94aSIdo Barneafm10k_rx_queue_release_mbufs_vec(
182a551c94aSIdo Barnea		__rte_unused struct fm10k_rx_queue *rxq)
183a551c94aSIdo Barnea{
184a551c94aSIdo Barnea	return;
185a551c94aSIdo Barnea}
186a551c94aSIdo Barnea
187a551c94aSIdo Barneavoid __attribute__((weak))
188a551c94aSIdo Barneafm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
189a551c94aSIdo Barnea{
190a551c94aSIdo Barnea	return;
191a551c94aSIdo Barnea}
192a551c94aSIdo Barnea
/* Weak stub: vector TX is never eligible in this build */
int __attribute__((weak))
fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
{
	return -1;
}
198a551c94aSIdo Barnea
/* Weak stub: vector TX unavailable, transmits no packets */
uint16_t __attribute__((weak))
fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}
206a551c94aSIdo Barnea
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device.
 * return 0 on success
 * return -ENOMEM if buffers cannot be allocated
 * return -EINVAL if buffers do not satisfy alignment condition
 */
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
	static const union fm10k_rx_desc zero = {{0} };
	uint64_t dma_addr;
	int i, diag;
	PMD_INIT_FUNC_TRACE();

	/* grab one mbuf per descriptor in a single bulk operation */
	diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
	if (diag != 0)
		return -ENOMEM;

	for (i = 0; i < q->nb_desc; ++i) {
		fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
		if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
			/* none were handed to HW yet, return them all */
			rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
						q->nb_desc);
			return -EINVAL;
		}
		/* point both packet and header address at the mbuf data */
		dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
		q->hw_ring[i].q.pkt_addr = dma_addr;
		q->hw_ring[i].q.hdr_addr = dma_addr;
	}

	/* initialize extra software ring entries. Space for these extra
	 * entries is always allocated.
	 */
	memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
	for (i = 0; i < q->nb_fake_desc; ++i) {
		q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
		q->hw_ring[q->nb_desc + i] = zero;
	}

	/* reset ring bookkeeping, then publish the tail pointer to HW */
	q->next_dd = 0;
	q->next_alloc = 0;
	q->next_trigger = q->alloc_thresh - 1;
	FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
	q->rxrearm_start = 0;
	q->rxrearm_nb = 0;

	return 0;
}
256a551c94aSIdo Barnea
257a551c94aSIdo Barnea/*
258a551c94aSIdo Barnea * clean queue, descriptor rings, free software buffers used when stopping
259a551c94aSIdo Barnea * device.
260a551c94aSIdo Barnea */
261a551c94aSIdo Barneastatic inline void
262a551c94aSIdo Barnearx_queue_clean(struct fm10k_rx_queue *q)
263a551c94aSIdo Barnea{
264a551c94aSIdo Barnea	union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
265a551c94aSIdo Barnea	uint32_t i;
266a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
267a551c94aSIdo Barnea
268a551c94aSIdo Barnea	/* zero descriptor rings */
269a551c94aSIdo Barnea	for (i = 0; i < q->nb_desc; ++i)
270a551c94aSIdo Barnea		q->hw_ring[i] = zero;
271a551c94aSIdo Barnea
272a551c94aSIdo Barnea	/* zero faked descriptors */
273a551c94aSIdo Barnea	for (i = 0; i < q->nb_fake_desc; ++i)
274a551c94aSIdo Barnea		q->hw_ring[q->nb_desc + i] = zero;
275a551c94aSIdo Barnea
276a551c94aSIdo Barnea	/* vPMD driver has a different way of releasing mbufs. */
277a551c94aSIdo Barnea	if (q->rx_using_sse) {
278a551c94aSIdo Barnea		fm10k_rx_queue_release_mbufs_vec(q);
279a551c94aSIdo Barnea		return;
280a551c94aSIdo Barnea	}
281a551c94aSIdo Barnea
282a551c94aSIdo Barnea	/* free software buffers */
283a551c94aSIdo Barnea	for (i = 0; i < q->nb_desc; ++i) {
284a551c94aSIdo Barnea		if (q->sw_ring[i]) {
285a551c94aSIdo Barnea			rte_pktmbuf_free_seg(q->sw_ring[i]);
286a551c94aSIdo Barnea			q->sw_ring[i] = NULL;
287a551c94aSIdo Barnea		}
288a551c94aSIdo Barnea	}
289a551c94aSIdo Barnea}
290a551c94aSIdo Barnea
291a551c94aSIdo Barnea/*
292a551c94aSIdo Barnea * free all queue memory used when releasing the queue (i.e. configure)
293a551c94aSIdo Barnea */
294a551c94aSIdo Barneastatic inline void
295a551c94aSIdo Barnearx_queue_free(struct fm10k_rx_queue *q)
296a551c94aSIdo Barnea{
297a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
298a551c94aSIdo Barnea	if (q) {
299a551c94aSIdo Barnea		PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
300a551c94aSIdo Barnea		rx_queue_clean(q);
301a551c94aSIdo Barnea		if (q->sw_ring) {
302a551c94aSIdo Barnea			rte_free(q->sw_ring);
303a551c94aSIdo Barnea			q->sw_ring = NULL;
304a551c94aSIdo Barnea		}
305a551c94aSIdo Barnea		rte_free(q);
306a551c94aSIdo Barnea		q = NULL;
307a551c94aSIdo Barnea	}
308a551c94aSIdo Barnea}
309a551c94aSIdo Barnea
310a551c94aSIdo Barnea/*
311a551c94aSIdo Barnea * disable RX queue, wait unitl HW finished necessary flush operation
312a551c94aSIdo Barnea */
313a551c94aSIdo Barneastatic inline int
314a551c94aSIdo Barnearx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
315a551c94aSIdo Barnea{
316a551c94aSIdo Barnea	uint32_t reg, i;
317a551c94aSIdo Barnea
318a551c94aSIdo Barnea	reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
319a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
320a551c94aSIdo Barnea			reg & ~FM10K_RXQCTL_ENABLE);
321a551c94aSIdo Barnea
322a551c94aSIdo Barnea	/* Wait 100us at most */
323a551c94aSIdo Barnea	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
324a551c94aSIdo Barnea		rte_delay_us(1);
325a551c94aSIdo Barnea		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
326a551c94aSIdo Barnea		if (!(reg & FM10K_RXQCTL_ENABLE))
327a551c94aSIdo Barnea			break;
328a551c94aSIdo Barnea	}
329a551c94aSIdo Barnea
330a551c94aSIdo Barnea	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
331a551c94aSIdo Barnea		return -1;
332a551c94aSIdo Barnea
333a551c94aSIdo Barnea	return 0;
334a551c94aSIdo Barnea}
335a551c94aSIdo Barnea
/*
 * reset queue to initial state, allocate software buffers used when starting
 * device
 */
static inline void
tx_queue_reset(struct fm10k_tx_queue *q)
{
	PMD_INIT_FUNC_TRACE();
	q->last_free = 0;
	q->next_free = 0;
	q->nb_used = 0;
	/* one descriptor is kept unused to distinguish full from empty */
	q->nb_free = q->nb_desc - 1;
	/* size the RS-bit tracker for the number of RS marks the ring can hold */
	fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
	FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
}
351a551c94aSIdo Barnea
352a551c94aSIdo Barnea/*
353a551c94aSIdo Barnea * clean queue, descriptor rings, free software buffers used when stopping
354a551c94aSIdo Barnea * device
355a551c94aSIdo Barnea */
356a551c94aSIdo Barneastatic inline void
357a551c94aSIdo Barneatx_queue_clean(struct fm10k_tx_queue *q)
358a551c94aSIdo Barnea{
359a551c94aSIdo Barnea	struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
360a551c94aSIdo Barnea	uint32_t i;
361a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
362a551c94aSIdo Barnea
363a551c94aSIdo Barnea	/* zero descriptor rings */
364a551c94aSIdo Barnea	for (i = 0; i < q->nb_desc; ++i)
365a551c94aSIdo Barnea		q->hw_ring[i] = zero;
366a551c94aSIdo Barnea
367a551c94aSIdo Barnea	/* free software buffers */
368a551c94aSIdo Barnea	for (i = 0; i < q->nb_desc; ++i) {
369a551c94aSIdo Barnea		if (q->sw_ring[i]) {
370a551c94aSIdo Barnea			rte_pktmbuf_free_seg(q->sw_ring[i]);
371a551c94aSIdo Barnea			q->sw_ring[i] = NULL;
372a551c94aSIdo Barnea		}
373a551c94aSIdo Barnea	}
374a551c94aSIdo Barnea}
375a551c94aSIdo Barnea
376a551c94aSIdo Barnea/*
377a551c94aSIdo Barnea * free all queue memory used when releasing the queue (i.e. configure)
378a551c94aSIdo Barnea */
379a551c94aSIdo Barneastatic inline void
380a551c94aSIdo Barneatx_queue_free(struct fm10k_tx_queue *q)
381a551c94aSIdo Barnea{
382a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
383a551c94aSIdo Barnea	if (q) {
384a551c94aSIdo Barnea		PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
385a551c94aSIdo Barnea		tx_queue_clean(q);
386a551c94aSIdo Barnea		if (q->rs_tracker.list) {
387a551c94aSIdo Barnea			rte_free(q->rs_tracker.list);
388a551c94aSIdo Barnea			q->rs_tracker.list = NULL;
389a551c94aSIdo Barnea		}
390a551c94aSIdo Barnea		if (q->sw_ring) {
391a551c94aSIdo Barnea			rte_free(q->sw_ring);
392a551c94aSIdo Barnea			q->sw_ring = NULL;
393a551c94aSIdo Barnea		}
394a551c94aSIdo Barnea		rte_free(q);
395a551c94aSIdo Barnea		q = NULL;
396a551c94aSIdo Barnea	}
397a551c94aSIdo Barnea}
398a551c94aSIdo Barnea
399a551c94aSIdo Barnea/*
400a551c94aSIdo Barnea * disable TX queue, wait unitl HW finished necessary flush operation
401a551c94aSIdo Barnea */
402a551c94aSIdo Barneastatic inline int
403a551c94aSIdo Barneatx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
404a551c94aSIdo Barnea{
405a551c94aSIdo Barnea	uint32_t reg, i;
406a551c94aSIdo Barnea
407a551c94aSIdo Barnea	reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
408a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
409a551c94aSIdo Barnea			reg & ~FM10K_TXDCTL_ENABLE);
410a551c94aSIdo Barnea
411a551c94aSIdo Barnea	/* Wait 100us at most */
412a551c94aSIdo Barnea	for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
413a551c94aSIdo Barnea		rte_delay_us(1);
414a551c94aSIdo Barnea		reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
415a551c94aSIdo Barnea		if (!(reg & FM10K_TXDCTL_ENABLE))
416a551c94aSIdo Barnea			break;
417a551c94aSIdo Barnea	}
418a551c94aSIdo Barnea
419a551c94aSIdo Barnea	if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
420a551c94aSIdo Barnea		return -1;
421a551c94aSIdo Barnea
422a551c94aSIdo Barnea	return 0;
423a551c94aSIdo Barnea}
424a551c94aSIdo Barnea
425a551c94aSIdo Barneastatic int
426a551c94aSIdo Barneafm10k_check_mq_mode(struct rte_eth_dev *dev)
427a551c94aSIdo Barnea{
428a551c94aSIdo Barnea	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
429a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
430a551c94aSIdo Barnea	struct rte_eth_vmdq_rx_conf *vmdq_conf;
431a551c94aSIdo Barnea	uint16_t nb_rx_q = dev->data->nb_rx_queues;
432a551c94aSIdo Barnea
433a551c94aSIdo Barnea	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
434a551c94aSIdo Barnea
435a551c94aSIdo Barnea	if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
436a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "DCB mode is not supported.");
437a551c94aSIdo Barnea		return -EINVAL;
438a551c94aSIdo Barnea	}
439a551c94aSIdo Barnea
440a551c94aSIdo Barnea	if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
441a551c94aSIdo Barnea		return 0;
442a551c94aSIdo Barnea
443a551c94aSIdo Barnea	if (hw->mac.type == fm10k_mac_vf) {
444a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
445a551c94aSIdo Barnea		return -EINVAL;
446a551c94aSIdo Barnea	}
447a551c94aSIdo Barnea
448a551c94aSIdo Barnea	/* Check VMDQ queue pool number */
449a551c94aSIdo Barnea	if (vmdq_conf->nb_queue_pools >
450a551c94aSIdo Barnea			sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
451a551c94aSIdo Barnea			vmdq_conf->nb_queue_pools > nb_rx_q) {
452a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
453a551c94aSIdo Barnea			vmdq_conf->nb_queue_pools);
454a551c94aSIdo Barnea		return -EINVAL;
455a551c94aSIdo Barnea	}
456a551c94aSIdo Barnea
457a551c94aSIdo Barnea	return 0;
458a551c94aSIdo Barnea}
459a551c94aSIdo Barnea
/* Default TX queue ops: plain (non-vector) queue reset */
static const struct fm10k_txq_ops def_txq_ops = {
	.reset = tx_queue_reset,
};
463a551c94aSIdo Barnea
464a551c94aSIdo Barneastatic int
465a551c94aSIdo Barneafm10k_dev_configure(struct rte_eth_dev *dev)
466a551c94aSIdo Barnea{
467a551c94aSIdo Barnea	int ret;
468a551c94aSIdo Barnea
469a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
470a551c94aSIdo Barnea
471a551c94aSIdo Barnea	if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
472a551c94aSIdo Barnea		PMD_INIT_LOG(WARNING, "fm10k always strip CRC");
473a551c94aSIdo Barnea	/* multipe queue mode checking */
474a551c94aSIdo Barnea	ret  = fm10k_check_mq_mode(dev);
475a551c94aSIdo Barnea	if (ret != 0) {
476a551c94aSIdo Barnea		PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
477a551c94aSIdo Barnea			    ret);
478a551c94aSIdo Barnea		return ret;
479a551c94aSIdo Barnea	}
480a551c94aSIdo Barnea
481a551c94aSIdo Barnea	return 0;
482a551c94aSIdo Barnea}
483a551c94aSIdo Barnea
/* fls = find last set bit = 32 minus the number of leading zeros */
#ifndef fls
/* zero is handled explicitly because __builtin_clz(0) is undefined */
#define fls(x) (((x) == 0) ? 0 : (32 - __builtin_clz((x))))
#endif
488a551c94aSIdo Barnea
489a551c94aSIdo Barneastatic void
490a551c94aSIdo Barneafm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
491a551c94aSIdo Barnea{
492a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
493a551c94aSIdo Barnea	struct rte_eth_vmdq_rx_conf *vmdq_conf;
494a551c94aSIdo Barnea	uint32_t i;
495a551c94aSIdo Barnea
496a551c94aSIdo Barnea	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
497a551c94aSIdo Barnea
498a551c94aSIdo Barnea	for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
499a551c94aSIdo Barnea		if (!vmdq_conf->pool_map[i].pools)
500a551c94aSIdo Barnea			continue;
501a551c94aSIdo Barnea		fm10k_mbx_lock(hw);
502a551c94aSIdo Barnea		fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
503a551c94aSIdo Barnea		fm10k_mbx_unlock(hw);
504a551c94aSIdo Barnea	}
505a551c94aSIdo Barnea}
506a551c94aSIdo Barnea
/* Re-install the default MAC filter on the PF main VSI pool. */
static void
fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);
}
516a551c94aSIdo Barnea
517a551c94aSIdo Barneastatic void
518a551c94aSIdo Barneafm10k_dev_rss_configure(struct rte_eth_dev *dev)
519a551c94aSIdo Barnea{
520a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
521a551c94aSIdo Barnea	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
522a551c94aSIdo Barnea	uint32_t mrqc, *key, i, reta, j;
523a551c94aSIdo Barnea	uint64_t hf;
524a551c94aSIdo Barnea
525a551c94aSIdo Barnea#define RSS_KEY_SIZE 40
526a551c94aSIdo Barnea	static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
527a551c94aSIdo Barnea		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
528a551c94aSIdo Barnea		0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
529a551c94aSIdo Barnea		0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
530a551c94aSIdo Barnea		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
531a551c94aSIdo Barnea		0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
532a551c94aSIdo Barnea	};
533a551c94aSIdo Barnea
534a551c94aSIdo Barnea	if (dev->data->nb_rx_queues == 1 ||
535a551c94aSIdo Barnea	    dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
536a551c94aSIdo Barnea	    dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
537a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
538a551c94aSIdo Barnea		return;
539a551c94aSIdo Barnea	}
540a551c94aSIdo Barnea
541a551c94aSIdo Barnea	/* random key is rss_intel_key (default) or user provided (rss_key) */
542a551c94aSIdo Barnea	if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
543a551c94aSIdo Barnea		key = (uint32_t *)rss_intel_key;
544a551c94aSIdo Barnea	else
545a551c94aSIdo Barnea		key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
546a551c94aSIdo Barnea
547a551c94aSIdo Barnea	/* Now fill our hash function seeds, 4 bytes at a time */
548a551c94aSIdo Barnea	for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
549a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
550a551c94aSIdo Barnea
551a551c94aSIdo Barnea	/*
552a551c94aSIdo Barnea	 * Fill in redirection table
553a551c94aSIdo Barnea	 * The byte-swap is needed because NIC registers are in
554a551c94aSIdo Barnea	 * little-endian order.
555a551c94aSIdo Barnea	 */
556a551c94aSIdo Barnea	reta = 0;
557a551c94aSIdo Barnea	for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
558a551c94aSIdo Barnea		if (j == dev->data->nb_rx_queues)
559a551c94aSIdo Barnea			j = 0;
560a551c94aSIdo Barnea		reta = (reta << CHAR_BIT) | j;
561a551c94aSIdo Barnea		if ((i & 3) == 3)
562a551c94aSIdo Barnea			FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2),
563a551c94aSIdo Barnea					rte_bswap32(reta));
564a551c94aSIdo Barnea	}
565a551c94aSIdo Barnea
566a551c94aSIdo Barnea	/*
567a551c94aSIdo Barnea	 * Generate RSS hash based on packet types, TCP/UDP
568a551c94aSIdo Barnea	 * port numbers and/or IPv4/v6 src and dst addresses
569a551c94aSIdo Barnea	 */
570a551c94aSIdo Barnea	hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
571a551c94aSIdo Barnea	mrqc = 0;
572a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
573a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
574a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
575a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
576a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
577a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
578a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
579a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
580a551c94aSIdo Barnea	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;
581a551c94aSIdo Barnea
582a551c94aSIdo Barnea	if (mrqc == 0) {
583a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
584a551c94aSIdo Barnea			"supported", hf);
585a551c94aSIdo Barnea		return;
586a551c94aSIdo Barnea	}
587a551c94aSIdo Barnea
588a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
589a551c94aSIdo Barnea}
590a551c94aSIdo Barnea
591a551c94aSIdo Barneastatic void
592a551c94aSIdo Barneafm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
593a551c94aSIdo Barnea{
594a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
595a551c94aSIdo Barnea	uint32_t i;
596a551c94aSIdo Barnea
597a551c94aSIdo Barnea	for (i = 0; i < nb_lport_new; i++) {
598a551c94aSIdo Barnea		/* Set unicast mode by default. App can change
599a551c94aSIdo Barnea		 * to other mode in other API func.
600a551c94aSIdo Barnea		 */
601a551c94aSIdo Barnea		fm10k_mbx_lock(hw);
602a551c94aSIdo Barnea		hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
603a551c94aSIdo Barnea			FM10K_XCAST_MODE_NONE);
604a551c94aSIdo Barnea		fm10k_mbx_unlock(hw);
605a551c94aSIdo Barnea	}
606a551c94aSIdo Barnea}
607a551c94aSIdo Barnea
/* Configure RX multi-queue: always program RSS, and on PF additionally
 * reconcile the VMDQ pool count, logical ports and MAC/VLAN tables with
 * the current configuration.
 */
static void
fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_vmdq_rx_conf *vmdq_conf;
	struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
	struct fm10k_macvlan_filter_info *macvlan;
	uint16_t nb_queue_pools = 0; /* pool number in configuration */
	uint16_t nb_lport_new;

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	fm10k_dev_rss_configure(dev);

	/* only PF supports VMDQ */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
		nb_queue_pools = vmdq_conf->nb_queue_pools;

	/* no pool number change, no need to update logic port and VLAN/MAC */
	if (macvlan->nb_queue_pools == nb_queue_pools)
		return;

	/* without VMDQ there is still one logical port (the PF itself) */
	nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
	fm10k_dev_logic_port_update(dev, nb_lport_new);

	/* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
	memset(dev->data->mac_addrs, 0,
		ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
		&dev->data->mac_addrs[0]);
	memset(macvlan, 0, sizeof(*macvlan));
	macvlan->nb_queue_pools = nb_queue_pools;

	/* re-install filters for the active mode */
	if (nb_queue_pools)
		fm10k_dev_vmdq_rx_configure(dev);
	else
		fm10k_dev_pf_main_vsi_reset(dev);
}
650a551c94aSIdo Barnea
651a551c94aSIdo Barneastatic int
652a551c94aSIdo Barneafm10k_dev_tx_init(struct rte_eth_dev *dev)
653a551c94aSIdo Barnea{
654a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
655a551c94aSIdo Barnea	int i, ret;
656a551c94aSIdo Barnea	struct fm10k_tx_queue *txq;
657a551c94aSIdo Barnea	uint64_t base_addr;
658a551c94aSIdo Barnea	uint32_t size;
659a551c94aSIdo Barnea
660a551c94aSIdo Barnea	/* Disable TXINT to avoid possible interrupt */
661a551c94aSIdo Barnea	for (i = 0; i < hw->mac.max_queues; i++)
662a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TXINT(i),
663a551c94aSIdo Barnea				3 << FM10K_TXINT_TIMER_SHIFT);
664a551c94aSIdo Barnea
665a551c94aSIdo Barnea	/* Setup TX queue */
666a551c94aSIdo Barnea	for (i = 0; i < dev->data->nb_tx_queues; ++i) {
667a551c94aSIdo Barnea		txq = dev->data->tx_queues[i];
668a551c94aSIdo Barnea		base_addr = txq->hw_ring_phys_addr;
669a551c94aSIdo Barnea		size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
670a551c94aSIdo Barnea
671a551c94aSIdo Barnea		/* disable queue to avoid issues while updating state */
672a551c94aSIdo Barnea		ret = tx_queue_disable(hw, i);
673a551c94aSIdo Barnea		if (ret) {
674a551c94aSIdo Barnea			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
675a551c94aSIdo Barnea			return -1;
676a551c94aSIdo Barnea		}
677a551c94aSIdo Barnea		/* Enable use of FTAG bit in TX descriptor, PFVTCTL
678a551c94aSIdo Barnea		 * register is read-only for VF.
679a551c94aSIdo Barnea		 */
6809ca4a157SIdo Barnea		if (fm10k_check_ftag(dev->device->devargs)) {
681a551c94aSIdo Barnea			if (hw->mac.type == fm10k_mac_pf) {
682a551c94aSIdo Barnea				FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
683a551c94aSIdo Barnea						FM10K_PFVTCTL_FTAG_DESC_ENABLE);
684a551c94aSIdo Barnea				PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
685a551c94aSIdo Barnea			} else {
686a551c94aSIdo Barnea				PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
687a551c94aSIdo Barnea				return -ENOTSUP;
688a551c94aSIdo Barnea			}
689a551c94aSIdo Barnea		}
690a551c94aSIdo Barnea
691a551c94aSIdo Barnea		/* set location and size for descriptor ring */
692a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
693a551c94aSIdo Barnea				base_addr & UINT64_LOWER_32BITS_MASK);
694a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
695a551c94aSIdo Barnea				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
696a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
697a551c94aSIdo Barnea
6989ca4a157SIdo Barnea		/* assign default SGLORT for each TX queue by PF */
6999ca4a157SIdo Barnea		if (hw->mac.type == fm10k_mac_pf)
7009ca4a157SIdo Barnea			FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
701a551c94aSIdo Barnea	}
702a551c94aSIdo Barnea
703a551c94aSIdo Barnea	/* set up vector or scalar TX function as appropriate */
704a551c94aSIdo Barnea	fm10k_set_tx_function(dev);
705a551c94aSIdo Barnea
706a551c94aSIdo Barnea	return 0;
707a551c94aSIdo Barnea}
708a551c94aSIdo Barnea
709a551c94aSIdo Barneastatic int
710a551c94aSIdo Barneafm10k_dev_rx_init(struct rte_eth_dev *dev)
711a551c94aSIdo Barnea{
712a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
713a551c94aSIdo Barnea	struct fm10k_macvlan_filter_info *macvlan;
7149ca4a157SIdo Barnea	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
7159ca4a157SIdo Barnea	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
716a551c94aSIdo Barnea	int i, ret;
717a551c94aSIdo Barnea	struct fm10k_rx_queue *rxq;
718a551c94aSIdo Barnea	uint64_t base_addr;
719a551c94aSIdo Barnea	uint32_t size;
720a551c94aSIdo Barnea	uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
721a551c94aSIdo Barnea	uint32_t logic_port = hw->mac.dglort_map;
722a551c94aSIdo Barnea	uint16_t buf_size;
723a551c94aSIdo Barnea	uint16_t queue_stride = 0;
724a551c94aSIdo Barnea
725a551c94aSIdo Barnea	/* enable RXINT for interrupt mode */
726a551c94aSIdo Barnea	i = 0;
727a551c94aSIdo Barnea	if (rte_intr_dp_is_en(intr_handle)) {
728a551c94aSIdo Barnea		for (; i < dev->data->nb_rx_queues; i++) {
7299ca4a157SIdo Barnea			FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
730a551c94aSIdo Barnea			if (hw->mac.type == fm10k_mac_pf)
7319ca4a157SIdo Barnea				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
732a551c94aSIdo Barnea					FM10K_ITR_AUTOMASK |
733a551c94aSIdo Barnea					FM10K_ITR_MASK_CLEAR);
734a551c94aSIdo Barnea			else
7359ca4a157SIdo Barnea				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
736a551c94aSIdo Barnea					FM10K_ITR_AUTOMASK |
737a551c94aSIdo Barnea					FM10K_ITR_MASK_CLEAR);
738a551c94aSIdo Barnea		}
739a551c94aSIdo Barnea	}
740a551c94aSIdo Barnea	/* Disable other RXINT to avoid possible interrupt */
741a551c94aSIdo Barnea	for (; i < hw->mac.max_queues; i++)
742a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RXINT(i),
743a551c94aSIdo Barnea			3 << FM10K_RXINT_TIMER_SHIFT);
744a551c94aSIdo Barnea
745a551c94aSIdo Barnea	/* Setup RX queues */
746a551c94aSIdo Barnea	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
747a551c94aSIdo Barnea		rxq = dev->data->rx_queues[i];
748a551c94aSIdo Barnea		base_addr = rxq->hw_ring_phys_addr;
749a551c94aSIdo Barnea		size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
750a551c94aSIdo Barnea
751a551c94aSIdo Barnea		/* disable queue to avoid issues while updating state */
752a551c94aSIdo Barnea		ret = rx_queue_disable(hw, i);
753a551c94aSIdo Barnea		if (ret) {
754a551c94aSIdo Barnea			PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
755a551c94aSIdo Barnea			return -1;
756a551c94aSIdo Barnea		}
757a551c94aSIdo Barnea
758a551c94aSIdo Barnea		/* Setup the Base and Length of the Rx Descriptor Ring */
759a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
760a551c94aSIdo Barnea				base_addr & UINT64_LOWER_32BITS_MASK);
761a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
762a551c94aSIdo Barnea				base_addr >> (CHAR_BIT * sizeof(uint32_t)));
763a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
764a551c94aSIdo Barnea
765a551c94aSIdo Barnea		/* Configure the Rx buffer size for one buff without split */
766a551c94aSIdo Barnea		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
767a551c94aSIdo Barnea			RTE_PKTMBUF_HEADROOM);
768a551c94aSIdo Barnea		/* As RX buffer is aligned to 512B within mbuf, some bytes are
769a551c94aSIdo Barnea		 * reserved for this purpose, and the worst case could be 511B.
770a551c94aSIdo Barnea		 * But SRR reg assumes all buffers have the same size. In order
771a551c94aSIdo Barnea		 * to fill the gap, we'll have to consider the worst case and
772a551c94aSIdo Barnea		 * assume 512B is reserved. If we don't do so, it's possible
773a551c94aSIdo Barnea		 * for HW to overwrite data to next mbuf.
774a551c94aSIdo Barnea		 */
775a551c94aSIdo Barnea		buf_size -= FM10K_RX_DATABUF_ALIGN;
776a551c94aSIdo Barnea
777a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
778a551c94aSIdo Barnea				(buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
779a551c94aSIdo Barnea				FM10K_SRRCTL_LOOPBACK_SUPPRESS);
780a551c94aSIdo Barnea
781a551c94aSIdo Barnea		/* It adds dual VLAN length for supporting dual VLAN */
782a551c94aSIdo Barnea		if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
783a551c94aSIdo Barnea				2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
784a551c94aSIdo Barnea			dev->data->dev_conf.rxmode.enable_scatter) {
785a551c94aSIdo Barnea			uint32_t reg;
786a551c94aSIdo Barnea			dev->data->scattered_rx = 1;
787a551c94aSIdo Barnea			reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
788a551c94aSIdo Barnea			reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
789a551c94aSIdo Barnea			FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
790a551c94aSIdo Barnea		}
791a551c94aSIdo Barnea
792a551c94aSIdo Barnea		/* Enable drop on empty, it's RO for VF */
793a551c94aSIdo Barnea		if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
794a551c94aSIdo Barnea			rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
795a551c94aSIdo Barnea
796a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
797a551c94aSIdo Barnea		FM10K_WRITE_FLUSH(hw);
798a551c94aSIdo Barnea	}
799a551c94aSIdo Barnea
800a551c94aSIdo Barnea	/* Configure VMDQ/RSS if applicable */
801a551c94aSIdo Barnea	fm10k_dev_mq_rx_configure(dev);
802a551c94aSIdo Barnea
803a551c94aSIdo Barnea	/* Decide the best RX function */
804a551c94aSIdo Barnea	fm10k_set_rx_function(dev);
805a551c94aSIdo Barnea
806a551c94aSIdo Barnea	/* update RX_SGLORT for loopback suppress*/
807a551c94aSIdo Barnea	if (hw->mac.type != fm10k_mac_pf)
808a551c94aSIdo Barnea		return 0;
809a551c94aSIdo Barnea	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
810a551c94aSIdo Barnea	if (macvlan->nb_queue_pools)
811a551c94aSIdo Barnea		queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
812a551c94aSIdo Barnea	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
813a551c94aSIdo Barnea		if (i && queue_stride && !(i % queue_stride))
814a551c94aSIdo Barnea			logic_port++;
815a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
816a551c94aSIdo Barnea	}
817a551c94aSIdo Barnea
818a551c94aSIdo Barnea	return 0;
819a551c94aSIdo Barnea}
820a551c94aSIdo Barnea
821a551c94aSIdo Barneastatic int
822a551c94aSIdo Barneafm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
823a551c94aSIdo Barnea{
824a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
825a551c94aSIdo Barnea	int err = -1;
826a551c94aSIdo Barnea	uint32_t reg;
827a551c94aSIdo Barnea	struct fm10k_rx_queue *rxq;
828a551c94aSIdo Barnea
829a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
830a551c94aSIdo Barnea
831a551c94aSIdo Barnea	if (rx_queue_id < dev->data->nb_rx_queues) {
832a551c94aSIdo Barnea		rxq = dev->data->rx_queues[rx_queue_id];
833a551c94aSIdo Barnea		err = rx_queue_reset(rxq);
834a551c94aSIdo Barnea		if (err == -ENOMEM) {
835a551c94aSIdo Barnea			PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
836a551c94aSIdo Barnea			return err;
837a551c94aSIdo Barnea		} else if (err == -EINVAL) {
838a551c94aSIdo Barnea			PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
839a551c94aSIdo Barnea				" %d", err);
840a551c94aSIdo Barnea			return err;
841a551c94aSIdo Barnea		}
842a551c94aSIdo Barnea
843a551c94aSIdo Barnea		/* Setup the HW Rx Head and Tail Descriptor Pointers
844a551c94aSIdo Barnea		 * Note: this must be done AFTER the queue is enabled on real
845a551c94aSIdo Barnea		 * hardware, but BEFORE the queue is enabled when using the
846a551c94aSIdo Barnea		 * emulation platform. Do it in both places for now and remove
847a551c94aSIdo Barnea		 * this comment and the following two register writes when the
848a551c94aSIdo Barnea		 * emulation platform is no longer being used.
849a551c94aSIdo Barnea		 */
850a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
851a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
852a551c94aSIdo Barnea
853a551c94aSIdo Barnea		/* Set PF ownership flag for PF devices */
854a551c94aSIdo Barnea		reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
855a551c94aSIdo Barnea		if (hw->mac.type == fm10k_mac_pf)
856a551c94aSIdo Barnea			reg |= FM10K_RXQCTL_PF;
857a551c94aSIdo Barnea		reg |= FM10K_RXQCTL_ENABLE;
858a551c94aSIdo Barnea		/* enable RX queue */
859a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
860a551c94aSIdo Barnea		FM10K_WRITE_FLUSH(hw);
861a551c94aSIdo Barnea
862a551c94aSIdo Barnea		/* Setup the HW Rx Head and Tail Descriptor Pointers
863a551c94aSIdo Barnea		 * Note: this must be done AFTER the queue is enabled
864a551c94aSIdo Barnea		 */
865a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
866a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
867a551c94aSIdo Barnea		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
868a551c94aSIdo Barnea	}
869a551c94aSIdo Barnea
870a551c94aSIdo Barnea	return err;
871a551c94aSIdo Barnea}
872a551c94aSIdo Barnea
873a551c94aSIdo Barneastatic int
874a551c94aSIdo Barneafm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
875a551c94aSIdo Barnea{
876a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
877a551c94aSIdo Barnea
878a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
879a551c94aSIdo Barnea
880a551c94aSIdo Barnea	if (rx_queue_id < dev->data->nb_rx_queues) {
881a551c94aSIdo Barnea		/* Disable RX queue */
882a551c94aSIdo Barnea		rx_queue_disable(hw, rx_queue_id);
883a551c94aSIdo Barnea
884a551c94aSIdo Barnea		/* Free mbuf and clean HW ring */
885a551c94aSIdo Barnea		rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
886a551c94aSIdo Barnea		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
887a551c94aSIdo Barnea	}
888a551c94aSIdo Barnea
889a551c94aSIdo Barnea	return 0;
890a551c94aSIdo Barnea}
891a551c94aSIdo Barnea
892a551c94aSIdo Barneastatic int
893a551c94aSIdo Barneafm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
894a551c94aSIdo Barnea{
895a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
896a551c94aSIdo Barnea	/** @todo - this should be defined in the shared code */
897a551c94aSIdo Barnea#define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY	0x00010000
898a551c94aSIdo Barnea	uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
899a551c94aSIdo Barnea	int err = 0;
900a551c94aSIdo Barnea
901a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
902a551c94aSIdo Barnea
903a551c94aSIdo Barnea	if (tx_queue_id < dev->data->nb_tx_queues) {
904a551c94aSIdo Barnea		struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
905a551c94aSIdo Barnea
906a551c94aSIdo Barnea		q->ops->reset(q);
907a551c94aSIdo Barnea
908a551c94aSIdo Barnea		/* reset head and tail pointers */
909a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
910a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
911a551c94aSIdo Barnea
912a551c94aSIdo Barnea		/* enable TX queue */
913a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
914a551c94aSIdo Barnea					FM10K_TXDCTL_ENABLE | txdctl);
915a551c94aSIdo Barnea		FM10K_WRITE_FLUSH(hw);
916a551c94aSIdo Barnea		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
917a551c94aSIdo Barnea	} else
918a551c94aSIdo Barnea		err = -1;
919a551c94aSIdo Barnea
920a551c94aSIdo Barnea	return err;
921a551c94aSIdo Barnea}
922a551c94aSIdo Barnea
923a551c94aSIdo Barneastatic int
924a551c94aSIdo Barneafm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
925a551c94aSIdo Barnea{
926a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
927a551c94aSIdo Barnea
928a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
929a551c94aSIdo Barnea
930a551c94aSIdo Barnea	if (tx_queue_id < dev->data->nb_tx_queues) {
931a551c94aSIdo Barnea		tx_queue_disable(hw, tx_queue_id);
932a551c94aSIdo Barnea		tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
933a551c94aSIdo Barnea		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
934a551c94aSIdo Barnea	}
935a551c94aSIdo Barnea
936a551c94aSIdo Barnea	return 0;
937a551c94aSIdo Barnea}
938a551c94aSIdo Barnea
939a551c94aSIdo Barneastatic inline int fm10k_glort_valid(struct fm10k_hw *hw)
940a551c94aSIdo Barnea{
941a551c94aSIdo Barnea	return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
942a551c94aSIdo Barnea		!= FM10K_DGLORTMAP_NONE);
943a551c94aSIdo Barnea}
944a551c94aSIdo Barnea
945a551c94aSIdo Barneastatic void
946a551c94aSIdo Barneafm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
947a551c94aSIdo Barnea{
948a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
949a551c94aSIdo Barnea	int status;
950a551c94aSIdo Barnea
951a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
952a551c94aSIdo Barnea
953a551c94aSIdo Barnea	/* Return if it didn't acquire valid glort range */
954a551c94aSIdo Barnea	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
955a551c94aSIdo Barnea		return;
956a551c94aSIdo Barnea
957a551c94aSIdo Barnea	fm10k_mbx_lock(hw);
958a551c94aSIdo Barnea	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
959a551c94aSIdo Barnea				FM10K_XCAST_MODE_PROMISC);
960a551c94aSIdo Barnea	fm10k_mbx_unlock(hw);
961a551c94aSIdo Barnea
962a551c94aSIdo Barnea	if (status != FM10K_SUCCESS)
963a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
964a551c94aSIdo Barnea}
965a551c94aSIdo Barnea
966a551c94aSIdo Barneastatic void
967a551c94aSIdo Barneafm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
968a551c94aSIdo Barnea{
969a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
970a551c94aSIdo Barnea	uint8_t mode;
971a551c94aSIdo Barnea	int status;
972a551c94aSIdo Barnea
973a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
974a551c94aSIdo Barnea
975a551c94aSIdo Barnea	/* Return if it didn't acquire valid glort range */
976a551c94aSIdo Barnea	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
977a551c94aSIdo Barnea		return;
978a551c94aSIdo Barnea
979a551c94aSIdo Barnea	if (dev->data->all_multicast == 1)
980a551c94aSIdo Barnea		mode = FM10K_XCAST_MODE_ALLMULTI;
981a551c94aSIdo Barnea	else
982a551c94aSIdo Barnea		mode = FM10K_XCAST_MODE_NONE;
983a551c94aSIdo Barnea
984a551c94aSIdo Barnea	fm10k_mbx_lock(hw);
985a551c94aSIdo Barnea	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
986a551c94aSIdo Barnea				mode);
987a551c94aSIdo Barnea	fm10k_mbx_unlock(hw);
988a551c94aSIdo Barnea
989a551c94aSIdo Barnea	if (status != FM10K_SUCCESS)
990a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
991a551c94aSIdo Barnea}
992a551c94aSIdo Barnea
993a551c94aSIdo Barneastatic void
994a551c94aSIdo Barneafm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
995a551c94aSIdo Barnea{
996a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
997a551c94aSIdo Barnea	int status;
998a551c94aSIdo Barnea
999a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1000a551c94aSIdo Barnea
1001a551c94aSIdo Barnea	/* Return if it didn't acquire valid glort range */
1002a551c94aSIdo Barnea	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1003a551c94aSIdo Barnea		return;
1004a551c94aSIdo Barnea
1005a551c94aSIdo Barnea	/* If promiscuous mode is enabled, it doesn't make sense to enable
1006a551c94aSIdo Barnea	 * allmulticast and disable promiscuous since fm10k only can select
1007a551c94aSIdo Barnea	 * one of the modes.
1008a551c94aSIdo Barnea	 */
1009a551c94aSIdo Barnea	if (dev->data->promiscuous) {
1010a551c94aSIdo Barnea		PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
1011a551c94aSIdo Barnea			"needn't enable allmulticast");
1012a551c94aSIdo Barnea		return;
1013a551c94aSIdo Barnea	}
1014a551c94aSIdo Barnea
1015a551c94aSIdo Barnea	fm10k_mbx_lock(hw);
1016a551c94aSIdo Barnea	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1017a551c94aSIdo Barnea				FM10K_XCAST_MODE_ALLMULTI);
1018a551c94aSIdo Barnea	fm10k_mbx_unlock(hw);
1019a551c94aSIdo Barnea
1020a551c94aSIdo Barnea	if (status != FM10K_SUCCESS)
1021a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
1022a551c94aSIdo Barnea}
1023a551c94aSIdo Barnea
1024a551c94aSIdo Barneastatic void
1025a551c94aSIdo Barneafm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1026a551c94aSIdo Barnea{
1027a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1028a551c94aSIdo Barnea	int status;
1029a551c94aSIdo Barnea
1030a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1031a551c94aSIdo Barnea
1032a551c94aSIdo Barnea	/* Return if it didn't acquire valid glort range */
1033a551c94aSIdo Barnea	if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1034a551c94aSIdo Barnea		return;
1035a551c94aSIdo Barnea
1036a551c94aSIdo Barnea	if (dev->data->promiscuous) {
1037a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1038a551c94aSIdo Barnea			"since promisc mode is enabled");
1039a551c94aSIdo Barnea		return;
1040a551c94aSIdo Barnea	}
1041a551c94aSIdo Barnea
1042a551c94aSIdo Barnea	fm10k_mbx_lock(hw);
1043a551c94aSIdo Barnea	/* Change mode to unicast mode */
1044a551c94aSIdo Barnea	status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1045a551c94aSIdo Barnea				FM10K_XCAST_MODE_NONE);
1046a551c94aSIdo Barnea	fm10k_mbx_unlock(hw);
1047a551c94aSIdo Barnea
1048a551c94aSIdo Barnea	if (status != FM10K_SUCCESS)
1049a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1050a551c94aSIdo Barnea}
1051a551c94aSIdo Barnea
1052a551c94aSIdo Barneastatic void
1053a551c94aSIdo Barneafm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1054a551c94aSIdo Barnea{
1055a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1056a551c94aSIdo Barnea	uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1057a551c94aSIdo Barnea	uint16_t nb_queue_pools;
1058a551c94aSIdo Barnea	struct fm10k_macvlan_filter_info *macvlan;
1059a551c94aSIdo Barnea
1060a551c94aSIdo Barnea	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1061a551c94aSIdo Barnea	nb_queue_pools = macvlan->nb_queue_pools;
1062a551c94aSIdo Barnea	pool_len = nb_queue_pools ? fls(nb_queue_pools - 1) : 0;
1063a551c94aSIdo Barnea	rss_len = fls(dev->data->nb_rx_queues - 1) - pool_len;
1064a551c94aSIdo Barnea
1065a551c94aSIdo Barnea	/* GLORT 0x0-0x3F are used by PF and VMDQ,  0x40-0x7F used by FD */
1066a551c94aSIdo Barnea	dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
1067a551c94aSIdo Barnea	dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1068a551c94aSIdo Barnea			hw->mac.dglort_map;
1069a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1070a551c94aSIdo Barnea	/* Configure VMDQ/RSS DGlort Decoder */
1071a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1072a551c94aSIdo Barnea
1073a551c94aSIdo Barnea	/* Flow Director configurations, only queue number is valid. */
1074a551c94aSIdo Barnea	dglortdec = fls(dev->data->nb_rx_queues - 1);
1075a551c94aSIdo Barnea	dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1076a551c94aSIdo Barnea			(hw->mac.dglort_map + GLORT_FD_Q_BASE);
1077a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1078a551c94aSIdo Barnea	FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1079a551c94aSIdo Barnea
1080a551c94aSIdo Barnea	/* Invalidate all other GLORT entries */
1081a551c94aSIdo Barnea	for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1082a551c94aSIdo Barnea		FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1083a551c94aSIdo Barnea				FM10K_DGLORTMAP_NONE);
1084a551c94aSIdo Barnea}
1085a551c94aSIdo Barnea
1086a551c94aSIdo Barnea#define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1087a551c94aSIdo Barneastatic int
1088a551c94aSIdo Barneafm10k_dev_start(struct rte_eth_dev *dev)
1089a551c94aSIdo Barnea{
1090a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1091a551c94aSIdo Barnea	int i, diag;
1092a551c94aSIdo Barnea
1093a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1094a551c94aSIdo Barnea
1095a551c94aSIdo Barnea	/* stop, init, then start the hw */
1096a551c94aSIdo Barnea	diag = fm10k_stop_hw(hw);
1097a551c94aSIdo Barnea	if (diag != FM10K_SUCCESS) {
1098a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1099a551c94aSIdo Barnea		return -EIO;
1100a551c94aSIdo Barnea	}
1101a551c94aSIdo Barnea
1102a551c94aSIdo Barnea	diag = fm10k_init_hw(hw);
1103a551c94aSIdo Barnea	if (diag != FM10K_SUCCESS) {
1104a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1105a551c94aSIdo Barnea		return -EIO;
1106a551c94aSIdo Barnea	}
1107a551c94aSIdo Barnea
1108a551c94aSIdo Barnea	diag = fm10k_start_hw(hw);
1109a551c94aSIdo Barnea	if (diag != FM10K_SUCCESS) {
1110a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1111a551c94aSIdo Barnea		return -EIO;
1112a551c94aSIdo Barnea	}
1113a551c94aSIdo Barnea
1114a551c94aSIdo Barnea	diag = fm10k_dev_tx_init(dev);
1115a551c94aSIdo Barnea	if (diag) {
1116a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1117a551c94aSIdo Barnea		return diag;
1118a551c94aSIdo Barnea	}
1119a551c94aSIdo Barnea
1120a551c94aSIdo Barnea	if (fm10k_dev_rxq_interrupt_setup(dev))
1121a551c94aSIdo Barnea		return -EIO;
1122a551c94aSIdo Barnea
1123a551c94aSIdo Barnea	diag = fm10k_dev_rx_init(dev);
1124a551c94aSIdo Barnea	if (diag) {
1125a551c94aSIdo Barnea		PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1126a551c94aSIdo Barnea		return diag;
1127a551c94aSIdo Barnea	}
1128a551c94aSIdo Barnea
1129a551c94aSIdo Barnea	if (hw->mac.type == fm10k_mac_pf)
1130a551c94aSIdo Barnea		fm10k_dev_dglort_map_configure(dev);
1131a551c94aSIdo Barnea
1132a551c94aSIdo Barnea	for (i = 0; i < dev->data->nb_rx_queues; i++) {
1133a551c94aSIdo Barnea		struct fm10k_rx_queue *rxq;
1134a551c94aSIdo Barnea		rxq = dev->data->rx_queues[i];
1135a551c94aSIdo Barnea
1136a551c94aSIdo Barnea		if (rxq->rx_deferred_start)
1137a551c94aSIdo Barnea			continue;
1138a551c94aSIdo Barnea		diag = fm10k_dev_rx_queue_start(dev, i);
1139a551c94aSIdo Barnea		if (diag != 0) {
1140a551c94aSIdo Barnea			int j;
1141a551c94aSIdo Barnea			for (j = 0; j < i; ++j)
1142a551c94aSIdo Barnea				rx_queue_clean(dev->data->rx_queues[j]);
1143a551c94aSIdo Barnea			return diag;
1144a551c94aSIdo Barnea		}
1145a551c94aSIdo Barnea	}
1146a551c94aSIdo Barnea
1147a551c94aSIdo Barnea	for (i = 0; i < dev->data->nb_tx_queues; i++) {
1148a551c94aSIdo Barnea		struct fm10k_tx_queue *txq;
1149a551c94aSIdo Barnea		txq = dev->data->tx_queues[i];
1150a551c94aSIdo Barnea
1151a551c94aSIdo Barnea		if (txq->tx_deferred_start)
1152a551c94aSIdo Barnea			continue;
1153a551c94aSIdo Barnea		diag = fm10k_dev_tx_queue_start(dev, i);
1154a551c94aSIdo Barnea		if (diag != 0) {
1155a551c94aSIdo Barnea			int j;
1156a551c94aSIdo Barnea			for (j = 0; j < i; ++j)
1157a551c94aSIdo Barnea				tx_queue_clean(dev->data->tx_queues[j]);
1158a551c94aSIdo Barnea			for (j = 0; j < dev->data->nb_rx_queues; ++j)
1159a551c94aSIdo Barnea				rx_queue_clean(dev->data->rx_queues[j]);
1160a551c94aSIdo Barnea			return diag;
1161a551c94aSIdo Barnea		}
1162a551c94aSIdo Barnea	}
1163a551c94aSIdo Barnea
1164a551c94aSIdo Barnea	/* Update default vlan when not in VMDQ mode */
1165a551c94aSIdo Barnea	if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1166a551c94aSIdo Barnea		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1167a551c94aSIdo Barnea
1168a551c94aSIdo Barnea	return 0;
1169a551c94aSIdo Barnea}
1170a551c94aSIdo Barnea
1171a551c94aSIdo Barneastatic void
1172a551c94aSIdo Barneafm10k_dev_stop(struct rte_eth_dev *dev)
1173a551c94aSIdo Barnea{
1174a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
11759ca4a157SIdo Barnea	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
11769ca4a157SIdo Barnea	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1177a551c94aSIdo Barnea	int i;
1178a551c94aSIdo Barnea
1179a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1180a551c94aSIdo Barnea
1181a551c94aSIdo Barnea	if (dev->data->tx_queues)
1182a551c94aSIdo Barnea		for (i = 0; i < dev->data->nb_tx_queues; i++)
1183a551c94aSIdo Barnea			fm10k_dev_tx_queue_stop(dev, i);
1184a551c94aSIdo Barnea
1185a551c94aSIdo Barnea	if (dev->data->rx_queues)
1186a551c94aSIdo Barnea		for (i = 0; i < dev->data->nb_rx_queues; i++)
1187a551c94aSIdo Barnea			fm10k_dev_rx_queue_stop(dev, i);
1188a551c94aSIdo Barnea
1189a551c94aSIdo Barnea	/* Disable datapath event */
1190a551c94aSIdo Barnea	if (rte_intr_dp_is_en(intr_handle)) {
1191a551c94aSIdo Barnea		for (i = 0; i < dev->data->nb_rx_queues; i++) {
1192a551c94aSIdo Barnea			FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1193a551c94aSIdo Barnea				3 << FM10K_RXINT_TIMER_SHIFT);
1194a551c94aSIdo Barnea			if (hw->mac.type == fm10k_mac_pf)
11959ca4a157SIdo Barnea				FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1196a551c94aSIdo Barnea					FM10K_ITR_MASK_SET);
1197a551c94aSIdo Barnea			else
11989ca4a157SIdo Barnea				FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1199a551c94aSIdo Barnea					FM10K_ITR_MASK_SET);
1200a551c94aSIdo Barnea		}
1201a551c94aSIdo Barnea	}
1202a551c94aSIdo Barnea	/* Clean datapath event and queue/vec mapping */
1203a551c94aSIdo Barnea	rte_intr_efd_disable(intr_handle);
1204a551c94aSIdo Barnea	rte_free(intr_handle->intr_vec);
1205a551c94aSIdo Barnea	intr_handle->intr_vec = NULL;
1206a551c94aSIdo Barnea}
1207a551c94aSIdo Barnea
1208a551c94aSIdo Barneastatic void
1209a551c94aSIdo Barneafm10k_dev_queue_release(struct rte_eth_dev *dev)
1210a551c94aSIdo Barnea{
1211a551c94aSIdo Barnea	int i;
1212a551c94aSIdo Barnea
1213a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1214a551c94aSIdo Barnea
1215a551c94aSIdo Barnea	if (dev->data->tx_queues) {
1216a551c94aSIdo Barnea		for (i = 0; i < dev->data->nb_tx_queues; i++) {
1217a551c94aSIdo Barnea			struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1218a551c94aSIdo Barnea
1219a551c94aSIdo Barnea			tx_queue_free(txq);
1220a551c94aSIdo Barnea		}
1221a551c94aSIdo Barnea	}
1222a551c94aSIdo Barnea
1223a551c94aSIdo Barnea	if (dev->data->rx_queues) {
1224a551c94aSIdo Barnea		for (i = 0; i < dev->data->nb_rx_queues; i++)
1225a551c94aSIdo Barnea			fm10k_rx_queue_release(dev->data->rx_queues[i]);
1226a551c94aSIdo Barnea	}
1227a551c94aSIdo Barnea}
1228a551c94aSIdo Barnea
1229a551c94aSIdo Barneastatic void
1230a551c94aSIdo Barneafm10k_dev_close(struct rte_eth_dev *dev)
1231a551c94aSIdo Barnea{
1232a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1233a551c94aSIdo Barnea
1234a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1235a551c94aSIdo Barnea
1236a551c94aSIdo Barnea	fm10k_mbx_lock(hw);
1237a551c94aSIdo Barnea	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1238a551c94aSIdo Barnea		MAX_LPORT_NUM, false);
1239a551c94aSIdo Barnea	fm10k_mbx_unlock(hw);
1240a551c94aSIdo Barnea
12419ca4a157SIdo Barnea	/* allow 10ms for device to quiesce */
12429ca4a157SIdo Barnea	rte_delay_us(FM10K_SWITCH_QUIESCE_US);
12439ca4a157SIdo Barnea
1244a551c94aSIdo Barnea	/* Stop mailbox service first */
1245a551c94aSIdo Barnea	fm10k_close_mbx_service(hw);
1246a551c94aSIdo Barnea	fm10k_dev_stop(dev);
1247a551c94aSIdo Barnea	fm10k_dev_queue_release(dev);
1248a551c94aSIdo Barnea	fm10k_stop_hw(hw);
1249a551c94aSIdo Barnea}
1250a551c94aSIdo Barnea
1251a551c94aSIdo Barneastatic int
1252a551c94aSIdo Barneafm10k_link_update(struct rte_eth_dev *dev,
1253a551c94aSIdo Barnea	__rte_unused int wait_to_complete)
1254a551c94aSIdo Barnea{
1255a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1256a551c94aSIdo Barnea
1257a551c94aSIdo Barnea	/* The host-interface link is always up.  The speed is ~50Gbps per Gen3
1258a551c94aSIdo Barnea	 * x8 PCIe interface. For now, we leave the speed undefined since there
1259a551c94aSIdo Barnea	 * is no 50Gbps Ethernet. */
1260a551c94aSIdo Barnea	dev->data->dev_link.link_speed  = 0;
1261a551c94aSIdo Barnea	dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1262a551c94aSIdo Barnea	dev->data->dev_link.link_status = ETH_LINK_UP;
1263a551c94aSIdo Barnea
1264a551c94aSIdo Barnea	return 0;
1265a551c94aSIdo Barnea}
1266a551c94aSIdo Barnea
1267a551c94aSIdo Barneastatic int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1268a551c94aSIdo Barnea	struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1269a551c94aSIdo Barnea{
1270a551c94aSIdo Barnea	unsigned i, q;
1271a551c94aSIdo Barnea	unsigned count = 0;
1272a551c94aSIdo Barnea
1273a551c94aSIdo Barnea	if (xstats_names != NULL) {
1274a551c94aSIdo Barnea		/* Note: limit checked in rte_eth_xstats_names() */
1275a551c94aSIdo Barnea
1276a551c94aSIdo Barnea		/* Global stats */
1277a551c94aSIdo Barnea		for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1278a551c94aSIdo Barnea			snprintf(xstats_names[count].name,
1279a551c94aSIdo Barnea				sizeof(xstats_names[count].name),
1280a551c94aSIdo Barnea				"%s", fm10k_hw_stats_strings[count].name);
1281a551c94aSIdo Barnea			count++;
1282a551c94aSIdo Barnea		}
1283a551c94aSIdo Barnea
1284a551c94aSIdo Barnea		/* PF queue stats */
1285a551c94aSIdo Barnea		for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1286a551c94aSIdo Barnea			for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1287a551c94aSIdo Barnea				snprintf(xstats_names[count].name,
1288a551c94aSIdo Barnea					sizeof(xstats_names[count].name),
1289a551c94aSIdo Barnea					"rx_q%u_%s", q,
1290a551c94aSIdo Barnea					fm10k_hw_stats_rx_q_strings[i].name);
1291a551c94aSIdo Barnea				count++;
1292a551c94aSIdo Barnea			}
1293a551c94aSIdo Barnea			for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1294a551c94aSIdo Barnea				snprintf(xstats_names[count].name,
1295a551c94aSIdo Barnea					sizeof(xstats_names[count].name),
1296a551c94aSIdo Barnea					"tx_q%u_%s", q,
1297a551c94aSIdo Barnea					fm10k_hw_stats_tx_q_strings[i].name);
1298a551c94aSIdo Barnea				count++;
1299a551c94aSIdo Barnea			}
1300a551c94aSIdo Barnea		}
1301a551c94aSIdo Barnea	}
1302a551c94aSIdo Barnea	return FM10K_NB_XSTATS;
1303a551c94aSIdo Barnea}
1304a551c94aSIdo Barnea
1305a551c94aSIdo Barneastatic int
1306a551c94aSIdo Barneafm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1307a551c94aSIdo Barnea		 unsigned n)
1308a551c94aSIdo Barnea{
1309a551c94aSIdo Barnea	struct fm10k_hw_stats *hw_stats =
1310a551c94aSIdo Barnea		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1311a551c94aSIdo Barnea	unsigned i, q, count = 0;
1312a551c94aSIdo Barnea
1313a551c94aSIdo Barnea	if (n < FM10K_NB_XSTATS)
1314a551c94aSIdo Barnea		return FM10K_NB_XSTATS;
1315a551c94aSIdo Barnea
1316a551c94aSIdo Barnea	/* Global stats */
1317a551c94aSIdo Barnea	for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1318a551c94aSIdo Barnea		xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1319a551c94aSIdo Barnea			fm10k_hw_stats_strings[count].offset);
13209ca4a157SIdo Barnea		xstats[count].id = count;
1321a551c94aSIdo Barnea		count++;
1322a551c94aSIdo Barnea	}
1323a551c94aSIdo Barnea
1324a551c94aSIdo Barnea	/* PF queue stats */
1325a551c94aSIdo Barnea	for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1326a551c94aSIdo Barnea		for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1327a551c94aSIdo Barnea			xstats[count].value =
1328a551c94aSIdo Barnea				*(uint64_t *)(((char *)&hw_stats->q[q]) +
1329a551c94aSIdo Barnea				fm10k_hw_stats_rx_q_strings[i].offset);
13309ca4a157SIdo Barnea			xstats[count].id = count;
1331a551c94aSIdo Barnea			count++;
1332a551c94aSIdo Barnea		}
1333a551c94aSIdo Barnea		for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1334a551c94aSIdo Barnea			xstats[count].value =
1335a551c94aSIdo Barnea				*(uint64_t *)(((char *)&hw_stats->q[q]) +
1336a551c94aSIdo Barnea				fm10k_hw_stats_tx_q_strings[i].offset);
13379ca4a157SIdo Barnea			xstats[count].id = count;
1338a551c94aSIdo Barnea			count++;
1339a551c94aSIdo Barnea		}
1340a551c94aSIdo Barnea	}
1341a551c94aSIdo Barnea
1342a551c94aSIdo Barnea	return FM10K_NB_XSTATS;
1343a551c94aSIdo Barnea}
1344a551c94aSIdo Barnea
1345a551c94aSIdo Barneastatic void
1346a551c94aSIdo Barneafm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1347a551c94aSIdo Barnea{
1348a551c94aSIdo Barnea	uint64_t ipackets, opackets, ibytes, obytes;
1349a551c94aSIdo Barnea	struct fm10k_hw *hw =
1350a551c94aSIdo Barnea		FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1351a551c94aSIdo Barnea	struct fm10k_hw_stats *hw_stats =
1352a551c94aSIdo Barnea		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1353a551c94aSIdo Barnea	int i;
1354a551c94aSIdo Barnea
1355a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1356a551c94aSIdo Barnea
1357a551c94aSIdo Barnea	fm10k_update_hw_stats(hw, hw_stats);
1358a551c94aSIdo Barnea
1359a551c94aSIdo Barnea	ipackets = opackets = ibytes = obytes = 0;
1360a551c94aSIdo Barnea	for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1361a551c94aSIdo Barnea		(i < hw->mac.max_queues); ++i) {
1362a551c94aSIdo Barnea		stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1363a551c94aSIdo Barnea		stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1364a551c94aSIdo Barnea		stats->q_ibytes[i]   = hw_stats->q[i].rx_bytes.count;
1365a551c94aSIdo Barnea		stats->q_obytes[i]   = hw_stats->q[i].tx_bytes.count;
1366a551c94aSIdo Barnea		ipackets += stats->q_ipackets[i];
1367a551c94aSIdo Barnea		opackets += stats->q_opackets[i];
1368a551c94aSIdo Barnea		ibytes   += stats->q_ibytes[i];
1369a551c94aSIdo Barnea		obytes   += stats->q_obytes[i];
1370a551c94aSIdo Barnea	}
1371a551c94aSIdo Barnea	stats->ipackets = ipackets;
1372a551c94aSIdo Barnea	stats->opackets = opackets;
1373a551c94aSIdo Barnea	stats->ibytes = ibytes;
1374a551c94aSIdo Barnea	stats->obytes = obytes;
1375a551c94aSIdo Barnea}
1376a551c94aSIdo Barnea
1377a551c94aSIdo Barneastatic void
1378a551c94aSIdo Barneafm10k_stats_reset(struct rte_eth_dev *dev)
1379a551c94aSIdo Barnea{
1380a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1381a551c94aSIdo Barnea	struct fm10k_hw_stats *hw_stats =
1382a551c94aSIdo Barnea		FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1383a551c94aSIdo Barnea
1384a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1385a551c94aSIdo Barnea
1386a551c94aSIdo Barnea	memset(hw_stats, 0, sizeof(*hw_stats));
1387a551c94aSIdo Barnea	fm10k_rebind_hw_stats(hw, hw_stats);
1388a551c94aSIdo Barnea}
1389a551c94aSIdo Barnea
1390a551c94aSIdo Barneastatic void
1391a551c94aSIdo Barneafm10k_dev_infos_get(struct rte_eth_dev *dev,
1392a551c94aSIdo Barnea	struct rte_eth_dev_info *dev_info)
1393a551c94aSIdo Barnea{
1394a551c94aSIdo Barnea	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
13959ca4a157SIdo Barnea	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(dev->device);
1396a551c94aSIdo Barnea
1397a551c94aSIdo Barnea	PMD_INIT_FUNC_TRACE();
1398a551c94aSIdo Barnea
13999ca4a157SIdo Barnea	dev_info->pci_dev            = pdev;
1400a551c94aSIdo Barnea	dev_info->min_rx_bufsize     = FM10K_MIN_RX_BUF_SIZE;
1401a551c94aSIdo Barnea	dev_info->max_rx_pktlen      = FM10K_MAX_PKT_SIZE;
1402a551c94aSIdo Barnea	dev_info->max_rx_queues      = hw->mac.max_queues;
1403a551c94aSIdo Barnea	dev_info->max_tx_queues      = hw->mac.max_queues;
1404a551c94aSIdo Barnea	dev_info->max_mac_addrs      = FM10K_MAX_MACADDR_NUM;
1405a551c94aSIdo Barnea	dev_info->max_hash_mac_addrs = 0;
14069ca4a157SIdo Barnea	dev_info->max_vfs            = pdev->max_vfs;
1407a551c94aSIdo Barnea	dev_info->vmdq_pool_base     = 0;
1408a551c94aSIdo Barnea	dev_info->vmdq_queue_base    = 0;
1409a551c94aSIdo Barnea	dev_info->max_vmdq_pools     = ETH_32_POOLS;
1410a551c94aSIdo Barnea	dev_info->vmdq_queue_num     = FM10K_MAX_QUEUES_PF;
1411a551c94aSIdo Barnea	dev_info->rx_offload_capa =
1412a551c94aSIdo Barnea		DEV_RX_OFFLOAD_VLAN_STRIP |
1413a551c94aSIdo Barnea		DEV_RX_OFFLOAD_IPV4_CKSUM |
1414a551c94aSIdo Barnea		DEV_RX_OFFLOAD_UDP_CKSUM  |
1415a551c94aSIdo Barnea		DEV_RX_OFFLOAD_TCP_CKSUM;
1416a551c94aSIdo Barnea	dev_info->tx_offload_capa =
1417a551c94aSIdo Barnea		DEV_TX_OFFLOAD_VLAN_INSERT |
1418a551c94aSIdo Barnea		DEV_TX_OFFLOAD_IPV4_CKSUM  |
1419a551c94aSIdo Barnea		DEV_TX_OFFLOAD_UDP_CKSUM   |
1420a551c94aSIdo Barnea		DEV_TX_OFFLOAD_TCP_CKSUM   |
1421a551c94aSIdo Barnea		DEV_TX_OFFLOAD_TCP_TSO;
1422a551c94aSIdo Barnea
1423a551c94aSIdo Barnea	dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1424a551c94aSIdo Barnea	dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1425a551c94aSIdo Barnea
1426a551c94aSIdo Barnea	dev_info->default_rxconf = (struct rte_eth_rxconf) {
1427a551c94aSIdo Barnea		.rx_thresh = {
1428a551c94aSIdo Barnea			.pthresh = FM10K_DEFAULT_RX_PTHRESH,
1429a551c94aSIdo Barnea			.hthresh = FM10K_DEFAULT_RX_HTHRESH,
1430a551c94aSIdo Barnea			.wthresh = FM10K_DEFAULT_RX_WTHRESH,
1431a551c94aSIdo Barnea		},
1432a551c94aSIdo Barnea		.rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1433a551c94aSIdo Barnea		.rx_drop_en = 0,
1434a551c94aSIdo Barnea	};
1435a551c94aSIdo Barnea
1436a551c94aSIdo Barnea	dev_info->default_txconf = (struct rte_eth_txconf) {
1437a551c94aSIdo Barnea		.tx_thresh = {
1438a551c94aSIdo Barnea			.pthresh = FM10K_DEFAULT_TX_PTHRESH,
1439a551c94aSIdo Barnea			.hthresh = FM10K_DEFAULT_TX_HTHRESH,
1440a551c94aSIdo Barnea			.wthresh = FM10K_DEFAULT_TX_WTHRESH,
1441a551c94aSIdo Barnea		},
1442a551c94aSIdo Barnea		.tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1443a551c94aSIdo Barnea		.tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1444a551c94aSIdo Barnea		.txq_flags = FM10K_SIMPLE_TX_FLAG,
1445a551c94aSIdo Barnea	};
1446a551c94aSIdo Barnea
1447a551c94aSIdo Barnea	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1448a551c94aSIdo Barnea		.nb_max = FM10K_MAX_RX_DESC,
1449a551c94aSIdo Barnea		.nb_min = FM10K_MIN_RX_DESC,
1450a551c94aSIdo Barnea		.nb_align = FM10K_MULT_RX_DESC,
1451a551c94aSIdo Barnea	};
1452a551c94aSIdo Barnea
1453a551c94aSIdo Barnea	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1454a551c94aSIdo Barnea		.nb_max = FM10K_MAX_TX_DESC,
1455a551c94aSIdo Barnea		.nb_min = FM10K_MIN_TX_DESC,
1456a551c94aSIdo Barnea		.nb_align = FM10K_MULT_TX_DESC,
14579ca4a157SIdo Barnea		.nb_seg_max = FM10K_TX_MAX_SEG,
14589ca4a157SIdo Barnea		.nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1459a551c94aSIdo Barnea	};
1460a551c94aSIdo Barnea
1461a551c94aSIdo Barnea	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1462a551c94aSIdo Barnea			ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1463a551c94aSIdo Barnea			ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1464a551c94aSIdo Barnea}
1465