/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"

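/*
 * Start an L2 vport on every hwfn of the device. The qed_start_vport_params
 * from the upper layer are translated into ecore_sp_vport_start_params and
 * handed to ecore_sp_vport_start(); vport statistics are reset afterwards.
 */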
static int
qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params)
{
	int rc, i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
		u8 tx_switching = 0;
		struct ecore_sp_vport_start_params start = { 0 };

		start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO :
		    ECORE_TPA_MODE_NONE;
		start.remove_inner_vlan = p_params->remove_inner_vlan;
		start.tx_switching = tx_switching;
		start.only_untagged = false;	/* untagged only */
		start.drop_ttl0 = p_params->drop_ttl0;
		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
		start.handle_ptp_pkts = p_params->handle_ptp_pkts;
		start.vport_id = p_params->vport_id;
		start.max_buffers_per_cqe = 16;	/* TODO-is this right */
		start.mtu = p_params->mtu;
		/* @DPDK - Disable FW placement */
		start.zero_placement_offset = 1;

		rc = ecore_sp_vport_start(p_hwfn, &start);
		if (rc) {
			DP_ERR(edev, "Failed to start VPORT\n");
			return rc;
		}

		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "Started V-PORT %d with MTU %d\n",
			   p_params->vport_id, p_params->mtu);
	}

	ecore_reset_vport_stats(edev);

	return 0;
}

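/* Stop the given vport on every hwfn of the device. */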
static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id)
{
	int rc, i;

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];
		rc = ecore_sp_vport_stop(p_hwfn,
					 p_hwfn->hw_info.opaque_fid, vport_id);

		if (rc) {
			DP_ERR(edev, "Failed to stop VPORT\n");
			return rc;
		}
	}

	return 0;
}

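/*
 * Update vport state (activity, VLAN handling, TX switching, MTU and RSS).
 * The protocol-level parameters are translated into
 * ecore_sp_vport_update_params and applied to every hwfn.
 */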
static int
qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params)
{
	struct ecore_sp_vport_update_params sp_params;
	struct ecore_rss_params sp_rss_params;
	int rc, i;

	memset(&sp_params, 0, sizeof(sp_params));
	memset(&sp_rss_params, 0, sizeof(sp_rss_params));

	/* Translate protocol params into sp params */
	sp_params.vport_id = params->vport_id;
	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
	sp_params.vport_active_rx_flg = params->vport_active_flg;
	sp_params.vport_active_tx_flg = params->vport_active_flg;
	sp_params.update_inner_vlan_removal_flg =
	    params->update_inner_vlan_removal_flg;
	sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg;
	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
	sp_params.tx_switching_flg = params->tx_switching_flg;
	sp_params.accept_any_vlan = params->accept_any_vlan;
	sp_params.update_accept_any_vlan_flg =
	    params->update_accept_any_vlan_flg;
	sp_params.mtu = params->mtu;

	/* RSS is a bit tricky, since the upper layer isn't familiar with
	 * hwfns; we need to re-fix the RSS values per engine for CMT.
	 */

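	/* Worked example (illustration only, not taken from the original
	 * code): with two hwfns and a largest indirection-table entry of 5,
	 * divisor = (5 + 2 - 1) / 2 = 3, so every entry is reduced modulo 3
	 * and stays within a single engine's queue range.
	 */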
	if (edev->num_hwfns > 1 && params->update_rss_flg) {
		struct qed_update_vport_rss_params *rss = &params->rss_params;
		int k, max = 0;

		/* Find largest entry, since it's possible RSS needs to
		 * be disabled [in case only 1 queue per-hwfn]
		 */
		for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
			max = (max > rss->rss_ind_table[k]) ?
			    max : rss->rss_ind_table[k];

		/* Either fix RSS values or disable RSS */
		if (edev->num_hwfns < max + 1) {
			int divisor = (max + edev->num_hwfns - 1) /
			    edev->num_hwfns;

			DP_VERBOSE(edev, ECORE_MSG_SPQ,
				   "CMT - fixing RSS values (modulo %02x)\n",
				   divisor);

			for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++)
				rss->rss_ind_table[k] =
				    rss->rss_ind_table[k] % divisor;
		} else {
			DP_VERBOSE(edev, ECORE_MSG_SPQ,
				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
			params->update_rss_flg = 0;
		}
	}

	/* Now build the actual RSS configuration for the vport update */
	if (params->update_rss_flg) {
		sp_rss_params.update_rss_config = 1;
		sp_rss_params.rss_enable = 1;
		sp_rss_params.update_rss_capabilities = 1;
		sp_rss_params.update_rss_ind_table = 1;
		sp_rss_params.update_rss_key = 1;
		sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 |
		    ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP;
		sp_rss_params.rss_table_size_log = 7;	/* 2^7 = 128 */
		rte_memcpy(sp_rss_params.rss_ind_table,
			   params->rss_params.rss_ind_table,
			   ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t));
		rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
			   ECORE_RSS_KEY_SIZE * sizeof(uint32_t));
		sp_params.rss_params = &sp_rss_params;
	}

	for_each_hwfn(edev, i) {
		struct ecore_hwfn *p_hwfn = &edev->hwfns[i];

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
					   ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			DP_ERR(edev, "Failed to update VPORT\n");
			return rc;
		}

		DP_VERBOSE(edev, ECORE_MSG_SPQ,
			   "Updated V-PORT %d: active_flag %d [update %d]\n",
			   params->vport_id, params->vport_active_flg,
			   params->update_vport_active_flg);
	}

	return 0;
}

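/*
 * Start an RX queue. The hwfn is selected as rss_num % num_hwfns and the
 * queue id is rescaled to a per-hwfn id before calling
 * ecore_sp_eth_rx_queue_start().
 */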
static int
qed_start_rxq(struct ecore_dev *edev,
	      uint8_t rss_num,
	      struct ecore_queue_start_common_params *p_params,
	      uint16_t bd_max_bytes,
	      dma_addr_t bd_chain_phys_addr,
	      dma_addr_t cqe_pbl_addr,
	      uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = ecore_sp_eth_rx_queue_start(p_hwfn,
					 p_hwfn->hw_info.opaque_fid,
					 p_params,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr, cqe_pbl_size, pp_prod);

	if (rc) {
		DP_ERR(edev, "Failed to start RXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(edev, ECORE_MSG_SPQ,
		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

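/* Stop an RX queue on the hwfn selected by rss_id % num_hwfns. */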
static int
qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params)
{
	int rc, hwfn_index;
	struct ecore_hwfn *p_hwfn;

	hwfn_index = params->rss_id % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
					params->rx_queue_id / edev->num_hwfns,
					params->eq_completion_only, false);
	if (rc) {
		DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
		return rc;
	}

	return 0;
}

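/*
 * Start a TX queue on the hwfn selected by rss_num % num_hwfns; the queue id
 * is rescaled to a per-hwfn id before calling ecore_sp_eth_tx_queue_start().
 */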
static int
qed_start_txq(struct ecore_dev *edev,
	      uint8_t rss_num,
	      struct ecore_queue_start_common_params *p_params,
	      dma_addr_t pbl_addr,
	      uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = rss_num % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	p_params->queue_id = p_params->queue_id / edev->num_hwfns;
	p_params->stats_id = p_params->vport_id;

	rc = ecore_sp_eth_tx_queue_start(p_hwfn,
					 p_hwfn->hw_info.opaque_fid,
					 p_params,
					 0 /* tc */,
					 pbl_addr, pbl_size, pp_doorbell);

	if (rc) {
		DP_ERR(edev, "Failed to start TXQ#%d\n", p_params->queue_id);
		return rc;
	}

	DP_VERBOSE(edev, ECORE_MSG_SPQ,
		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
		   p_params->queue_id, rss_num, p_params->vport_id,
		   p_params->sb);

	return 0;
}

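/* Stop a TX queue on the hwfn selected by rss_id % num_hwfns. */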
static int
qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params)
{
	struct ecore_hwfn *p_hwfn;
	int rc, hwfn_index;

	hwfn_index = params->rss_id % edev->num_hwfns;
	p_hwfn = &edev->hwfns[hwfn_index];

	rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
					params->tx_queue_id / edev->num_hwfns);
	if (rc) {
		DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
		return rc;
	}

	return 0;
}

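/* Hand a slow-path RX CQE to ecore for completion handling. */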
static int
qed_fp_cqe_completion(struct ecore_dev *edev,
		      uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe)
{
	return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns],
					cqe);
}

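/* Stop fast-path processing for the whole device. */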
static int qed_fastpath_stop(struct ecore_dev *edev)
{
	ecore_hw_stop_fastpath(edev);

	return 0;
}

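/* (Re)start fast-path processing on every hwfn. */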
static void qed_fastpath_start(struct ecore_dev *edev)
{
	struct ecore_hwfn *p_hwfn;
	int i;

	for_each_hwfn(edev, i) {
		p_hwfn = &edev->hwfns[i];
		ecore_hw_start_fastpath(p_hwfn);
	}
}

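/* Retrieve the aggregated vport statistics from ecore. */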
static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
	ecore_get_vport_stats(edev, stats);
}

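/*
 * Configure a unicast MAC/VLAN filter. At least one of MAC or VLAN must be
 * valid; the filter is applied to both the RX and TX directions.
 */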
static int
qed_configure_filter_ucast(struct ecore_dev *edev,
			   struct qed_filter_ucast_params *params)
{
	struct ecore_filter_ucast ucast;

	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(edev, true,
			  "Tried configuring a unicast filter, "
			  "but neither MAC nor VLAN is set\n");
		return -EINVAL;
	}

	memset(&ucast, 0, sizeof(ucast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		ucast.opcode = ECORE_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		ucast.opcode = ECORE_FILTER_REMOVE;
		break;
	case QED_FILTER_XCAST_TYPE_REPLACE:
		ucast.opcode = ECORE_FILTER_REPLACE;
		break;
	default:
		DP_NOTICE(edev, true, "Unknown unicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	if (params->vlan_valid && params->mac_valid) {
		ucast.type = ECORE_FILTER_MAC_VLAN;
		ether_addr_copy((struct ether_addr *)&params->mac,
				(struct ether_addr *)&ucast.mac);
		ucast.vlan = params->vlan;
	} else if (params->mac_valid) {
		ucast.type = ECORE_FILTER_MAC;
		ether_addr_copy((struct ether_addr *)&params->mac,
				(struct ether_addr *)&ucast.mac);
	} else {
		ucast.type = ECORE_FILTER_VLAN;
		ucast.vlan = params->vlan;
	}

	ucast.is_rx_filter = true;
	ucast.is_tx_filter = true;

	return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
}

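/* Add or remove a list of multicast MAC filters. */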
static int
qed_configure_filter_mcast(struct ecore_dev *edev,
			   struct qed_filter_mcast_params *params)
{
	struct ecore_filter_mcast mcast;
	int i;

	memset(&mcast, 0, sizeof(mcast));
	switch (params->type) {
	case QED_FILTER_XCAST_TYPE_ADD:
		mcast.opcode = ECORE_FILTER_ADD;
		break;
	case QED_FILTER_XCAST_TYPE_DEL:
		mcast.opcode = ECORE_FILTER_REMOVE;
		break;
	default:
		DP_NOTICE(edev, true, "Unknown multicast filter type %d\n",
			  params->type);
		/* Don't fall through with a zeroed opcode */
		return -EINVAL;
	}

	mcast.num_mc_addrs = params->num;
	for (i = 0; i < mcast.num_mc_addrs; i++)
		ether_addr_copy((struct ether_addr *)&params->mac[i],
				(struct ether_addr *)&mcast.mac[i]);

	return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL);
}

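/*
 * Configure the RX accept mode. Matched unicast/multicast and broadcast are
 * always accepted; promiscuous and multicast-promiscuous modes additionally
 * accept unmatched unicast and/or multicast frames.
 */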
int qed_configure_filter_rx_mode(struct ecore_dev *edev,
				 enum qed_filter_rx_mode_type type)
{
	struct ecore_filter_accept_flags flags;

	memset(&flags, 0, sizeof(flags));

	flags.update_rx_mode_config = 1;
	flags.update_tx_mode_config = 1;
	flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
				 ECORE_ACCEPT_MCAST_MATCHED |
				 ECORE_ACCEPT_BCAST;

	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
		if (IS_VF(edev)) {
			flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
			DP_INFO(edev, "Enabling Tx unmatched flag for VF\n");
		}
	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
		flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
	} else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC |
			    QED_FILTER_RX_MODE_TYPE_PROMISC)) {
		flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
					  ECORE_ACCEPT_MCAST_UNMATCHED;
	}

	return ecore_filter_accept_cmd(edev, 0, flags, false, false,
				       ECORE_SPQ_MODE_CB, NULL);
}

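/* Dispatch a filter request to the unicast, multicast or RX-mode handler. */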
static int
qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params)
{
	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return qed_configure_filter_ucast(edev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return qed_configure_filter_mcast(edev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		return qed_configure_filter_rx_mode(edev,
						    params->filter.
						    accept_flags);
	default:
		DP_NOTICE(edev, true, "Unknown filter type %d\n",
			  (int)params->type);
		return -EINVAL;
	}
}

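/* Ethernet ops table exported to the qede PMD via qed_get_eth_ops(). */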
static const struct qed_eth_ops qed_eth_ops_pass = {
	INIT_STRUCT_FIELD(common, &qed_common_ops_pass),
	INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info),
	INIT_STRUCT_FIELD(vport_start, &qed_start_vport),
	INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport),
	INIT_STRUCT_FIELD(vport_update, &qed_update_vport),
	INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq),
	INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq),
	INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq),
	INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq),
	INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion),
	INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop),
	INIT_STRUCT_FIELD(fastpath_start, &qed_fastpath_start),
	INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats),
	INIT_STRUCT_FIELD(filter_config, &qed_configure_filter),
};

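/*
 * Callers obtain the ops table through qed_get_eth_ops() below. A minimal
 * usage sketch (hypothetical caller, not part of this file), assuming an
 * already initialized struct ecore_dev *edev:
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *
 *	ops->fastpath_start(edev);
 *	ops->fastpath_stop(edev);
 */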
const struct qed_eth_ops *qed_get_eth_ops(void)
{
	return &qed_eth_ops_pass;
}