/*
 * Copyright (c) 2013-2015 Brocade Communications Systems, Inc.
 *
 * Copyright (c) 2015 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.bnx2x_pmd for copyright and licensing details.
 */

#include "bnx2x.h"
#include "bnx2x_rxtx.h"

#include <rte_dev.h>

/*
 * The set of PCI devices this driver supports
 */
#define BROADCOM_PCI_VENDOR_ID 0x14E4
static const struct rte_pci_id pci_id_bnx2x_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57711) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_OBS) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_4_10) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_2_20) },
#ifdef RTE_LIBRTE_BNX2X_MF_SUPPORT
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_MF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_MF) },
#endif
	{ .vendor_id = 0, }
};

static const struct rte_pci_id pci_id_bnx2xvf_map[] = {
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57800_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57810_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57811_VF) },
	{ RTE_PCI_DEVICE(BROADCOM_PCI_VENDOR_ID, CHIP_NUM_57840_VF) },
	{ .vendor_id = 0, }
};

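/*
 * Extended statistics: each entry maps an xstat name to offsets within
 * struct bnx2x_eth_stats.  Wide counters are kept by the firmware as
 * _hi/_lo 32-bit halves and are combined with HILO_U64(); entries whose
 * hi and lo offsets are equal are read directly as a single counter
 * (see bnx2x_dev_xstats_get()).
 */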
struct rte_bnx2x_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	uint32_t offset_hi;
	uint32_t offset_lo;
};

static const struct rte_bnx2x_xstats_name_off bnx2x_xstats_strings[] = {
	{"rx_buffer_drops",
		offsetof(struct bnx2x_eth_stats, brb_drop_hi),
		offsetof(struct bnx2x_eth_stats, brb_drop_lo)},
	{"rx_buffer_truncates",
		offsetof(struct bnx2x_eth_stats, brb_truncate_hi),
		offsetof(struct bnx2x_eth_stats, brb_truncate_lo)},
	{"rx_buffer_truncate_discard",
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard),
		offsetof(struct bnx2x_eth_stats, brb_truncate_discard)},
	{"mac_filter_discard",
		offsetof(struct bnx2x_eth_stats, mac_filter_discard),
		offsetof(struct bnx2x_eth_stats, mac_filter_discard)},
	{"no_match_vlan_tag_discard",
		offsetof(struct bnx2x_eth_stats, mf_tag_discard),
		offsetof(struct bnx2x_eth_stats, mf_tag_discard)},
	{"tx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_sent_lo)},
	{"rx_pause",
		offsetof(struct bnx2x_eth_stats, pause_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pause_frames_received_lo)},
	{"tx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_sent_lo)},
	{"rx_priority_flow_control",
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_hi),
		offsetof(struct bnx2x_eth_stats, pfc_frames_received_lo)}
};

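/*
 * Refresh the link state from the hardware and mirror it into
 * dev->data->dev_link (speed, duplex, autoneg and up/down status).
 */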
static void
bnx2x_link_update(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	bnx2x_link_status_update(sc);
	mb();
	dev->data->dev_link.link_speed = sc->link_vars.line_speed;
	switch (sc->link_vars.duplex) {
	case DUPLEX_FULL:
		dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case DUPLEX_HALF:
		dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	}
	dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			ETH_LINK_SPEED_FIXED);
	dev->data->dev_link.link_status = sc->link_vars.link_up;
}

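/*
 * Service a device interrupt: run the legacy interrupt handler, kick the
 * periodic callout when it is scheduled, and re-read the shared-memory
 * link status so that link changes are reflected in dev->data->dev_link.
 */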
static void
bnx2x_interrupt_action(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t link_status;

	PMD_DEBUG_PERIODIC_LOG(INFO, "Interrupt handled");

	bnx2x_intr_legacy(sc, 0);

	if (sc->periodic_flags & PERIODIC_GO)
		bnx2x_periodic_callout(sc);
	link_status = REG_RD(sc, sc->link_params.shmem_base +
			offsetof(struct shmem_region,
				port_mb[sc->link_params.port].link_status));
	if ((link_status & LINK_STATUS_LINK_UP) != dev->data->dev_link.link_status)
		bnx2x_link_update(dev);
}

static void
bnx2x_interrupt_handler(struct rte_intr_handle *handle, void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	bnx2x_interrupt_action(dev);
	rte_intr_enable(handle);
}

/*
 * Device operations (dev_ops): callbacks invoked through the rte_ethdev
 * API on behalf of the application.
 */

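/*
 * Validate the requested queue/MTU configuration and allocate the ILT and
 * host HSI structures needed before the port can be started.
 */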
static int
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;

	if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "The number of TX queues is greater than the number of RX queues");
		return -EINVAL;
	}

	sc->num_queues = MAX(dev->data->nb_rx_queues, dev->data->nb_tx_queues);
	if (sc->num_queues > mp_ncpus) {
		PMD_DRV_LOG(ERR, "The number of queues exceeds the number of CPUs");
		return -EINVAL;
	}

	PMD_DRV_LOG(DEBUG, "num_queues=%d, mtu=%d",
		       sc->num_queues, sc->mtu);

	/* allocate ilt */
	if (bnx2x_alloc_ilt_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_ilt_mem failed");
		return -ENXIO;
	}

	/* allocate the host hardware/software hsi structures */
	if (bnx2x_alloc_hsi_mem(sc) != 0) {
		PMD_DRV_LOG(ERR, "bnx2x_alloc_hsi_mem failed");
		bnx2x_free_ilt_mem(sc);
		return -ENXIO;
	}

	return 0;
}

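/*
 * Bring the port up: initialize the NIC, hook up the interrupt handler for
 * physical functions, and set up the RX path.
 */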
static int
bnx2x_dev_start(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	ret = bnx2x_init(sc);
	if (ret) {
		PMD_DRV_LOG(DEBUG, "bnx2x_init failed (%d)", ret);
		return -1;
	}

	if (IS_PF(sc)) {
		rte_intr_callback_register(&sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);

		if (rte_intr_enable(&sc->pci_dev->intr_handle))
			PMD_DRV_LOG(ERR, "rte_intr_enable failed");
	}

	ret = bnx2x_dev_rx_init(dev);
	if (ret != 0) {
		PMD_DRV_LOG(DEBUG, "bnx2x_dev_rx_init failed (%d)", ret);
		return -3;
	}

	/* Print important adapter info for the user. */
	bnx2x_print_adapter_info(sc);

	return ret;
}

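/*
 * Stop the port: disconnect the interrupt handler for physical functions
 * and unload the NIC state.
 */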
static void
bnx2x_dev_stop(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	if (IS_PF(sc)) {
		rte_intr_disable(&sc->pci_dev->intr_handle);
		rte_intr_callback_unregister(&sc->pci_dev->intr_handle,
				bnx2x_interrupt_handler, (void *)dev);
	}

	ret = bnx2x_nic_unload(sc, UNLOAD_NORMAL, FALSE);
	if (ret)
		PMD_DRV_LOG(DEBUG, "bnx2x_nic_unload failed (%d)", ret);
}

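/*
 * Release the resources taken in bnx2x_dev_configure(): tear down the VF
 * channel if applicable, clear the queues and free the HSI and ILT memory.
 */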
static void
bnx2x_dev_close(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (IS_VF(sc))
		bnx2x_vf_close(sc);

	bnx2x_dev_clear_queues(dev);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));

	/* free the host hardware/software hsi structures */
	bnx2x_free_hsi_mem(sc);

	/* free ilt */
	bnx2x_free_ilt_mem(sc);
}

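/*
 * RX filtering mode handlers.  Promiscuous and all-multicast settings are
 * folded into a single sc->rx_mode value, so each handler checks the other
 * flag before programming the hardware via bnx2x_set_rx_mode().
 */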
static void
bnx2x_promisc_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_promisc_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_allmulticast_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_ALLMULTI;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_ALLMULTI_PROMISC;
	bnx2x_set_rx_mode(sc);
}

static void
bnx2x_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;
	if (rte_eth_promiscuous_get(dev->data->port_id) == 1)
		sc->rx_mode = BNX2X_RX_MODE_PROMISC;
	bnx2x_set_rx_mode(sc);
}

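/*
 * Link update callbacks.  Both return 0 when the link status changed since
 * the previous query and -1 when it is unchanged.  The VF variant also
 * polls the PF bulletin to detect a torn-down channel.
 */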
static int
bnx2x_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	int old_link_status = dev->data->dev_link.link_status;

	PMD_INIT_FUNC_TRACE();

	bnx2x_link_update(dev);

	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}

static int
bnx2xvf_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	int old_link_status = dev->data->dev_link.link_status;
	struct bnx2x_softc *sc = dev->data->dev_private;

	bnx2x_link_update(dev);

	bnx2x_check_bull(sc);
	if (sc->old_bulletin.valid_bitmap & (1 << CHANNEL_DOWN)) {
		PMD_DRV_LOG(ERR, "PF indicated channel is down. "
				"VF device is no longer operational");
		dev->data->dev_link.link_status = ETH_LINK_DOWN;
	}

	return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
}

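/*
 * Basic statistics.  Firmware counters are kept as _hi/_lo 32-bit halves
 * and are combined with HILO_U64().  imissed is reported as the sum of the
 * BRB drop/truncate counters and the no-buffer discards.
 */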
static void
bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	uint32_t brb_truncate_discard;
	uint64_t brb_drops;
	uint64_t brb_truncates;

	PMD_INIT_FUNC_TRACE();

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_received_hi,
				sc->eth_stats.total_unicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_received_hi,
				sc->eth_stats.total_multicast_packets_received_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_received_hi,
				sc->eth_stats.total_broadcast_packets_received_lo);

	stats->opackets =
		HILO_U64(sc->eth_stats.total_unicast_packets_transmitted_hi,
				sc->eth_stats.total_unicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_multicast_packets_transmitted_hi,
				sc->eth_stats.total_multicast_packets_transmitted_lo) +
		HILO_U64(sc->eth_stats.total_broadcast_packets_transmitted_hi,
				sc->eth_stats.total_broadcast_packets_transmitted_lo);

	stats->ibytes =
		HILO_U64(sc->eth_stats.total_bytes_received_hi,
				sc->eth_stats.total_bytes_received_lo);

	stats->obytes =
		HILO_U64(sc->eth_stats.total_bytes_transmitted_hi,
				sc->eth_stats.total_bytes_transmitted_lo);

	stats->ierrors =
		HILO_U64(sc->eth_stats.error_bytes_received_hi,
				sc->eth_stats.error_bytes_received_lo);

	stats->oerrors = 0;

	stats->rx_nombuf =
		HILO_U64(sc->eth_stats.no_buff_discard_hi,
				sc->eth_stats.no_buff_discard_lo);

	brb_drops =
		HILO_U64(sc->eth_stats.brb_drop_hi,
			 sc->eth_stats.brb_drop_lo);

	brb_truncates =
		HILO_U64(sc->eth_stats.brb_truncate_hi,
			 sc->eth_stats.brb_truncate_lo);

	brb_truncate_discard = sc->eth_stats.brb_truncate_discard;

	stats->imissed = brb_drops + brb_truncates +
			 brb_truncate_discard + stats->rx_nombuf;
}

static int
bnx2x_get_xstats_names(__rte_unused struct rte_eth_dev *dev,
		       struct rte_eth_xstat_name *xstats_names,
		       __rte_unused unsigned int limit)
{
	unsigned int i, stat_cnt = RTE_DIM(bnx2x_xstats_strings);

	if (xstats_names != NULL)
		for (i = 0; i < stat_cnt; i++)
			snprintf(xstats_names[i].name,
				sizeof(xstats_names[i].name),
				"%s",
				bnx2x_xstats_strings[i].name);

	return stat_cnt;
}

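/*
 * Fill the xstats array using the offsets from bnx2x_xstats_strings[].
 * Returns the number of statistics, which the caller must have room for.
 */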
static int
bnx2x_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		     unsigned int n)
{
	struct bnx2x_softc *sc = dev->data->dev_private;
	unsigned int num = RTE_DIM(bnx2x_xstats_strings);

	if (n < num)
		return num;

	bnx2x_stats_handle(sc, STATS_EVENT_UPDATE);

	/* Only as many entries exist as there are xstats strings. */
	for (num = 0; num < RTE_DIM(bnx2x_xstats_strings); num++) {
		if (bnx2x_xstats_strings[num].offset_hi !=
		    bnx2x_xstats_strings[num].offset_lo)
			xstats[num].value = HILO_U64(
					  *(uint32_t *)((char *)&sc->eth_stats +
					  bnx2x_xstats_strings[num].offset_hi),
					  *(uint32_t *)((char *)&sc->eth_stats +
					  bnx2x_xstats_strings[num].offset_lo));
		else
			xstats[num].value =
					  *(uint64_t *)((char *)&sc->eth_stats +
					  bnx2x_xstats_strings[num].offset_lo);
		xstats[num].id = num;
	}

	return num;
}

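/*
 * Report device capabilities (queue counts, buffer sizes, MAC address
 * slots and supported link speeds) to the application.
 */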
static void
bnx2x_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_queues  = sc->max_rx_queues;
	dev_info->max_tx_queues  = sc->max_tx_queues;
	dev_info->min_rx_bufsize = BNX2X_MIN_RX_BUF_SIZE;
	dev_info->max_rx_pktlen  = BNX2X_MAX_RX_PKT_LEN;
	dev_info->max_mac_addrs  = BNX2X_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
}

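/*
 * MAC address add/remove are delegated to the PF or VF specific handlers
 * installed in sc->mac_ops.
 */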
static void
bnx2x_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_add)
		sc->mac_ops.mac_addr_add(dev, mac_addr, index, pool);
}

static void
bnx2x_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct bnx2x_softc *sc = dev->data->dev_private;

	if (sc->mac_ops.mac_addr_remove)
		sc->mac_ops.mac_addr_remove(dev, index);
}

static const struct eth_dev_ops bnx2x_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2x_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

/*
 * dev_ops for virtual function
 */
static const struct eth_dev_ops bnx2xvf_eth_dev_ops = {
	.dev_configure                = bnx2x_dev_configure,
	.dev_start                    = bnx2x_dev_start,
	.dev_stop                     = bnx2x_dev_stop,
	.dev_close                    = bnx2x_dev_close,
	.promiscuous_enable           = bnx2x_promisc_enable,
	.promiscuous_disable          = bnx2x_promisc_disable,
	.allmulticast_enable          = bnx2x_dev_allmulticast_enable,
	.allmulticast_disable         = bnx2x_dev_allmulticast_disable,
	.link_update                  = bnx2xvf_dev_link_update,
	.stats_get                    = bnx2x_dev_stats_get,
	.xstats_get                   = bnx2x_dev_xstats_get,
	.xstats_get_names             = bnx2x_get_xstats_names,
	.dev_infos_get                = bnx2x_dev_infos_get,
	.rx_queue_setup               = bnx2x_dev_rx_queue_setup,
	.rx_queue_release             = bnx2x_dev_rx_queue_release,
	.tx_queue_setup               = bnx2x_dev_tx_queue_setup,
	.tx_queue_release             = bnx2x_dev_tx_queue_release,
	.mac_addr_add                 = bnx2x_mac_addr_add,
	.mac_addr_remove              = bnx2x_mac_addr_remove,
};

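/*
 * Common PF/VF initialization: select the dev_ops table, map the PCI BARs,
 * load the firmware and attach to the adapter.  For virtual functions this
 * also allocates the VF-to-PF mailbox and bulletin DMA areas and requests
 * RX/TX queue resources from the PF.
 */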
static int
bnx2x_common_dev_init(struct rte_eth_dev *eth_dev, int is_vf)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct bnx2x_softc *sc;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = is_vf ? &bnx2xvf_eth_dev_ops : &bnx2x_eth_dev_ops;
	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	sc = eth_dev->data->dev_private;
	sc->pcie_bus    = pci_dev->addr.bus;
	sc->pcie_device = pci_dev->addr.devid;

	if (is_vf)
		sc->flags = BNX2X_IS_VF_FLAG;

	sc->devinfo.vendor_id    = pci_dev->id.vendor_id;
	sc->devinfo.device_id    = pci_dev->id.device_id;
	sc->devinfo.subvendor_id = pci_dev->id.subsystem_vendor_id;
	sc->devinfo.subdevice_id = pci_dev->id.subsystem_device_id;

	sc->pcie_func = pci_dev->addr.function;
	sc->bar[BAR0].base_addr = (void *)pci_dev->mem_resource[0].addr;
	if (is_vf)
		sc->bar[BAR1].base_addr = (void *)
			((uintptr_t)pci_dev->mem_resource[0].addr + PXP_VF_ADDR_DB_START);
	else
		sc->bar[BAR1].base_addr = pci_dev->mem_resource[2].addr;

	assert(sc->bar[BAR0].base_addr);
	assert(sc->bar[BAR1].base_addr);

	bnx2x_load_firmware(sc);
	assert(sc->firmware);

	if (eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		sc->udp_rss = 1;

	sc->rx_budget = BNX2X_RX_BUDGET;
	sc->hc_rx_ticks = BNX2X_RX_TICKS;
	sc->hc_tx_ticks = BNX2X_TX_TICKS;

	sc->interrupt_mode = INTR_MODE_SINGLE_MSIX;
	sc->rx_mode = BNX2X_RX_MODE_NORMAL;

	sc->pci_dev = pci_dev;
	ret = bnx2x_attach(sc);
	if (ret) {
		PMD_DRV_LOG(ERR, "bnx2x_attach failed (%d)", ret);
		return ret;
	}

	eth_dev->data->mac_addrs = (struct ether_addr *)sc->link_params.mac_addr;

	PMD_DRV_LOG(INFO, "pcie_bus=%d, pcie_device=%d",
			sc->pcie_bus, sc->pcie_device);
	PMD_DRV_LOG(INFO, "bar0.addr=%p, bar1.addr=%p",
			sc->bar[BAR0].base_addr, sc->bar[BAR1].base_addr);
	PMD_DRV_LOG(INFO, "port=%d, path=%d, vnic=%d, func=%d",
			PORT_ID(sc), PATH_ID(sc), VNIC_ID(sc), FUNC_ID(sc));
	PMD_DRV_LOG(INFO, "portID=%d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id);

	if (IS_VF(sc)) {
		rte_spinlock_init(&sc->vf2pf_lock);

		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_mbx_msg),
				    &sc->vf2pf_mbox_mapping, "vf2pf_mbox",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->vf2pf_mbox = (struct bnx2x_vf_mbx_msg *)
					 sc->vf2pf_mbox_mapping.vaddr;

		if (bnx2x_dma_alloc(sc, sizeof(struct bnx2x_vf_bulletin),
				    &sc->pf2vf_bulletin_mapping, "vf2pf_bull",
				    RTE_CACHE_LINE_SIZE) != 0)
			return -ENOMEM;

		sc->pf2vf_bulletin = (struct bnx2x_vf_bulletin *)
					     sc->pf2vf_bulletin_mapping.vaddr;

		ret = bnx2x_vf_get_resources(sc, sc->max_tx_queues,
					     sc->max_rx_queues);
		if (ret)
			return ret;
	}

	return 0;
}

static int
eth_bnx2x_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 0);
}

static int
eth_bnx2xvf_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	return bnx2x_common_dev_init(eth_dev, 1);
}

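/*
 * physical function driver struct
 */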
static struct eth_driver rte_bnx2x_pmd = {
	.pci_drv = {
		.id_table = pci_id_bnx2x_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_bnx2x_dev_init,
	.dev_private_size = sizeof(struct bnx2x_softc),
};

/*
 * virtual function driver struct
 */
static struct eth_driver rte_bnx2xvf_pmd = {
	.pci_drv = {
		.id_table = pci_id_bnx2xvf_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = eth_bnx2xvf_dev_init,
	.dev_private_size = sizeof(struct bnx2x_softc),
};

RTE_PMD_REGISTER_PCI(net_bnx2x, rte_bnx2x_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2x, pci_id_bnx2x_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2x, "* igb_uio | uio_pci_generic | vfio");
RTE_PMD_REGISTER_PCI(net_bnx2xvf, rte_bnx2xvf_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_bnx2xvf, pci_id_bnx2xvf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnx2xvf, "* igb_uio | vfio");