kcompat.h revision f7a9461e
1/*******************************************************************************
2
3  Intel(R) Gigabit Ethernet Linux driver
4  Copyright(c) 2007-2013 Intel Corporation.
5
6  This program is free software; you can redistribute it and/or modify it
7  under the terms and conditions of the GNU General Public License,
8  version 2, as published by the Free Software Foundation.
9
10  This program is distributed in the hope it will be useful, but WITHOUT
11  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  more details.
14
15  You should have received a copy of the GNU General Public License along with
16  this program; if not, write to the Free Software Foundation, Inc.,
17  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19  The full GNU General Public License is included in this distribution in
20  the file called "LICENSE.GPL".
21
22  Contact Information:
23  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#ifndef _KCOMPAT_H_
29#define _KCOMPAT_H_
30
31#ifndef LINUX_VERSION_CODE
32#include <linux/version.h>
33#else
34#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
35#endif
36#include <linux/init.h>
37#include <linux/types.h>
38#include <linux/errno.h>
39#include <linux/module.h>
40#include <linux/pci.h>
41#include <linux/netdevice.h>
42#include <linux/etherdevice.h>
43#include <linux/skbuff.h>
44#include <linux/ioport.h>
45#include <linux/slab.h>
46#include <linux/list.h>
47#include <linux/delay.h>
48#include <linux/sched.h>
49#include <linux/in.h>
50#include <linux/ip.h>
51#include <linux/udp.h>
52#include <linux/mii.h>
53#include <linux/vmalloc.h>
54#include <asm/io.h>
55#include <linux/ethtool.h>
56#include <linux/if_vlan.h>
57
/* NAPI enable/disable flags here */
#define NAPI

/* Map the generic names used by the shared driver code onto the
 * igb-specific adapter types. */
#define adapter_struct igb_adapter
#define adapter_q_vector igb_q_vector

/* and finally set defines so that the code sees the changes */
/* Empty scaffold kept from the upstream template: per-mode defines would
 * be added here if NAPI and non-NAPI builds required different symbols.
 * (A redundant second "#define NAPI" that used to sit above this point
 * has been removed; the flag is defined exactly once now.) */
#ifdef NAPI
#else
#endif /* NAPI */
69
70/* packet split disable/enable */
71#ifdef DISABLE_PACKET_SPLIT
72#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
73#define CONFIG_IGB_DISABLE_PACKET_SPLIT
74#endif
75#endif /* DISABLE_PACKET_SPLIT */
76
/* MSI compatibility code for all kernels and drivers */
#ifdef DISABLE_PCI_MSI
/* Build-time kill switch: pretend the kernel has no MSI support at all. */
#undef CONFIG_PCI_MSI
#endif
#ifndef CONFIG_PCI_MSI
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
/* struct msix_entry only exists on newer kernels; provide the layout here
 * so MSI-X table code still compiles even though allocation always fails. */
struct msix_entry {
	u16 vector; /* kernel uses to write allocated vector */
	u16 entry;  /* driver uses to specify entry, OS writes */
};
#endif
/* With MSI unavailable, the enable calls fail with -ENOTSUPP (pushing the
 * driver down its legacy-interrupt path) and the disable calls become
 * no-op statements. */
#undef pci_enable_msi
#define pci_enable_msi(a) -ENOTSUPP
#undef pci_disable_msi
#define pci_disable_msi(a) do {} while (0)
#undef pci_enable_msix
#define pci_enable_msix(a, b, c) -ENOTSUPP
#undef pci_disable_msix
#define pci_disable_msix(a) do {} while (0)
#define msi_remove_pci_irq_vectors(a) do {} while (0)
#endif /* CONFIG_PCI_MSI */
#ifdef DISABLE_PM
/* Build-time kill switch for power-management (suspend/resume) support. */
#undef CONFIG_PM
#endif
101
102#ifdef DISABLE_NET_POLL_CONTROLLER
103#undef CONFIG_NET_POLL_CONTROLLER
104#endif
105
106#ifndef PMSG_SUSPEND
107#define PMSG_SUSPEND 3
108#endif
109
/* generic boolean compatibility */
#undef TRUE
#undef FALSE
#define TRUE true
#define FALSE false
#ifdef GCC_VERSION
#if ( GCC_VERSION < 3000 )
/* gcc < 3.0 predates the C99 _Bool keyword; emulate it with char. */
#define _Bool char
#endif
#else
/* NOTE(review): when GCC_VERSION is not known, _Bool is unconditionally
 * redefined to char — this assumes a pre-C99 toolchain and would shadow
 * the real keyword on a modern compiler; confirm this branch is only
 * reached on ancient build environments. */
#define _Bool char
#endif
122
123/* kernels less than 2.4.14 don't have this */
124#ifndef ETH_P_8021Q
125#define ETH_P_8021Q 0x8100
126#endif
127
#ifndef module_param
/* Kernels predating module_param() fall back to the legacy MODULE_PARM
 * interface.  The type and permission arguments are ignored and every
 * parameter is registered as an "i" (int).
 * Fix: the macro body must not carry a trailing semicolon — the caller
 * already supplies one, and the old form produced a stray ';' at file
 * scope (a constraint violation under -pedantic). */
#define module_param(v,t,p) MODULE_PARM(v, "i")
#endif
131
132#ifndef DMA_64BIT_MASK
133#define DMA_64BIT_MASK  0xffffffffffffffffULL
134#endif
135
136#ifndef DMA_32BIT_MASK
137#define DMA_32BIT_MASK  0x00000000ffffffffULL
138#endif
139
140#ifndef PCI_CAP_ID_EXP
141#define PCI_CAP_ID_EXP 0x10
142#endif
143
144#ifndef PCIE_LINK_STATE_L0S
145#define PCIE_LINK_STATE_L0S 1
146#endif
147#ifndef PCIE_LINK_STATE_L1
148#define PCIE_LINK_STATE_L1 2
149#endif
150
151#ifndef mmiowb
152#ifdef CONFIG_IA64
153#define mmiowb() asm volatile ("mf.a" ::: "memory")
154#else
155#define mmiowb()
156#endif
157#endif
158
159#ifndef SET_NETDEV_DEV
160#define SET_NETDEV_DEV(net, pdev)
161#endif
162
163#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
164#define free_netdev(x)	kfree(x)
165#endif
166
167#ifdef HAVE_POLL_CONTROLLER
168#define CONFIG_NET_POLL_CONTROLLER
169#endif
170
171#ifndef SKB_DATAREF_SHIFT
172/* if we do not have the infrastructure to detect if skb_header is cloned
173   just return false in all cases */
174#define skb_header_cloned(x) 0
175#endif
176
177#ifndef NETIF_F_GSO
178#define gso_size tso_size
179#define gso_segs tso_segs
180#endif
181
182#ifndef NETIF_F_GRO
183#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \
184		vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan)
185#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb)
186#endif
187
188#ifndef NETIF_F_SCTP_CSUM
189#define NETIF_F_SCTP_CSUM 0
190#endif
191
192#ifndef NETIF_F_LRO
193#define NETIF_F_LRO (1 << 15)
194#endif
195
196#ifndef NETIF_F_NTUPLE
197#define NETIF_F_NTUPLE (1 << 27)
198#endif
199
200#ifndef IPPROTO_SCTP
201#define IPPROTO_SCTP 132
202#endif
203
204#ifndef CHECKSUM_PARTIAL
205#define CHECKSUM_PARTIAL CHECKSUM_HW
206#define CHECKSUM_COMPLETE CHECKSUM_HW
207#endif
208
209#ifndef __read_mostly
210#define __read_mostly
211#endif
212
213#ifndef MII_RESV1
214#define MII_RESV1		0x17		/* Reserved...		*/
215#endif
216
217#ifndef unlikely
218#define unlikely(_x) _x
219#define likely(_x) _x
220#endif
221
222#ifndef WARN_ON
223#define WARN_ON(x)
224#endif
225
226#ifndef PCI_DEVICE
227#define PCI_DEVICE(vend,dev) \
228	.vendor = (vend), .device = (dev), \
229	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
230#endif
231
232#ifndef node_online
233#define node_online(node) ((node) == 0)
234#endif
235
236#ifndef num_online_cpus
237#define num_online_cpus() smp_num_cpus
238#endif
239
240#ifndef cpu_online
241#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map)
242#endif
243
244#ifndef _LINUX_RANDOM_H
245#include <linux/random.h>
246#endif
247
248#ifndef DECLARE_BITMAP
249#ifndef BITS_TO_LONGS
250#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
251#endif
252#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
253#endif
254
255#ifndef VLAN_HLEN
256#define VLAN_HLEN 4
257#endif
258
259#ifndef VLAN_ETH_HLEN
260#define VLAN_ETH_HLEN 18
261#endif
262
263#ifndef VLAN_ETH_FRAME_LEN
264#define VLAN_ETH_FRAME_LEN 1518
265#endif
266
267#if !defined(IXGBE_DCA) && !defined(IGB_DCA)
268#define dca_get_tag(b) 0
269#define dca_add_requester(a) -1
270#define dca_remove_requester(b) do { } while(0)
271#define DCA_PROVIDER_ADD     0x0001
272#define DCA_PROVIDER_REMOVE  0x0002
273#endif
274
275#ifndef DCA_GET_TAG_TWO_ARGS
276#define dca3_get_tag(a,b) dca_get_tag(b)
277#endif
278
279#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
280#if defined(__i386__) || defined(__x86_64__)
281#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
282#endif
283#endif
284
285/* taken from 2.6.24 definition in linux/kernel.h */
286#ifndef IS_ALIGNED
287#define IS_ALIGNED(x,a)         (((x) % ((typeof(x))(a))) == 0)
288#endif
289
/* Backported IS_ENABLED() machinery (from upstream linux/kconfig.h).
 * Drop any kernel-provided versions first so our definitions win. */
#ifdef IS_ENABLED
#undef IS_ENABLED
#undef __ARG_PLACEHOLDER_1
#undef config_enabled
#undef _config_enabled
#undef __config_enabled
#undef ___config_enabled
#endif

/* The trick: CONFIG_FOO is defined to 1 (or empty) by Kconfig.  If the
 * option is set to 1, pasting it onto __ARG_PLACEHOLDER_ yields
 * "__ARG_PLACEHOLDER_1", which expands to "0," and shifts the argument
 * list so ___config_enabled() selects 1; otherwise it selects 0. */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

/* True if the option is built in ("=y") or built as a module ("=m"). */
#define IS_ENABLED(option) \
	(config_enabled(option) || config_enabled(option##_MODULE))
307
/* VLAN header layouts for kernels that expose neither the old
 * NETIF_F_HW_VLAN_TX nor the newer NETIF_F_HW_VLAN_CTAG_TX flag
 * (i.e. linux/if_vlan.h does not provide these structs). */
#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX)
/* 802.1Q Ethernet frame header: standard Ethernet header followed by
 * the 4-byte VLAN tag (TPID + TCI). */
struct _kc_vlan_ethhdr {
	unsigned char	h_dest[ETH_ALEN];
	unsigned char	h_source[ETH_ALEN];
	__be16		h_vlan_proto;
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
#define vlan_ethhdr _kc_vlan_ethhdr
/* Bare VLAN tag as it appears inside an encapsulating header. */
struct _kc_vlan_hdr {
	__be16		h_vlan_TCI;
	__be16		h_vlan_encapsulated_proto;
};
#define vlan_hdr _kc_vlan_hdr
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
/* No hardware-accelerated VLAN: skbs never carry a tag out of band. */
#define vlan_tx_tag_present(_skb) 0
#define vlan_tx_tag_get(_skb) 0
#endif
#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */
327
328#ifndef VLAN_PRIO_SHIFT
329#define VLAN_PRIO_SHIFT 13
330#endif
331
332
333#ifndef __GFP_COLD
334#define __GFP_COLD 0
335#endif
336
337#ifndef __GFP_COMP
338#define __GFP_COMP 0
339#endif
340
341/*****************************************************************************/
342/* Installations with ethtool version without eeprom, adapter id, or statistics
343 * support */
344
345#ifndef ETH_GSTRING_LEN
346#define ETH_GSTRING_LEN 32
347#endif
348
349#ifndef ETHTOOL_GSTATS
350#define ETHTOOL_GSTATS 0x1d
351#undef ethtool_drvinfo
352#define ethtool_drvinfo k_ethtool_drvinfo
353struct k_ethtool_drvinfo {
354	u32 cmd;
355	char driver[32];
356	char version[32];
357	char fw_version[32];
358	char bus_info[32];
359	char reserved1[32];
360	char reserved2[16];
361	u32 n_stats;
362	u32 testinfo_len;
363	u32 eedump_len;
364	u32 regdump_len;
365};
366
367struct ethtool_stats {
368	u32 cmd;
369	u32 n_stats;
370	u64 data[0];
371};
372#endif /* ETHTOOL_GSTATS */
373
374#ifndef ETHTOOL_PHYS_ID
375#define ETHTOOL_PHYS_ID 0x1c
376#endif /* ETHTOOL_PHYS_ID */
377
378#ifndef ETHTOOL_GSTRINGS
379#define ETHTOOL_GSTRINGS 0x1b
380enum ethtool_stringset {
381	ETH_SS_TEST             = 0,
382	ETH_SS_STATS,
383};
384struct ethtool_gstrings {
385	u32 cmd;            /* ETHTOOL_GSTRINGS */
386	u32 string_set;     /* string set id e.c. ETH_SS_TEST, etc*/
387	u32 len;            /* number of strings in the string set */
388	u8 data[0];
389};
390#endif /* ETHTOOL_GSTRINGS */
391
392#ifndef ETHTOOL_TEST
393#define ETHTOOL_TEST 0x1a
394enum ethtool_test_flags {
395	ETH_TEST_FL_OFFLINE	= (1 << 0),
396	ETH_TEST_FL_FAILED	= (1 << 1),
397};
398struct ethtool_test {
399	u32 cmd;
400	u32 flags;
401	u32 reserved;
402	u32 len;
403	u64 data[0];
404};
405#endif /* ETHTOOL_TEST */
406
407#ifndef ETHTOOL_GEEPROM
408#define ETHTOOL_GEEPROM 0xb
409#undef ETHTOOL_GREGS
410struct ethtool_eeprom {
411	u32 cmd;
412	u32 magic;
413	u32 offset;
414	u32 len;
415	u8 data[0];
416};
417
418struct ethtool_value {
419	u32 cmd;
420	u32 data;
421};
422#endif /* ETHTOOL_GEEPROM */
423
424#ifndef ETHTOOL_GLINK
425#define ETHTOOL_GLINK 0xa
426#endif /* ETHTOOL_GLINK */
427
428#ifndef ETHTOOL_GWOL
429#define ETHTOOL_GWOL 0x5
430#define ETHTOOL_SWOL 0x6
431#define SOPASS_MAX      6
432struct ethtool_wolinfo {
433	u32 cmd;
434	u32 supported;
435	u32 wolopts;
436	u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
437};
438#endif /* ETHTOOL_GWOL */
439
440#ifndef ETHTOOL_GREGS
441#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
442#define ethtool_regs _kc_ethtool_regs
443/* for passing big chunks of data */
444struct _kc_ethtool_regs {
445	u32 cmd;
446	u32 version; /* driver-specific, indicates different chips/revs */
447	u32 len; /* bytes */
448	u8 data[0];
449};
450#endif /* ETHTOOL_GREGS */
451
452#ifndef ETHTOOL_GMSGLVL
453#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
454#endif
455#ifndef ETHTOOL_SMSGLVL
456#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
457#endif
458#ifndef ETHTOOL_NWAY_RST
459#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
460#endif
461#ifndef ETHTOOL_GLINK
462#define ETHTOOL_GLINK		0x0000000a /* Get link status */
463#endif
464#ifndef ETHTOOL_GEEPROM
465#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
466#endif
467#ifndef ETHTOOL_SEEPROM
468#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
469#endif
470#ifndef ETHTOOL_GCOALESCE
471#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
472/* for configuring coalescing parameters of chip */
473#define ethtool_coalesce _kc_ethtool_coalesce
474struct _kc_ethtool_coalesce {
475	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
476
477	/* How many usecs to delay an RX interrupt after
478	 * a packet arrives.  If 0, only rx_max_coalesced_frames
479	 * is used.
480	 */
481	u32	rx_coalesce_usecs;
482
483	/* How many packets to delay an RX interrupt after
484	 * a packet arrives.  If 0, only rx_coalesce_usecs is
485	 * used.  It is illegal to set both usecs and max frames
486	 * to zero as this would cause RX interrupts to never be
487	 * generated.
488	 */
489	u32	rx_max_coalesced_frames;
490
491	/* Same as above two parameters, except that these values
492	 * apply while an IRQ is being serviced by the host.  Not
493	 * all cards support this feature and the values are ignored
494	 * in that case.
495	 */
496	u32	rx_coalesce_usecs_irq;
497	u32	rx_max_coalesced_frames_irq;
498
499	/* How many usecs to delay a TX interrupt after
500	 * a packet is sent.  If 0, only tx_max_coalesced_frames
501	 * is used.
502	 */
503	u32	tx_coalesce_usecs;
504
505	/* How many packets to delay a TX interrupt after
506	 * a packet is sent.  If 0, only tx_coalesce_usecs is
507	 * used.  It is illegal to set both usecs and max frames
508	 * to zero as this would cause TX interrupts to never be
509	 * generated.
510	 */
511	u32	tx_max_coalesced_frames;
512
513	/* Same as above two parameters, except that these values
514	 * apply while an IRQ is being serviced by the host.  Not
515	 * all cards support this feature and the values are ignored
516	 * in that case.
517	 */
518	u32	tx_coalesce_usecs_irq;
519	u32	tx_max_coalesced_frames_irq;
520
521	/* How many usecs to delay in-memory statistics
522	 * block updates.  Some drivers do not have an in-memory
523	 * statistic block, and in such cases this value is ignored.
524	 * This value must not be zero.
525	 */
526	u32	stats_block_coalesce_usecs;
527
528	/* Adaptive RX/TX coalescing is an algorithm implemented by
529	 * some drivers to improve latency under low packet rates and
530	 * improve throughput under high packet rates.  Some drivers
531	 * only implement one of RX or TX adaptive coalescing.  Anything
532	 * not implemented by the driver causes these values to be
533	 * silently ignored.
534	 */
535	u32	use_adaptive_rx_coalesce;
536	u32	use_adaptive_tx_coalesce;
537
538	/* When the packet rate (measured in packets per second)
539	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
540	 * used.
541	 */
542	u32	pkt_rate_low;
543	u32	rx_coalesce_usecs_low;
544	u32	rx_max_coalesced_frames_low;
545	u32	tx_coalesce_usecs_low;
546	u32	tx_max_coalesced_frames_low;
547
548	/* When the packet rate is below pkt_rate_high but above
549	 * pkt_rate_low (both measured in packets per second) the
550	 * normal {rx,tx}_* coalescing parameters are used.
551	 */
552
553	/* When the packet rate is (measured in packets per second)
554	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
555	 * used.
556	 */
557	u32	pkt_rate_high;
558	u32	rx_coalesce_usecs_high;
559	u32	rx_max_coalesced_frames_high;
560	u32	tx_coalesce_usecs_high;
561	u32	tx_max_coalesced_frames_high;
562
563	/* How often to do adaptive coalescing packet rate sampling,
564	 * measured in seconds.  Must not be zero.
565	 */
566	u32	rate_sample_interval;
567};
568#endif /* ETHTOOL_GCOALESCE */
569
570#ifndef ETHTOOL_SCOALESCE
571#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
572#endif
573#ifndef ETHTOOL_GRINGPARAM
574#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
575/* for configuring RX/TX ring parameters */
576#define ethtool_ringparam _kc_ethtool_ringparam
577struct _kc_ethtool_ringparam {
578	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
579
580	/* Read only attributes.  These indicate the maximum number
581	 * of pending RX/TX ring entries the driver will allow the
582	 * user to set.
583	 */
584	u32	rx_max_pending;
585	u32	rx_mini_max_pending;
586	u32	rx_jumbo_max_pending;
587	u32	tx_max_pending;
588
589	/* Values changeable by the user.  The valid values are
590	 * in the range 1 to the "*_max_pending" counterpart above.
591	 */
592	u32	rx_pending;
593	u32	rx_mini_pending;
594	u32	rx_jumbo_pending;
595	u32	tx_pending;
596};
597#endif /* ETHTOOL_GRINGPARAM */
598
599#ifndef ETHTOOL_SRINGPARAM
600#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
601#endif
602#ifndef ETHTOOL_GPAUSEPARAM
603#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
604/* for configuring link flow control parameters */
605#define ethtool_pauseparam _kc_ethtool_pauseparam
606struct _kc_ethtool_pauseparam {
607	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
608
609	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
610	 * being true) the user may set 'autoneg' here non-zero to have the
611	 * pause parameters be auto-negotiated too.  In such a case, the
612	 * {rx,tx}_pause values below determine what capabilities are
613	 * advertised.
614	 *
615	 * If 'autoneg' is zero or the link is not being auto-negotiated,
616	 * then {rx,tx}_pause force the driver to use/not-use pause
617	 * flow control.
618	 */
619	u32	autoneg;
620	u32	rx_pause;
621	u32	tx_pause;
622};
623#endif /* ETHTOOL_GPAUSEPARAM */
624
625#ifndef ETHTOOL_SPAUSEPARAM
626#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
627#endif
628#ifndef ETHTOOL_GRXCSUM
629#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
630#endif
631#ifndef ETHTOOL_SRXCSUM
632#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
633#endif
634#ifndef ETHTOOL_GTXCSUM
635#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
636#endif
637#ifndef ETHTOOL_STXCSUM
638#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
639#endif
640#ifndef ETHTOOL_GSG
641#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
642					    * (ethtool_value) */
643#endif
644#ifndef ETHTOOL_SSG
645#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
646					    * (ethtool_value). */
647#endif
648#ifndef ETHTOOL_TEST
649#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
650#endif
651#ifndef ETHTOOL_GSTRINGS
652#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
653#endif
654#ifndef ETHTOOL_PHYS_ID
655#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
656#endif
657#ifndef ETHTOOL_GSTATS
658#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
659#endif
660#ifndef ETHTOOL_GTSO
661#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
662#endif
663#ifndef ETHTOOL_STSO
664#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
665#endif
666
667#ifndef ETHTOOL_BUSINFO_LEN
668#define ETHTOOL_BUSINFO_LEN	32
669#endif
670
/* Distro release codes: encode major/minor as (major << 8) + minor so the
 * values can be compared with plain integer operators. */
#ifndef RHEL_RELEASE_VERSION
#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b))
#endif
#ifndef AX_RELEASE_VERSION
#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b))
#endif

#ifndef AX_RELEASE_CODE
#define AX_RELEASE_CODE 0
#endif

/* Map AX releases onto the RHEL release they were derived from, so the
 * rest of this header only needs RHEL_RELEASE_CODE checks.
 * NOTE(review): "AX" presumably refers to the Asianux distribution —
 * confirm against the build system that supplies AX_RELEASE_CODE. */
#if (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,0))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0)
#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1)
#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2))
#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3)
#endif

#ifndef RHEL_RELEASE_CODE
/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */
#define RHEL_RELEASE_CODE 0
#endif
694
695/* SuSE version macro is the same as Linux kernel version */
696#ifndef SLE_VERSION
697#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c)
698#endif
699#ifdef CONFIG_SUSE_KERNEL
700#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 57))
701/* SLES12SP3 is at least 4.4.57+ based */
702#define SLE_VERSION_CODE SLE_VERSION(12, 3, 0)
703#elif ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,12,28) )
704/* SLES12 is at least 3.12.28+ based */
705#define SLE_VERSION_CODE SLE_VERSION(12,0,0)
706#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,61)) && \
707       (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)))
708/* SLES11 SP3 is at least 3.0.61+ based */
709#define SLE_VERSION_CODE SLE_VERSION(11,3,0)
710#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) )
711/* SLES11 SP1 is 2.6.32 based */
712#define SLE_VERSION_CODE SLE_VERSION(11,1,0)
713#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) )
714/* SLES11 GA is 2.6.27 based */
715#define SLE_VERSION_CODE SLE_VERSION(11,0,0)
716#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */
717#endif /* CONFIG_SUSE_KERNEL */
718#ifndef SLE_VERSION_CODE
719#define SLE_VERSION_CODE 0
720#endif /* SLE_VERSION_CODE */
721
/* Ubuntu release and kernel codes must be specified from Makefile */
#ifndef UBUNTU_RELEASE_VERSION
/* e.g. UBUNTU_RELEASE_VERSION(16,4) == 1604 for Ubuntu 16.04 */
#define UBUNTU_RELEASE_VERSION(a,b) (((a) * 100) + (b))
#endif
#ifndef UBUNTU_KERNEL_VERSION
/* Pack version + ABI + upload numbers into one comparable value.
 * Fix: the shift amounts (40, 32) exceed the width of int, so the
 * operands must be widened to unsigned long long first — shifting a
 * plain int by >= 32 bits is undefined behavior on LP64/ILP32 targets. */
#define UBUNTU_KERNEL_VERSION(a,b,c,abi,upload) \
	(((unsigned long long)(a) << 40) + \
	 ((unsigned long long)(b) << 32) + \
	 ((unsigned long long)(c) << 24) + \
	 ((unsigned long long)(abi) << 8) + \
	 (unsigned long long)(upload))
#endif
#ifndef UBUNTU_RELEASE_CODE
#define UBUNTU_RELEASE_CODE 0
#endif
#ifndef UBUNTU_KERNEL_CODE
#define UBUNTU_KERNEL_CODE 0
#endif
735
736#ifdef __KLOCWORK__
737#ifdef ARRAY_SIZE
738#undef ARRAY_SIZE
739#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
740#endif
741#endif /* __KLOCWORK__ */
742
743/*****************************************************************************/
744/* 2.4.3 => 2.4.0 */
745#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )
746
747/**************************************/
748/* PCI DRIVER API */
749
750#ifndef pci_set_dma_mask
751#define pci_set_dma_mask _kc_pci_set_dma_mask
752extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask);
753#endif
754
755#ifndef pci_request_regions
756#define pci_request_regions _kc_pci_request_regions
757extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name);
758#endif
759
760#ifndef pci_release_regions
761#define pci_release_regions _kc_pci_release_regions
762extern void _kc_pci_release_regions(struct pci_dev *pdev);
763#endif
764
765/**************************************/
766/* NETWORK DRIVER API */
767
768#ifndef alloc_etherdev
769#define alloc_etherdev _kc_alloc_etherdev
770extern struct net_device * _kc_alloc_etherdev(int sizeof_priv);
771#endif
772
773#ifndef is_valid_ether_addr
774#define is_valid_ether_addr _kc_is_valid_ether_addr
775extern int _kc_is_valid_ether_addr(u8 *addr);
776#endif
777
778/**************************************/
779/* MISCELLANEOUS */
780
781#ifndef INIT_TQUEUE
782#define INIT_TQUEUE(_tq, _routine, _data)		\
783	do {						\
784		INIT_LIST_HEAD(&(_tq)->list);		\
785		(_tq)->sync = 0;			\
786		(_tq)->routine = _routine;		\
787		(_tq)->data = _data;			\
788	} while (0)
789#endif
790
791#endif /* 2.4.3 => 2.4.0 */
792
793/*****************************************************************************/
794#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) )
795/* Generic MII registers. */
796#define MII_BMCR            0x00        /* Basic mode control register */
797#define MII_BMSR            0x01        /* Basic mode status register  */
798#define MII_PHYSID1         0x02        /* PHYS ID 1                   */
799#define MII_PHYSID2         0x03        /* PHYS ID 2                   */
800#define MII_ADVERTISE       0x04        /* Advertisement control reg   */
801#define MII_LPA             0x05        /* Link partner ability reg    */
802#define MII_EXPANSION       0x06        /* Expansion register          */
803/* Basic mode control register. */
804#define BMCR_FULLDPLX           0x0100  /* Full duplex                 */
805#define BMCR_ANENABLE           0x1000  /* Enable auto negotiation     */
806/* Basic mode status register. */
807#define BMSR_ERCAP              0x0001  /* Ext-reg capability          */
808#define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
809#define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
810#define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
811#define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
812#define BMSR_100FULL            0x4000  /* Can do 100mbps, full-duplex */
813/* Advertisement control register. */
814#define ADVERTISE_CSMA          0x0001  /* Only selector supported     */
815#define ADVERTISE_10HALF        0x0020  /* Try for 10mbps half-duplex  */
816#define ADVERTISE_10FULL        0x0040  /* Try for 10mbps full-duplex  */
817#define ADVERTISE_100HALF       0x0080  /* Try for 100mbps half-duplex */
818#define ADVERTISE_100FULL       0x0100  /* Try for 100mbps full-duplex */
819#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
820                       ADVERTISE_100HALF | ADVERTISE_100FULL)
821/* Expansion register for auto-negotiation. */
822#define EXPANSION_ENABLENPAGE   0x0004  /* This enables npage words    */
823#endif
824
825/*****************************************************************************/
826/* 2.4.6 => 2.4.3 */
827#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )
828
829#ifndef pci_set_power_state
830#define pci_set_power_state _kc_pci_set_power_state
831extern int _kc_pci_set_power_state(struct pci_dev *dev, int state);
832#endif
833
834#ifndef pci_enable_wake
835#define pci_enable_wake _kc_pci_enable_wake
836extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable);
837#endif
838
839#ifndef pci_disable_device
840#define pci_disable_device _kc_pci_disable_device
841extern void _kc_pci_disable_device(struct pci_dev *pdev);
842#endif
843
844/* PCI PM entry point syntax changed, so don't support suspend/resume */
845#undef CONFIG_PM
846
847#endif /* 2.4.6 => 2.4.3 */
848
#ifndef HAVE_PCI_SET_MWI
/* Memory-Write-Invalidate fallbacks: toggle PCI_COMMAND_INVALIDATE in the
 * PCI command register by hand.
 * Fix: the macro bodies must not end in a semicolon — the caller supplies
 * it, and the old form broke "if (x) pci_set_mwi(p); else ..." and left
 * stray empty statements behind.
 * NOTE(review): both macros silently rely on a local variable named
 * 'adapter' being in scope at the call site (adapter->hw.bus.pci_cmd_word
 * holds the cached command word) — confirm every caller provides it. */
#define pci_set_mwi(X) pci_write_config_word(X, \
			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
			       PCI_COMMAND_INVALIDATE)
#define pci_clear_mwi(X) pci_write_config_word(X, \
			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
			       ~PCI_COMMAND_INVALIDATE)
#endif
857
858/*****************************************************************************/
859/* 2.4.10 => 2.4.9 */
860#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) )
861
862/**************************************/
863/* MODULE API */
864
865#ifndef MODULE_LICENSE
866	#define MODULE_LICENSE(X)
867#endif
868
869/**************************************/
870/* OTHER */
871
872#undef min
873#define min(x,y) ({ \
874	const typeof(x) _x = (x);	\
875	const typeof(y) _y = (y);	\
876	(void) (&_x == &_y);		\
877	_x < _y ? _x : _y; })
878
879#undef max
880#define max(x,y) ({ \
881	const typeof(x) _x = (x);	\
882	const typeof(y) _y = (y);	\
883	(void) (&_x == &_y);		\
884	_x > _y ? _x : _y; })
885
886#define min_t(type,x,y) ({ \
887	type _x = (x); \
888	type _y = (y); \
889	_x < _y ? _x : _y; })
890
891#define max_t(type,x,y) ({ \
892	type _x = (x); \
893	type _y = (y); \
894	_x > _y ? _x : _y; })
895
896#ifndef list_for_each_safe
897#define list_for_each_safe(pos, n, head) \
898	for (pos = (head)->next, n = pos->next; pos != (head); \
899		pos = n, n = pos->next)
900#endif
901
902#ifndef ____cacheline_aligned_in_smp
903#ifdef CONFIG_SMP
904#define ____cacheline_aligned_in_smp ____cacheline_aligned
905#else
906#define ____cacheline_aligned_in_smp
907#endif /* CONFIG_SMP */
908#endif
909
910#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
911extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...);
912#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args)
913extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
914#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args)
915#else /* 2.4.8 => 2.4.9 */
916extern int snprintf(char * buf, size_t size, const char *fmt, ...);
917extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
918#endif
919#endif /* 2.4.10 -> 2.4.6 */
920
921
922/*****************************************************************************/
923/* 2.4.12 => 2.4.10 */
924#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) )
925#ifndef HAVE_NETIF_MSG
926#define HAVE_NETIF_MSG 1
927enum {
928	NETIF_MSG_DRV		= 0x0001,
929	NETIF_MSG_PROBE		= 0x0002,
930	NETIF_MSG_LINK		= 0x0004,
931	NETIF_MSG_TIMER		= 0x0008,
932	NETIF_MSG_IFDOWN	= 0x0010,
933	NETIF_MSG_IFUP		= 0x0020,
934	NETIF_MSG_RX_ERR	= 0x0040,
935	NETIF_MSG_TX_ERR	= 0x0080,
936	NETIF_MSG_TX_QUEUED	= 0x0100,
937	NETIF_MSG_INTR		= 0x0200,
938	NETIF_MSG_TX_DONE	= 0x0400,
939	NETIF_MSG_RX_STATUS	= 0x0800,
940	NETIF_MSG_PKTDATA	= 0x1000,
941	NETIF_MSG_HW		= 0x2000,
942	NETIF_MSG_WOL		= 0x4000,
943};
944
945#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
946#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
947#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
948#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
949#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
950#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
951#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
952#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
953#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
954#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
955#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
956#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
957#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
958#endif /* !HAVE_NETIF_MSG */
959#endif /* 2.4.12 => 2.4.10 */
960
961/*****************************************************************************/
962/* 2.4.13 => 2.4.12 */
963#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )
964
965/**************************************/
966/* PCI DMA MAPPING */
967
968#ifndef virt_to_page
969	#define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT))
970#endif
971
972#ifndef pci_map_page
973#define pci_map_page _kc_pci_map_page
974extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction);
975#endif
976
977#ifndef pci_unmap_page
978#define pci_unmap_page _kc_pci_unmap_page
979extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction);
980#endif
981
982/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */
983
984#undef DMA_32BIT_MASK
985#define DMA_32BIT_MASK	0xffffffff
986#undef DMA_64BIT_MASK
987#define DMA_64BIT_MASK	0xffffffff
988
989/**************************************/
990/* OTHER */
991
992#ifndef cpu_relax
993#define cpu_relax()	rep_nop()
994#endif
995
996struct vlan_ethhdr {
997	unsigned char h_dest[ETH_ALEN];
998	unsigned char h_source[ETH_ALEN];
999	unsigned short h_vlan_proto;
1000	unsigned short h_vlan_TCI;
1001	unsigned short h_vlan_encapsulated_proto;
1002};
1003#endif /* 2.4.13 => 2.4.12 */
1004
/*****************************************************************************/
/* 2.4.17 => 2.4.12 */
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) )

/* Very old kernels lack __devexit_p(); a plain address-of works because
 * exit code is never discarded there. */
#ifndef __devexit_p
	#define __devexit_p(x) &(x)
#endif

#else
        /* For Kernel 3.8 these are not defined - so undefine all */
        /* The __devinit/__devexit section annotations were removed from
         * mainline; redefine them all as no-ops so shared driver code that
         * still uses them keeps compiling on both old and new kernels. */
        #undef __devexit_p
        #undef __devexit
        #undef __devinit
        #undef __devinitdata
        #define __devexit_p(x) &(x)
        #define __devexit
        #define __devinit
        #define __devinitdata

#endif /* 2.4.17 => 2.4.13 */
1025
1026/*****************************************************************************/
1027#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) )
1028#define NETIF_MSG_HW	0x2000
1029#define NETIF_MSG_WOL	0x4000
1030
1031#ifndef netif_msg_hw
1032#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
1033#endif
1034#ifndef netif_msg_wol
1035#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
1036#endif
1037#endif /* 2.4.18 */
1038
1039/*****************************************************************************/
1040
1041/*****************************************************************************/
1042/* 2.4.20 => 2.4.19 */
1043#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) )
1044
1045/* we won't support NAPI on less than 2.4.20 */
1046#ifdef NAPI
1047#undef NAPI
1048#endif
1049
1050#endif /* 2.4.20 => 2.4.19 */
1051
1052/*****************************************************************************/
1053/* 2.4.22 => 2.4.17 */
1054#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1055#define pci_name(x)	((x)->slot_name)
1056
1057#ifndef SUPPORTED_10000baseT_Full
1058#define SUPPORTED_10000baseT_Full	(1 << 12)
1059#endif
1060#ifndef ADVERTISED_10000baseT_Full
1061#define ADVERTISED_10000baseT_Full	(1 << 12)
1062#endif
1063#endif
1064
1065/*****************************************************************************/
1066/* 2.4.22 => 2.4.17 */
1067
1068#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) )
1069#ifndef IGB_NO_LRO
1070#define IGB_NO_LRO
1071#endif
1072#endif
1073
1074/*****************************************************************************/
1075/*****************************************************************************/
1076/* 2.4.23 => 2.4.22 */
1077#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) )
1078/*****************************************************************************/
1079#ifdef NAPI
1080#ifndef netif_poll_disable
1081#define netif_poll_disable(x) _kc_netif_poll_disable(x)
/* Keep the NAPI poll routine from running on @netdev by claiming the
 * RX-schedule bit; sleeps politely until the bit is won. */
static inline void _kc_netif_poll_disable(struct net_device *netdev)
{
	while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) {
		/* No hurry */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}
}
1090#endif
1091#ifndef netif_poll_enable
1092#define netif_poll_enable(x) _kc_netif_poll_enable(x)
/* Release the RX-schedule bit taken by _kc_netif_poll_disable() so the
 * poll routine may be scheduled again. */
static inline void _kc_netif_poll_enable(struct net_device *netdev)
{
	clear_bit(__LINK_STATE_RX_SCHED, &netdev->state);
}
1097#endif
1098#endif /* NAPI */
1099#ifndef netif_tx_disable
1100#define netif_tx_disable(x) _kc_netif_tx_disable(x)
/* Stop the transmit queue while holding xmit_lock, so the stop is
 * serialized against a concurrent hard_start_xmit on @dev. */
static inline void _kc_netif_tx_disable(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
	netif_stop_queue(dev);
	spin_unlock_bh(&dev->xmit_lock);
}
1107#endif
1108#else /* 2.4.23 => 2.4.22 */
1109#define HAVE_SCTP
1110#endif /* 2.4.23 => 2.4.22 */
1111
1112/*****************************************************************************/
1113/* 2.6.4 => 2.6.0 */
1114#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \
1115    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1116      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) )
1117#define ETHTOOL_OPS_COMPAT
1118#endif /* 2.6.4 => 2.6.0 */
1119
1120/*****************************************************************************/
1121#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) )
1122#define __user
1123#endif /* < 2.4.27 */
1124
1125/*****************************************************************************/
1126/* 2.5.71 => 2.4.x */
1127#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) )
1128#define sk_protocol protocol
1129#define pci_get_device pci_find_device
1130#endif /* 2.5.70 => 2.4.x */
1131
1132/*****************************************************************************/
1133/* < 2.4.27 or 2.6.0 <= 2.6.5 */
1134#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \
1135    ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \
1136      LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) )
1137
1138#ifndef netif_msg_init
1139#define netif_msg_init _kc_netif_msg_init
1140static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits)
1141{
1142	/* use default */
1143	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
1144		return default_msg_enable_bits;
1145	if (debug_value == 0) /* no output */
1146		return 0;
1147	/* set low N bits */
1148	return (1 << debug_value) -1;
1149}
1150#endif
1151
1152#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */
1153/*****************************************************************************/
1154#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \
1155     (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \
1156      ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )))
1157#define netdev_priv(x) x->priv
1158#endif
1159
1160/*****************************************************************************/
1161/* <= 2.5.0 */
1162#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) )
1163#include <linux/rtnetlink.h>
1164#undef pci_register_driver
1165#define pci_register_driver pci_module_init
1166
1167/*
1168 * Most of the dma compat code is copied/modifed from the 2.4.37
1169 * /include/linux/libata-compat.h header file
1170 */
1171/* These definitions mirror those in pci.h, so they can be used
1172 * interchangeably with their PCI_ counterparts */
1173enum dma_data_direction {
1174	DMA_BIDIRECTIONAL = 0,
1175	DMA_TO_DEVICE = 1,
1176	DMA_FROM_DEVICE = 2,
1177	DMA_NONE = 3,
1178};
1179
1180struct device {
1181	struct pci_dev pdev;
1182};
1183
/* The compat struct device wraps a pci_dev as its only member, so both
 * conversions are plain pointer casts.
 * NOTE(review): relies on the pci_dev staying the first member of the
 * struct device defined above -- confirm if that struct ever changes. */
static inline struct pci_dev *to_pci_dev (struct device *dev)
{
	return (struct pci_dev *) dev;
}
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return (struct device *) pdev;
}
1192
1193#define pdev_printk(lvl, pdev, fmt, args...)	\
1194	printk("%s %s: " fmt, lvl, pci_name(pdev), ## args)
1195#define dev_err(dev, fmt, args...)            \
1196	pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args)
1197#define dev_info(dev, fmt, args...)            \
1198	pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args)
1199#define dev_warn(dev, fmt, args...)            \
1200	pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args)
1201#define dev_notice(dev, fmt, args...)            \
1202	pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args)
1203#define dev_dbg(dev, fmt, args...) \
1204	pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args)
1205
1206/* NOTE: dangerous! we ignore the 'gfp' argument */
1207#define dma_alloc_coherent(dev,sz,dma,gfp) \
1208	pci_alloc_consistent(to_pci_dev(dev),(sz),(dma))
1209#define dma_free_coherent(dev,sz,addr,dma_addr) \
1210	pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr))
1211
1212#define dma_map_page(dev,a,b,c,d) \
1213	pci_map_page(to_pci_dev(dev),(a),(b),(c),(d))
1214#define dma_unmap_page(dev,a,b,c) \
1215	pci_unmap_page(to_pci_dev(dev),(a),(b),(c))
1216
1217#define dma_map_single(dev,a,b,c) \
1218	pci_map_single(to_pci_dev(dev),(a),(b),(c))
1219#define dma_unmap_single(dev,a,b,c) \
1220	pci_unmap_single(to_pci_dev(dev),(a),(b),(c))
1221
/* Map/unmap a scatterlist via the old PCI DMA API.  The expansions must be
 * fully parenthesized: the previous versions were missing the closing
 * parenthesis of the pci_*_sg() call, breaking compilation of any caller. */
#define dma_map_sg(dev, sg, nents, dir) \
	pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir))
#define dma_unmap_sg(dev, sg, nents, dir) \
	pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir))
1226
1227#define dma_sync_single(dev,a,b,c) \
1228	pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c))
1229
1230/* for range just sync everything, that's all the pci API can do */
1231#define dma_sync_single_range(dev,addr,off,sz,dir) \
1232	pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir))
1233
1234#define dma_set_mask(dev,mask) \
1235	pci_set_dma_mask(to_pci_dev(dev),(mask))
1236
/* hlist_* code - double linked lists */
struct hlist_head {
	struct hlist_node *first;
};

struct hlist_node {
	struct hlist_node *next, **pprev;
};

/* Unlink @n from its list, leaving its own pointers untouched. */
static inline void __hlist_del(struct hlist_node *n)
{
	struct hlist_node **pprev = n->pprev;
	struct hlist_node *next = n->next;

	*pprev = next;
	if (next)
		next->pprev = pprev;
}

/* Unlink @n and clear its link pointers. */
static inline void hlist_del(struct hlist_node *n)
{
	__hlist_del(n);
	n->pprev = NULL;
	n->next = NULL;
}

/* Insert @n at the front of list @h. */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *old_first = h->first;

	n->next = old_first;
	if (old_first)
		old_first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* True when @h has no entries. */
static inline int hlist_empty(const struct hlist_head *h)
{
	return h->first == NULL;
}

#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = {  .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

/* Prepare a node that is not yet on any list. */
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
	h->pprev = NULL;
	h->next = NULL;
}
1284
1285#ifndef might_sleep
1286#define might_sleep()
1287#endif
1288#else
1289static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
1290{
1291	return &pdev->dev;
1292}
1293#endif /* <= 2.5.0 */
1294
1295/*****************************************************************************/
1296/* 2.5.28 => 2.4.23 */
1297#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
1298
1299#include <linux/tqueue.h>
1300#define work_struct tq_struct
1301#undef INIT_WORK
1302#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a)
1303#undef container_of
1304#define container_of list_entry
1305#define schedule_work schedule_task
1306#define flush_scheduled_work flush_scheduled_tasks
1307#define cancel_work_sync(x) flush_scheduled_work()
1308
1309#endif /* 2.5.28 => 2.4.17 */
1310
1311/*****************************************************************************/
1312/* 2.6.0 => 2.5.28 */
1313#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
1314#ifndef read_barrier_depends
1315#define read_barrier_depends() rmb()
1316#endif
1317
1318#undef get_cpu
1319#define get_cpu() smp_processor_id()
1320#undef put_cpu
1321#define put_cpu() do { } while(0)
1322#define MODULE_INFO(version, _version)
1323#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1324#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
1325#endif
1326#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT
1327#define CONFIG_IGB_DISABLE_PACKET_SPLIT 1
1328#endif
1329
1330#define dma_set_coherent_mask(dev,mask) 1
1331
1332#undef dev_put
1333#define dev_put(dev) __dev_put(dev)
1334
1335#ifndef skb_fill_page_desc
1336#define skb_fill_page_desc _kc_skb_fill_page_desc
1337extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size);
1338#endif
1339
1340#undef ALIGN
1341#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
1342
1343#ifndef page_count
1344#define page_count(p) atomic_read(&(p)->count)
1345#endif
1346
1347#ifdef MAX_NUMNODES
1348#undef MAX_NUMNODES
1349#endif
1350#define MAX_NUMNODES 1
1351
1352/* find_first_bit and find_next bit are not defined for most
1353 * 2.4 kernels (except for the redhat 2.4.21 kernels
1354 */
1355#include <linux/bitops.h>
1356#define BITOP_WORD(nr)          ((nr) / BITS_PER_LONG)
1357#undef find_next_bit
1358#define find_next_bit _kc_find_next_bit
1359extern unsigned long _kc_find_next_bit(const unsigned long *addr,
1360                                       unsigned long size,
1361                                       unsigned long offset);
1362#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
1363
1364
1365#ifndef netdev_name
1366static inline const char *_kc_netdev_name(const struct net_device *dev)
1367{
1368	if (strchr(dev->name, '%'))
1369		return "(unregistered net_device)";
1370	return dev->name;
1371}
1372#define netdev_name(netdev)	_kc_netdev_name(netdev)
1373#endif /* netdev_name */
1374
1375#ifndef strlcpy
1376#define strlcpy _kc_strlcpy
1377extern size_t _kc_strlcpy(char *dest, const char *src, size_t size);
1378#endif /* strlcpy */
1379
1380#ifndef do_div
1381#if BITS_PER_LONG == 64
1382# define do_div(n,base) ({					\
1383	uint32_t __base = (base);				\
1384	uint32_t __rem;						\
1385	__rem = ((uint64_t)(n)) % __base;			\
1386	(n) = ((uint64_t)(n)) / __base;				\
1387	__rem;							\
1388 })
1389#elif BITS_PER_LONG == 32
1390extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor);
1391# define do_div(n,base) ({				\
1392	uint32_t __base = (base);			\
1393	uint32_t __rem;					\
1394	if (likely(((n) >> 32) == 0)) {			\
1395		__rem = (uint32_t)(n) % __base;		\
1396		(n) = (uint32_t)(n) / __base;		\
1397	} else 						\
1398		__rem = _kc__div64_32(&(n), __base);	\
1399	__rem;						\
1400 })
1401#else /* BITS_PER_LONG == ?? */
1402# error do_div() does not yet support the C64
1403#endif /* BITS_PER_LONG */
1404#endif /* do_div */
1405
1406#ifndef NSEC_PER_SEC
1407#define NSEC_PER_SEC	1000000000L
1408#endif
1409
1410#undef HAVE_I2C_SUPPORT
1411#else /* 2.6.0 */
1412#if IS_ENABLED(CONFIG_I2C_ALGOBIT) && \
1413	(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,9)))
1414#define HAVE_I2C_SUPPORT
1415#endif /* IS_ENABLED(CONFIG_I2C_ALGOBIT) */
1416
1417#endif /* 2.6.0 => 2.5.28 */
1418/*****************************************************************************/
1419#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) )
1420#define dma_pool pci_pool
1421#define dma_pool_destroy pci_pool_destroy
1422#define dma_pool_alloc pci_pool_alloc
1423#define dma_pool_free pci_pool_free
1424
1425#define dma_pool_create(name,dev,size,align,allocation) \
1426       pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation))
1427#endif /* < 2.6.3 */
1428
1429/*****************************************************************************/
1430/* 2.6.4 => 2.6.0 */
1431#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1432#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
1433#endif /* 2.6.4 => 2.6.0 */
1434
1435/*****************************************************************************/
1436/* 2.6.5 => 2.6.0 */
1437#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
1438#define dma_sync_single_for_cpu		dma_sync_single
1439#define dma_sync_single_for_device	dma_sync_single
1440#define dma_sync_single_range_for_cpu		dma_sync_single_range
1441#define dma_sync_single_range_for_device	dma_sync_single_range
1442#ifndef pci_dma_mapping_error
1443#define pci_dma_mapping_error _kc_pci_dma_mapping_error
1444static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr)
1445{
1446	return dma_addr == 0;
1447}
1448#endif
1449#endif /* 2.6.5 => 2.6.0 */
1450
1451/*****************************************************************************/
1452#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
1453extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...);
1454#define scnprintf(buf, size, fmt, args...) _kc_scnprintf(buf, size, fmt, ##args)
1455#endif /* < 2.6.4 */
1456
1457/*****************************************************************************/
1458#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) )
1459/* taken from 2.6 include/linux/bitmap.h */
1460#undef bitmap_zero
1461#define bitmap_zero _kc_bitmap_zero
1462static inline void _kc_bitmap_zero(unsigned long *dst, int nbits)
1463{
1464        if (nbits <= BITS_PER_LONG)
1465                *dst = 0UL;
1466        else {
1467                int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
1468                memset(dst, 0, len);
1469        }
1470}
1471#define random_ether_addr _kc_random_ether_addr
/* Fill @addr with a random MAC address: clear the multicast bit and set
 * the locally-administered bit, as required for software-assigned MACs. */
static inline void _kc_random_ether_addr(u8 *addr)
{
        get_random_bytes(addr, ETH_ALEN);
        addr[0] &= 0xfe; /* clear multicast */
        addr[0] |= 0x02; /* set local assignment */
}
1478#define page_to_nid(x) 0
1479
1480#endif /* < 2.6.6 */
1481
1482/*****************************************************************************/
1483#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) )
1484#undef if_mii
1485#define if_mii _kc_if_mii
/* Interpret the ifreq's ifr_ifru union area as MII ioctl data, for
 * kernels that do not provide if_mii(). */
static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq)
{
	return (struct mii_ioctl_data *) &rq->ifr_ifru;
}
1490
1491#ifndef __force
1492#define __force
1493#endif
1494#endif /* < 2.6.7 */
1495
1496/*****************************************************************************/
1497#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) )
1498#ifndef PCI_EXP_DEVCTL
1499#define PCI_EXP_DEVCTL 8
1500#endif
1501#ifndef PCI_EXP_DEVCTL_CERE
1502#define PCI_EXP_DEVCTL_CERE 0x0001
1503#endif
1504#define PCI_EXP_FLAGS		2	/* Capabilities register */
1505#define PCI_EXP_FLAGS_VERS	0x000f	/* Capability version */
1506#define PCI_EXP_FLAGS_TYPE	0x00f0	/* Device/Port type */
1507#define  PCI_EXP_TYPE_ENDPOINT	0x0	/* Express Endpoint */
1508#define  PCI_EXP_TYPE_LEG_END	0x1	/* Legacy Endpoint */
1509#define  PCI_EXP_TYPE_ROOT_PORT 0x4	/* Root Port */
1510#define  PCI_EXP_TYPE_DOWNSTREAM 0x6	/* Downstream Port */
1511#define PCI_EXP_FLAGS_SLOT	0x0100	/* Slot implemented */
1512#define PCI_EXP_DEVCAP		4	/* Device capabilities */
1513#define PCI_EXP_DEVSTA		10	/* Device Status */
/* Uninterruptible sleep of at least @x milliseconds.  The argument is
 * parenthesized so expressions such as msleep(a + b) expand correctly
 * (the previous expansion applied '* HZ' to the raw argument text). */
#define msleep(x)	do { set_current_state(TASK_UNINTERRUPTIBLE); \
				schedule_timeout(((x) * HZ)/1000 + 2); \
			} while (0)
1517
1518#endif /* < 2.6.8 */
1519
1520/*****************************************************************************/
1521#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9))
1522#include <net/dsfield.h>
1523#define __iomem
1524
1525#ifndef kcalloc
1526#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags)
1527extern void *_kc_kzalloc(size_t size, int flags);
1528#endif
1529#define MSEC_PER_SEC    1000L
/* Convert @j jiffies to milliseconds, choosing the arithmetic at compile
 * time based on how HZ relates to MSEC_PER_SEC (mirrors the 2.6 kernel). */
static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	/* HZ divides 1000 evenly: simple multiply */
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	/* 1000 divides HZ evenly: divide, rounding up */
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
	/* general case */
	return (j * MSEC_PER_SEC) / HZ;
#endif
}
/* Convert @m milliseconds to jiffies, clamping to MAX_JIFFY_OFFSET and
 * rounding up so the caller never sleeps less than requested. */
static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m)
{
	if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return m * (HZ / MSEC_PER_SEC);
#else
	return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC;
#endif
}
1552
1553#define msleep_interruptible _kc_msleep_interruptible
/* Sleep up to @msecs milliseconds, waking early if a signal is pending.
 * Returns the remaining time in milliseconds (0 when the sleep completed). */
static inline unsigned long _kc_msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		timeout = schedule_timeout(timeout);
	}
	return _kc_jiffies_to_msecs(timeout);
}
1564
1565/* Basic mode control register. */
1566#define BMCR_SPEED1000		0x0040  /* MSB of Speed (1000)         */
1567
1568#ifndef __le16
1569#define __le16 u16
1570#endif
1571#ifndef __le32
1572#define __le32 u32
1573#endif
1574#ifndef __le64
1575#define __le64 u64
1576#endif
1577#ifndef __be16
1578#define __be16 u16
1579#endif
1580#ifndef __be32
1581#define __be32 u32
1582#endif
1583#ifndef __be64
1584#define __be64 u64
1585#endif
1586
/* Return the VLAN Ethernet header of @skb, using the old-style
 * skb->mac.raw pointer to the link-layer header. */
static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
{
	return (struct vlan_ethhdr *)skb->mac.raw;
}
1591
1592/* Wake-On-Lan options. */
1593#define WAKE_PHY		(1 << 0)
1594#define WAKE_UCAST		(1 << 1)
1595#define WAKE_MCAST		(1 << 2)
1596#define WAKE_BCAST		(1 << 3)
1597#define WAKE_ARP		(1 << 4)
1598#define WAKE_MAGIC		(1 << 5)
1599#define WAKE_MAGICSECURE	(1 << 6) /* only meaningful if WAKE_MAGIC */
1600
1601#define skb_header_pointer _kc_skb_header_pointer
1602static inline void *_kc_skb_header_pointer(const struct sk_buff *skb,
1603					    int offset, int len, void *buffer)
1604{
1605	int hlen = skb_headlen(skb);
1606
1607	if (hlen - offset >= len)
1608		return skb->data + offset;
1609
1610#ifdef MAX_SKB_FRAGS
1611	if (skb_copy_bits(skb, offset, buffer, len) < 0)
1612		return NULL;
1613
1614	return buffer;
1615#else
1616	return NULL;
1617#endif
1618
1619#ifndef NETDEV_TX_OK
1620#define NETDEV_TX_OK 0
1621#endif
1622#ifndef NETDEV_TX_BUSY
1623#define NETDEV_TX_BUSY 1
1624#endif
1625#ifndef NETDEV_TX_LOCKED
1626#define NETDEV_TX_LOCKED -1
1627#endif
1628}
1629
1630#ifndef __bitwise
1631#define __bitwise
1632#endif
1633#endif /* < 2.6.9 */
1634
1635/*****************************************************************************/
1636#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
1637#ifdef module_param_array_named
1638#undef module_param_array_named
1639#define module_param_array_named(name, array, type, nump, perm)          \
1640	static struct kparam_array __param_arr_##name                    \
1641	= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \
1642	    sizeof(array[0]), array };                                   \
1643	module_param_call(name, param_array_set, param_array_get,        \
1644			  &__param_arr_##name, perm)
1645#endif /* module_param_array_named */
1646/*
1647 * num_online is broken for all < 2.6.10 kernels.  This is needed to support
1648 * Node module parameter of ixgbe.
1649 */
1650#undef num_online_nodes
1651#define num_online_nodes(n) 1
1652extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES);
1653#undef node_online_map
1654#define node_online_map _kcompat_node_online_map
1655#define pci_get_class pci_find_class
1656#endif /* < 2.6.10 */
1657
1658/*****************************************************************************/
1659#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) )
1660#define PCI_D0      0
1661#define PCI_D1      1
1662#define PCI_D2      2
1663#define PCI_D3hot   3
1664#define PCI_D3cold  4
1665typedef int pci_power_t;
1666#define pci_choose_state(pdev,state) state
1667#define PMSG_SUSPEND 3
1668#define PCI_EXP_LNKCTL	16
1669
1670#undef NETIF_F_LLTX
1671
1672#ifndef ARCH_HAS_PREFETCH
1673#define prefetch(X)
1674#endif
1675
1676#ifndef NET_IP_ALIGN
1677#define NET_IP_ALIGN 2
1678#endif
1679
1680#define KC_USEC_PER_SEC	1000000L
1681#define usecs_to_jiffies _kc_usecs_to_jiffies
/* Convert @j jiffies to microseconds; compile-time branch on how HZ
 * relates to USEC_PER_SEC, mirroring the msecs helpers above. */
static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j)
{
#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
	return (KC_USEC_PER_SEC / HZ) * j;
#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
	return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC);
#else
	return (j * KC_USEC_PER_SEC) / HZ;
#endif
}
/* Convert @m microseconds to jiffies, clamping to MAX_JIFFY_OFFSET and
 * rounding up so a caller never sleeps less than requested. */
static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m)
{
	if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ)
	return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ);
#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC)
	return m * (HZ / KC_USEC_PER_SEC);
#else
	return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC;
#endif
}
1704
1705#define PCI_EXP_LNKCAP		12	/* Link Capabilities */
1706#define PCI_EXP_LNKSTA		18	/* Link Status */
1707#define PCI_EXP_SLTCAP		20	/* Slot Capabilities */
1708#define PCI_EXP_SLTCTL		24	/* Slot Control */
1709#define PCI_EXP_SLTSTA		26	/* Slot Status */
1710#define PCI_EXP_RTCTL		28	/* Root Control */
1711#define PCI_EXP_RTCAP		30	/* Root Capabilities */
1712#define PCI_EXP_RTSTA		32	/* Root Status */
1713#endif /* < 2.6.11 */
1714
1715/*****************************************************************************/
1716#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) )
1717#include <linux/reboot.h>
1718#define USE_REBOOT_NOTIFIER
1719
1720/* Generic MII registers. */
1721#define MII_CTRL1000        0x09        /* 1000BASE-T control          */
1722#define MII_STAT1000        0x0a        /* 1000BASE-T status           */
1723/* Advertisement control register. */
1724#define ADVERTISE_PAUSE_CAP     0x0400  /* Try for pause               */
1725#define ADVERTISE_PAUSE_ASYM    0x0800  /* Try for asymmetric pause     */
1726/* Link partner ability register. */
1727#define LPA_PAUSE_CAP		0x0400	/* Can pause                   */
1728#define LPA_PAUSE_ASYM		0x0800	/* Can pause asymetrically     */
1729/* 1000BASE-T Control register */
1730#define ADVERTISE_1000FULL      0x0200  /* Advertise 1000BASE-T full duplex */
1731#define ADVERTISE_1000HALF	0x0100  /* Advertise 1000BASE-T half duplex */
1732/* 1000BASE-T Status register */
1733#define LPA_1000LOCALRXOK	0x2000	/* Link partner local receiver status */
1734#define LPA_1000REMRXOK		0x1000	/* Link partner remote receiver status */
1735
1736#ifndef is_zero_ether_addr
1737#define is_zero_ether_addr _kc_is_zero_ether_addr
1738static inline int _kc_is_zero_ether_addr(const u8 *addr)
1739{
1740	return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
1741}
1742#endif /* is_zero_ether_addr */
1743#ifndef is_multicast_ether_addr
1744#define is_multicast_ether_addr _kc_is_multicast_ether_addr
1745static inline int _kc_is_multicast_ether_addr(const u8 *addr)
1746{
1747	return addr[0] & 0x01;
1748}
1749#endif /* is_multicast_ether_addr */
1750#endif /* < 2.6.12 */
1751
1752/*****************************************************************************/
1753#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
1754#ifndef kstrdup
1755#define kstrdup _kc_kstrdup
1756extern char *_kc_kstrdup(const char *s, unsigned int gfp);
1757#endif
1758#endif /* < 2.6.13 */
1759
1760/*****************************************************************************/
1761#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
1762#define pm_message_t u32
1763#ifndef kzalloc
1764#define kzalloc _kc_kzalloc
1765extern void *_kc_kzalloc(size_t size, int flags);
1766#endif
1767
1768/* Generic MII registers. */
1769#define MII_ESTATUS	    0x0f	/* Extended Status */
1770/* Basic mode status register. */
1771#define BMSR_ESTATEN		0x0100	/* Extended Status in R15 */
1772/* Extended status register. */
1773#define ESTATUS_1000_TFULL	0x2000	/* Can do 1000BT Full */
1774#define ESTATUS_1000_THALF	0x1000	/* Can do 1000BT Half */
1775
1776#define SUPPORTED_Pause	        (1 << 13)
1777#define SUPPORTED_Asym_Pause	(1 << 14)
1778#define ADVERTISED_Pause	(1 << 13)
1779#define ADVERTISED_Asym_Pause	(1 << 14)
1780
1781#if (!(RHEL_RELEASE_CODE && \
1782       (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \
1783       (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))))
1784#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t))
1785#define gfp_t unsigned
1786#else
1787typedef unsigned gfp_t;
1788#endif
1789#endif /* !RHEL4.3->RHEL5.0 */
1790
1791#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) )
1792#ifdef CONFIG_X86_64
1793#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir)       \
1794	dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir))
1795#define dma_sync_single_range_for_device(dev, addr, off, sz, dir)    \
1796	dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir))
1797#endif
1798#endif
1799#endif /* < 2.6.14 */
1800
1801/*****************************************************************************/
1802#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) )
1803#ifndef vmalloc_node
1804#define vmalloc_node(a,b) vmalloc(a)
1805#endif /* vmalloc_node*/
1806
1807#define setup_timer(_timer, _function, _data) \
1808do { \
1809	(_timer)->function = _function; \
1810	(_timer)->data = _data; \
1811	init_timer(_timer); \
1812} while (0)
1813#ifndef device_can_wakeup
1814#define device_can_wakeup(dev)	(1)
1815#endif
1816#ifndef device_set_wakeup_enable
1817#define device_set_wakeup_enable(dev, val)	do{}while(0)
1818#endif
1819#ifndef device_init_wakeup
1820#define device_init_wakeup(dev,val) do {} while (0)
1821#endif
/* Compare two Ethernet addresses; returns 0 when equal, non-zero otherwise
 * (same contract as the kernel's compare_ether_addr()).
 * NOTE(review): reads each address as three u16 words, so both pointers are
 * assumed to be at least 16-bit aligned -- confirm this holds for callers. */
static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2)
{
	const u16 *a = (const u16 *) addr1;
	const u16 *b = (const u16 *) addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
}
1829#undef compare_ether_addr
1830#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2)
1831#endif /* < 2.6.15 */
1832
1833/*****************************************************************************/
1834#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) )
1835#undef DEFINE_MUTEX
1836#define DEFINE_MUTEX(x)	DECLARE_MUTEX(x)
1837#define mutex_lock(x)	down_interruptible(x)
1838#define mutex_unlock(x)	up(x)
1839
1840#ifndef ____cacheline_internodealigned_in_smp
1841#ifdef CONFIG_SMP
1842#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp
1843#else
1844#define ____cacheline_internodealigned_in_smp
1845#endif /* CONFIG_SMP */
1846#endif /* ____cacheline_internodealigned_in_smp */
1847#undef HAVE_PCI_ERS
1848#else /* 2.6.16 and above */
1849#undef HAVE_PCI_ERS
1850#define HAVE_PCI_ERS
1851#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) )
1852#ifdef device_can_wakeup
1853#undef device_can_wakeup
1854#endif /* device_can_wakeup */
1855#define device_can_wakeup(dev) 1
1856#endif /* SLE_VERSION(10,4,0) */
1857#endif /* < 2.6.16 */
1858
1859/*****************************************************************************/
1860#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) )
1861#ifndef dev_notice
1862#define dev_notice(dev, fmt, args...)            \
1863	dev_printk(KERN_NOTICE, dev, fmt, ## args)
1864#endif
1865
1866#ifndef first_online_node
1867#define first_online_node 0
1868#endif
1869#ifndef NET_SKB_PAD
1870#define NET_SKB_PAD 16
1871#endif
1872#endif /* < 2.6.17 */
1873
1874/*****************************************************************************/
1875#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) )
1876
1877#ifndef IRQ_HANDLED
1878#define irqreturn_t void
1879#define IRQ_HANDLED
1880#define IRQ_NONE
1881#endif
1882
1883#ifndef IRQF_PROBE_SHARED
1884#ifdef SA_PROBEIRQ
1885#define IRQF_PROBE_SHARED SA_PROBEIRQ
1886#else
1887#define IRQF_PROBE_SHARED 0
1888#endif
1889#endif
1890
1891#ifndef IRQF_SHARED
1892#define IRQF_SHARED SA_SHIRQ
1893#endif
1894
1895#ifndef ARRAY_SIZE
1896#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
1897#endif
1898
1899#ifndef FIELD_SIZEOF
1900#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
1901#endif
1902
1903#ifndef skb_is_gso
1904#ifdef NETIF_F_TSO
1905#define skb_is_gso _kc_skb_is_gso
/* Non-zero when @skb carries GSO data (gso_size is set for TSO frames). */
static inline int _kc_skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}
1910#else
1911#define skb_is_gso(a) 0
1912#endif
1913#endif
1914
1915#ifndef resource_size_t
1916#define resource_size_t unsigned long
1917#endif
1918
1919#ifdef skb_pad
1920#undef skb_pad
1921#endif
1922#define skb_pad(x,y) _kc_skb_pad(x, y)
1923int _kc_skb_pad(struct sk_buff *skb, int pad);
1924#ifdef skb_padto
1925#undef skb_padto
1926#endif
1927#define skb_padto(x,y) _kc_skb_padto(x, y)
1928static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len)
1929{
1930	unsigned int size = skb->len;
1931	if(likely(size >= len))
1932		return 0;
1933	return _kc_skb_pad(skb, len - size);
1934}
1935
1936#ifndef DECLARE_PCI_UNMAP_ADDR
1937#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
1938	dma_addr_t ADDR_NAME
1939#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
1940	u32 LEN_NAME
1941#define pci_unmap_addr(PTR, ADDR_NAME) \
1942	((PTR)->ADDR_NAME)
1943#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
1944	(((PTR)->ADDR_NAME) = (VAL))
1945#define pci_unmap_len(PTR, LEN_NAME) \
1946	((PTR)->LEN_NAME)
1947#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
1948	(((PTR)->LEN_NAME) = (VAL))
1949#endif /* DECLARE_PCI_UNMAP_ADDR */
1950#endif /* < 2.6.18 */
1951
1952/*****************************************************************************/
1953#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
1954
1955#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0)))
1956#define i_private u.generic_ip
1957#endif /* >= RHEL 5.0 */
1958
1959#ifndef DIV_ROUND_UP
1960#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
1961#endif
1962#ifndef __ALIGN_MASK
1963#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
1964#endif
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) )
/* Kernels before 2.6.19 pass a third struct pt_regs * argument to interrupt
 * handlers; the two-argument prototype used by this driver is adapted below.
 * Some RHEL 4/5 kernels already back-ported the irq_handler_t typedef, hence
 * the exclusion.
 */
#if (!((RHEL_RELEASE_CODE && \
        ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \
          RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \
         (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0))))))
typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *);
#endif
#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
#undef CONFIG_INET_LRO
#undef CONFIG_INET_LRO_MODULE
#ifdef IXGBE_FCOE
#undef CONFIG_FCOE
#undef CONFIG_FCOE_MODULE
#endif /* IXGBE_FCOE */
#endif
typedef irqreturn_t (*new_handler_t)(int, void*);
static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
#else /* 2.4.x */
typedef void (*irq_handler_t)(int, void*, struct pt_regs *);
typedef void (*new_handler_t)(int, void*);
static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id)
#endif /* >= 2.5.x */
{
	/* NOTE(review): the two-argument handler is cast to the old
	 * three-argument type and registered as-is; the handler simply never
	 * reads the extra pt_regs argument.  This relies on the calling
	 * convention tolerating the unread argument — longstanding behavior
	 * for this compat layer, but worth confirming on new architectures.
	 */
	irq_handler_t new_handler = (irq_handler_t) handler;
	return request_irq(irq, new_handler, flags, devname, dev_id);
}
1991
1992#undef request_irq
1993#define request_irq(irq, handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id))
1994
1995#define irq_handler_t new_handler_t
1996/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */
1997#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
1998#define PCIE_CONFIG_SPACE_LEN 256
1999#define PCI_CONFIG_SPACE_LEN 64
2000#define PCIE_LINK_STATUS 0x12
2001#define pci_config_space_ich8lan() do {} while(0)
2002#undef pci_save_state
2003extern int _kc_pci_save_state(struct pci_dev *);
2004#define pci_save_state(pdev) _kc_pci_save_state(pdev)
2005#undef pci_restore_state
2006extern void _kc_pci_restore_state(struct pci_dev *);
2007#define pci_restore_state(pdev) _kc_pci_restore_state(pdev)
2008#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */
2009
2010#ifdef HAVE_PCI_ERS
2011#undef free_netdev
2012extern void _kc_free_netdev(struct net_device *);
2013#define free_netdev(netdev) _kc_free_netdev(netdev)
2014#endif
/* PCIe AER support does not exist before 2.6.19; report success so that
 * callers proceed normally without error reporting.
 */
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	return 0;
}
2019#define pci_disable_pcie_error_reporting(dev) do {} while (0)
2020#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0)
2021
2022extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp);
2023#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp)
2024#ifndef bool
2025#define bool _Bool
2026#define true 1
2027#define false 0
2028#endif
2029#else /* 2.6.19 */
2030#include <linux/aer.h>
2031#include <linux/string.h>
2032#endif /* < 2.6.19 */
2033
2034/*****************************************************************************/
2035#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) )
#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) )
/* Pre-2.6.20 work_struct carries its own data pointer; redefine INIT_WORK
 * so the handler is invoked with the work_struct itself as its argument,
 * matching the 2.6.20+ calling convention this driver codes against.
 */
#undef INIT_WORK
#define INIT_WORK(_work, _func) \
do { \
	INIT_LIST_HEAD(&(_work)->entry); \
	(_work)->pending = 0; \
	(_work)->func = (void (*)(void *))_func; \
	(_work)->data = _work; \
	init_timer(&(_work)->timer); \
} while (0)
#endif
2047
2048#ifndef PCI_VDEVICE
2049#define PCI_VDEVICE(ven, dev)        \
2050	PCI_VENDOR_ID_##ven, (dev),  \
2051	PCI_ANY_ID, PCI_ANY_ID, 0, 0
2052#endif
2053
2054#ifndef PCI_VENDOR_ID_INTEL
2055#define PCI_VENDOR_ID_INTEL 0x8086
2056#endif
2057
2058#ifndef round_jiffies
2059#define round_jiffies(x) x
2060#endif
2061
2062#define csum_offset csum
2063
2064#define HAVE_EARLY_VMALLOC_NODE
2065#define dev_to_node(dev) -1
2066#undef set_dev_node
2067/* remove compiler warning with b=b, for unused variable */
2068#define set_dev_node(a, b) do { (b) = (b); } while(0)
2069
2070#if (!(RHEL_RELEASE_CODE && \
2071       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
2072         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
2073        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \
2074     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
2075typedef __u16 __bitwise __sum16;
2076typedef __u32 __bitwise __wsum;
2077#endif
2078
2079#if (!(RHEL_RELEASE_CODE && \
2080       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \
2081         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \
2082        (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \
2083     !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0)))
2084static inline __wsum csum_unfold(__sum16 n)
2085{
2086	return (__force __wsum)n;
2087}
2088#endif
2089
2090#else /* < 2.6.20 */
2091#define HAVE_DEVICE_NUMA_NODE
2092#endif /* < 2.6.20 */
2093
2094/*****************************************************************************/
2095#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
2096#define to_net_dev(class) container_of(class, struct net_device, class_dev)
2097#define NETDEV_CLASS_DEV
2098#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
2099#define vlan_group_get_device(vg, id) (vg->vlan_devices[id])
2100#define vlan_group_set_device(vg, id, dev)		\
2101	do {						\
2102		if (vg) vg->vlan_devices[id] = dev;	\
2103	} while (0)
2104#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
2105#define pci_channel_offline(pdev) (pdev->error_state && \
2106	pdev->error_state != pci_channel_io_normal)
/* No per-BAR region selection before 2.6.21: fall back to claiming and
 * releasing all regions; the 'bars' mask is ignored.
 */
#define pci_request_selected_regions(pdev, bars, name) \
        pci_request_regions(pdev, name)
/* No trailing semicolon: callers supply their own, and a stray ';' would
 * expand to an extra empty statement and break use in un-braced if/else.
 */
#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev)
2110
2111#ifndef __aligned
2112#define __aligned(x)			__attribute__((aligned(x)))
2113#endif
2114
2115extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev);
2116#define netdev_to_dev(netdev)	\
2117	pci_dev_to_dev(_kc_netdev_to_pdev(netdev))
2118#else
/* From 2.6.21 on, net_device embeds a struct device directly. */
static inline struct device *netdev_to_dev(struct net_device *netdev)
{
	return &netdev->dev;
}
2123
2124#endif /* < 2.6.21 */
2125
2126/*****************************************************************************/
2127#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
2128#define tcp_hdr(skb) (skb->h.th)
2129#define tcp_hdrlen(skb) (skb->h.th->doff << 2)
2130#define skb_transport_offset(skb) (skb->h.raw - skb->data)
2131#define skb_transport_header(skb) (skb->h.raw)
2132#define ipv6_hdr(skb) (skb->nh.ipv6h)
2133#define ip_hdr(skb) (skb->nh.iph)
2134#define skb_network_offset(skb) (skb->nh.raw - skb->data)
2135#define skb_network_header(skb) (skb->nh.raw)
2136#define skb_tail_pointer(skb) skb->tail
2137#define skb_reset_tail_pointer(skb) \
2138	do { \
2139		skb->tail = skb->data; \
2140	} while (0)
2141#define skb_set_tail_pointer(skb, offset) \
2142	do { \
2143		skb->tail = skb->data + offset; \
2144	} while (0)
2145#define skb_copy_to_linear_data(skb, from, len) \
2146				memcpy(skb->data, from, len)
2147#define skb_copy_to_linear_data_offset(skb, offset, from, len) \
2148				memcpy(skb->data + offset, from, len)
2149#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw)
2150#define pci_register_driver pci_module_init
2151#define skb_mac_header(skb) skb->mac.raw
2152
2153#ifdef NETIF_F_MULTI_QUEUE
2154#ifndef alloc_etherdev_mq
2155#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a)
2156#endif
2157#endif /* NETIF_F_MULTI_QUEUE */
2158
2159#ifndef ETH_FCS_LEN
2160#define ETH_FCS_LEN 4
2161#endif
2162#define cancel_work_sync(x) flush_scheduled_work()
2163#ifndef udp_hdr
2164#define udp_hdr _udp_hdr
/* Return the UDP header; assumes the skb transport header has already been
 * set to point at it (see the skb_transport_header compat macro above).
 */
static inline struct udphdr *_udp_hdr(const struct sk_buff *skb)
{
	return (struct udphdr *)skb_transport_header(skb);
}
2169#endif
2170
2171#ifdef cpu_to_be16
2172#undef cpu_to_be16
2173#endif
2174#define cpu_to_be16(x) __constant_htons(x)
2175
2176#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)))
2177enum {
2178	DUMP_PREFIX_NONE,
2179	DUMP_PREFIX_ADDRESS,
2180	DUMP_PREFIX_OFFSET
2181};
2182#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */
2183#ifndef hex_asc
2184#define hex_asc(x)	"0123456789abcdef"[x]
2185#endif
2186#include <linux/ctype.h>
2187extern void _kc_print_hex_dump(const char *level, const char *prefix_str,
2188			       int prefix_type, int rowsize, int groupsize,
2189			       const void *buf, size_t len, bool ascii);
2190#define print_hex_dump(lvl, s, t, r, g, b, l, a) \
2191		_kc_print_hex_dump(lvl, s, t, r, g, b, l, a)
2192#ifndef ADVERTISED_2500baseX_Full
2193#define ADVERTISED_2500baseX_Full (1 << 15)
2194#endif
2195#ifndef SUPPORTED_2500baseX_Full
2196#define SUPPORTED_2500baseX_Full (1 << 15)
2197#endif
2198
2199#ifdef HAVE_I2C_SUPPORT
2200#include <linux/i2c.h>
2201#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)))
2202struct i2c_board_info {
2203	char	driver_name[KOBJ_NAME_LEN];
2204	char	type[I2C_NAME_SIZE];
2205	unsigned short	flags;
2206	unsigned short	addr;
2207	void		*platform_data;
2208};
2209#define I2C_BOARD_INFO(driver, dev_addr) .driver_name = (driver),\
2210			.addr = (dev_addr)
2211#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */
2212#define i2c_new_device(adap, info) _kc_i2c_new_device(adap, info)
2213extern struct i2c_client *
2214_kc_i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
2215#endif /* HAVE_I2C_SUPPORT */
2216
2217#else /* 2.6.22 */
2218#define ETH_TYPE_TRANS_SETS_DEV
2219#define HAVE_NETDEV_STATS_IN_NETDEV
2220#endif /* < 2.6.22 */
2221
2222/*****************************************************************************/
2223#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) )
2224#undef SET_MODULE_OWNER
2225#define SET_MODULE_OWNER(dev) do { } while (0)
2226#endif /* > 2.6.22 */
2227
2228/*****************************************************************************/
2229#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
2230#define netif_subqueue_stopped(_a, _b) 0
2231#ifndef PTR_ALIGN
2232#define PTR_ALIGN(p, a)         ((typeof(p))ALIGN((unsigned long)(p), (a)))
2233#endif
2234
2235#ifndef CONFIG_PM_SLEEP
2236#define CONFIG_PM_SLEEP	CONFIG_PM
2237#endif
2238
2239#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) )
2240#define HAVE_ETHTOOL_GET_PERM_ADDR
2241#endif /* 2.6.14 through 2.6.22 */
2242#endif /* < 2.6.23 */
2243
2244/*****************************************************************************/
2245#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
2246#ifndef ETH_FLAG_LRO
2247#define ETH_FLAG_LRO NETIF_F_LRO
2248#endif
2249
2250/* if GRO is supported then the napi struct must already exist */
2251#ifndef NETIF_F_GRO
2252/* NAPI API changes in 2.6.24 break everything */
2253struct napi_struct {
2254	/* used to look up the real NAPI polling routine */
2255	int (*poll)(struct napi_struct *, int);
2256	struct net_device *dev;
2257	int weight;
2258};
2259#endif
2260
2261#ifdef NAPI
2262extern int __kc_adapter_clean(struct net_device *, int *);
2263extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi);
2264#define netif_napi_add(_netdev, _napi, _poll, _weight) \
2265	do { \
2266		struct napi_struct *__napi = (_napi); \
2267		struct net_device *poll_dev = napi_to_poll_dev(__napi); \
2268		poll_dev->poll = &(__kc_adapter_clean); \
2269		poll_dev->priv = (_napi); \
2270		poll_dev->weight = (_weight); \
2271		set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); \
2272		set_bit(__LINK_STATE_START, &poll_dev->state);\
2273		dev_hold(poll_dev); \
2274		__napi->poll = &(_poll); \
2275		__napi->weight = (_weight); \
2276		__napi->dev = (_netdev); \
2277	} while (0)
2278#define netif_napi_del(_napi) \
2279	do { \
2280		struct net_device *poll_dev = napi_to_poll_dev(_napi); \
2281		WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); \
2282		dev_put(poll_dev); \
2283		memset(poll_dev, 0, sizeof(struct net_device));\
2284	} while (0)
2285#define napi_schedule_prep(_napi) \
2286	(netif_running((_napi)->dev) && netif_rx_schedule_prep(napi_to_poll_dev(_napi)))
2287#define napi_schedule(_napi) \
2288	do { \
2289		if (napi_schedule_prep(_napi)) \
2290			__netif_rx_schedule(napi_to_poll_dev(_napi)); \
2291	} while (0)
2292#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi))
2293#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi))
2294#ifdef CONFIG_SMP
/* Sleep-wait until the backing poll netdev is no longer scheduled for RX
 * polling, i.e. any in-flight poll on another CPU has finished.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	struct net_device *dev = napi_to_poll_dev(n);

	while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) {
		/* No hurry. */
		msleep(1);
	}
}
2304#else
2305#define napi_synchronize(n)	barrier()
2306#endif /* CONFIG_SMP */
2307#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi))
2308#ifndef NETIF_F_GRO
2309#define napi_complete(_napi) netif_rx_complete(napi_to_poll_dev(_napi))
2310#else
2311#define napi_complete(_napi) \
2312	do { \
2313		napi_gro_flush(_napi); \
2314		netif_rx_complete(napi_to_poll_dev(_napi)); \
2315	} while (0)
2316#endif /* NETIF_F_GRO */
2317#else /* NAPI */
2318#define netif_napi_add(_netdev, _napi, _poll, _weight) \
2319	do { \
2320		struct napi_struct *__napi = _napi; \
2321		_netdev->poll = &(_poll); \
2322		_netdev->weight = (_weight); \
2323		__napi->poll = &(_poll); \
2324		__napi->weight = (_weight); \
2325		__napi->dev = (_netdev); \
2326	} while (0)
2327#define netif_napi_del(_a) do {} while (0)
2328#endif /* NAPI */
2329
2330#undef dev_get_by_name
2331#define dev_get_by_name(_a, _b) dev_get_by_name(_b)
2332#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b)
2333#ifndef DMA_BIT_MASK
2334#define DMA_BIT_MASK(n)	(((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1))
2335#endif
2336
2337#ifdef NETIF_F_TSO6
2338#define skb_is_gso_v6 _kc_skb_is_gso_v6
2339static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb)
2340{
2341	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
2342}
2343#endif /* NETIF_F_TSO6 */
2344
2345#ifndef KERN_CONT
2346#define KERN_CONT	""
2347#endif
2348#ifndef pr_err
2349#define pr_err(fmt, arg...) \
2350	printk(KERN_ERR fmt, ##arg)
2351#endif
2352#else /* < 2.6.24 */
2353#define HAVE_ETHTOOL_GET_SSET_COUNT
2354#define HAVE_NETDEV_NAPI_LIST
2355#endif /* < 2.6.24 */
2356
2357/*****************************************************************************/
2358#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) )
2359#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
2360#include <linux/pm_qos_params.h>
2361#else /* >= 3.2.0 */
2362#include <linux/pm_qos.h>
2363#endif /* else >= 3.2.0 */
2364#endif /* > 2.6.24 */
2365
2366/*****************************************************************************/
2367#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) )
2368#define PM_QOS_CPU_DMA_LATENCY	1
2369
2370#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) )
2371#include <linux/latency.h>
2372#define PM_QOS_DEFAULT_VALUE	INFINITE_LATENCY
2373#define pm_qos_add_requirement(pm_qos_class, name, value) \
2374		set_acceptable_latency(name, value)
2375#define pm_qos_remove_requirement(pm_qos_class, name) \
2376		remove_acceptable_latency(name)
2377#define pm_qos_update_requirement(pm_qos_class, name, value) \
2378		modify_acceptable_latency(name, value)
2379#else
2380#define PM_QOS_DEFAULT_VALUE	-1
2381#define pm_qos_add_requirement(pm_qos_class, name, value)
2382#define pm_qos_remove_requirement(pm_qos_class, name)
2383#define pm_qos_update_requirement(pm_qos_class, name, value) { \
2384	if (value != PM_QOS_DEFAULT_VALUE) { \
2385		printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \
2386			pci_name(adapter->pdev)); \
2387	} \
2388}
2389
2390#endif /* > 2.6.18 */
2391
2392#define pci_enable_device_mem(pdev) pci_enable_device(pdev)
2393
2394#ifndef DEFINE_PCI_DEVICE_TABLE
2395#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[]
2396#endif /* DEFINE_PCI_DEVICE_TABLE */
2397
2398
2399#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
2400#ifndef IGB_PROCFS
2401#define IGB_PROCFS
2402#endif /* IGB_PROCFS */
2403#endif /* >= 2.6.0 */
2404
2405#else /* < 2.6.25 */
2406
2407
2408#if IS_ENABLED(CONFIG_HWMON)
2409#ifndef IGB_HWMON
2410#define IGB_HWMON
2411#endif /* IGB_HWMON */
2412#endif /* CONFIG_HWMON */
2413
2414#endif /* < 2.6.25 */
2415
2416/*****************************************************************************/
2417#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
#ifndef clamp_t
/* Clamp @val to the inclusive range [@min, @max] after casting all three to
 * @type; the statement-expression evaluates each argument exactly once.
 */
#define clamp_t(type, val, min, max) ({		\
	type __val = (val);			\
	type __min = (min);			\
	type __max = (max);			\
	__val = __val < __min ? __min : __val;	\
	__val > __max ? __max : __val; })
#endif /* clamp_t */
2426#undef kzalloc_node
2427#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags)
2428
2429extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state);
2430#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s)
2431#else /* < 2.6.26 */
2432#include <linux/pci-aspm.h>
2433#define HAVE_NETDEV_VLAN_FEATURES
2434#ifndef PCI_EXP_LNKCAP_ASPMS
2435#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
2436#endif /* PCI_EXP_LNKCAP_ASPMS */
2437#endif /* < 2.6.26 */
2438/*****************************************************************************/
2439#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
/* Store a link speed (Mb/s) into an ethtool_cmd.  ethtool_cmd has no
 * speed_hi field before 2.6.27, so speeds above 65535 are truncated to the
 * low 16 bits.
 */
static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep,
					     __u32 speed)
{
	ep->speed = (__u16)speed;
	/* ep->speed_hi = (__u16)(speed >> 16); */
}
2446#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set
2447
/* Read the link speed (Mb/s) from an ethtool_cmd; low 16 bits only. */
static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep)
{
	/* no speed_hi before 2.6.27, and probably no need for it yet */
	return (__u32)ep->speed;
}
2453#define ethtool_cmd_speed _kc_ethtool_cmd_speed
2454
2455#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) )
2456#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM))
2457#define ANCIENT_PM 1
2458#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \
2459       (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \
2460       defined(CONFIG_PM_SLEEP))
2461#define NEWER_PM 1
2462#endif
2463#if defined(ANCIENT_PM) || defined(NEWER_PM)
2464#undef device_set_wakeup_enable
2465#define device_set_wakeup_enable(dev, val) \
2466	do { \
2467		u16 pmc = 0; \
2468		int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \
2469		if (pm) { \
2470			pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \
2471				&pmc); \
2472		} \
2473		(dev)->power.can_wakeup = !!(pmc >> 11); \
2474		(dev)->power.should_wakeup = (val && (pmc >> 11)); \
2475	} while (0)
2476#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */
2477#endif /* 2.6.15 through 2.6.27 */
#ifndef netif_napi_del
/* netif_napi_del() does not exist before 2.6.27.  It only needs to do real
 * work — unlinking the napi from the netpoll device list — when both NAPI
 * and netpoll are enabled; otherwise it is a no-op.
 */
#define netif_napi_del(_a) do {} while (0)
#ifdef NAPI
#ifdef CONFIG_NETPOLL
#undef netif_napi_del
/* No trailing semicolon: the caller supplies its own, and a stray ';' would
 * expand to an extra empty statement and break use in un-braced if/else.
 */
#define netif_napi_del(_a) list_del(&(_a)->dev_list)
#endif
#endif
#endif /* netif_napi_del */
2487#ifdef dma_mapping_error
2488#undef dma_mapping_error
2489#endif
2490#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr)
2491
2492#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2493#define HAVE_TX_MQ
2494#endif
2495
2496#ifdef HAVE_TX_MQ
2497extern void _kc_netif_tx_stop_all_queues(struct net_device *);
2498extern void _kc_netif_tx_wake_all_queues(struct net_device *);
2499extern void _kc_netif_tx_start_all_queues(struct net_device *);
2500#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a)
2501#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a)
2502#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a)
2503#undef netif_stop_subqueue
2504#define netif_stop_subqueue(_ndev,_qi) do { \
2505	if (netif_is_multiqueue((_ndev))) \
2506		netif_stop_subqueue((_ndev), (_qi)); \
2507	else \
2508		netif_stop_queue((_ndev)); \
2509	} while (0)
2510#undef netif_start_subqueue
2511#define netif_start_subqueue(_ndev,_qi) do { \
2512	if (netif_is_multiqueue((_ndev))) \
2513		netif_start_subqueue((_ndev), (_qi)); \
2514	else \
2515		netif_start_queue((_ndev)); \
2516	} while (0)
2517#else /* HAVE_TX_MQ */
2518#define netif_tx_stop_all_queues(a) netif_stop_queue(a)
2519#define netif_tx_wake_all_queues(a) netif_wake_queue(a)
2520#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) )
2521#define netif_tx_start_all_queues(a) netif_start_queue(a)
2522#else
2523#define netif_tx_start_all_queues(a) do {} while (0)
2524#endif
2525#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev))
2526#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev))
2527#endif /* HAVE_TX_MQ */
2528#ifndef NETIF_F_MULTI_QUEUE
2529#define NETIF_F_MULTI_QUEUE 0
2530#define netif_is_multiqueue(a) 0
2531#define netif_wake_subqueue(a, b)
2532#endif /* NETIF_F_MULTI_QUEUE */
2533
2534#ifndef __WARN_printf
2535extern void __kc_warn_slowpath(const char *file, const int line,
2536		const char *fmt, ...) __attribute__((format(printf, 3, 4)));
2537#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg)
2538#endif /* __WARN_printf */
2539
2540#ifndef WARN
2541#define WARN(condition, format...) ({						\
2542	int __ret_warn_on = !!(condition);				\
2543	if (unlikely(__ret_warn_on))					\
2544		__WARN_printf(format);					\
2545	unlikely(__ret_warn_on);					\
2546})
2547#endif /* WARN */
2548#undef HAVE_IXGBE_DEBUG_FS
2549#undef HAVE_IGB_DEBUG_FS
2550#else /* < 2.6.27 */
2551#define HAVE_TX_MQ
2552#define HAVE_NETDEV_SELECT_QUEUE
2553#ifdef CONFIG_DEBUG_FS
2554#define HAVE_IXGBE_DEBUG_FS
2555#define HAVE_IGB_DEBUG_FS
2556#endif /* CONFIG_DEBUG_FS */
2557#endif /* < 2.6.27 */
2558
2559/*****************************************************************************/
2560#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )
2561#define pci_ioremap_bar(pdev, bar)	ioremap(pci_resource_start(pdev, bar), \
2562					        pci_resource_len(pdev, bar))
2563#define pci_wake_from_d3 _kc_pci_wake_from_d3
2564#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep
2565extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable);
2566extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev);
2567#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC)
2568#ifndef __skb_queue_head_init
2569static inline void __kc_skb_queue_head_init(struct sk_buff_head *list)
2570{
2571	list->prev = list->next = (struct sk_buff *)list;
2572	list->qlen = 0;
2573}
2574#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q)
2575#endif
2576
2577#define PCI_EXP_DEVCAP2		36	/* Device Capabilities 2 */
2578#define PCI_EXP_DEVCTL2		40	/* Device Control 2 */
2579
2580#endif /* < 2.6.28 */
2581
2582/*****************************************************************************/
2583#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) )
2584#ifndef swap
2585#define swap(a, b) \
2586	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
2587#endif
2588#define pci_request_selected_regions_exclusive(pdev, bars, name) \
2589		pci_request_selected_regions(pdev, bars, name)
2590#ifndef CONFIG_NR_CPUS
2591#define CONFIG_NR_CPUS 1
2592#endif /* CONFIG_NR_CPUS */
2593#ifndef pcie_aspm_enabled
2594#define pcie_aspm_enabled()   (1)
2595#endif /* pcie_aspm_enabled */
2596
2597#define  PCI_EXP_SLTSTA_PDS	0x0040	/* Presence Detect State */
2598
2599#ifndef pci_clear_master
2600extern void _kc_pci_clear_master(struct pci_dev *dev);
2601#define pci_clear_master(dev)	_kc_pci_clear_master(dev)
2602#endif
2603
2604#ifndef PCI_EXP_LNKCTL_ASPMC
2605#define  PCI_EXP_LNKCTL_ASPMC	0x0003	/* ASPM Control */
2606#endif
2607#else /* < 2.6.29 */
2608#ifndef HAVE_NET_DEVICE_OPS
2609#define HAVE_NET_DEVICE_OPS
2610#endif
2611#ifdef CONFIG_DCB
2612#define HAVE_PFC_MODE_ENABLE
2613#endif /* CONFIG_DCB */
2614#endif /* < 2.6.29 */
2615
2616/*****************************************************************************/
2617#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
2618#define skb_rx_queue_recorded(a) false
2619#define skb_get_rx_queue(a) 0
2620#define skb_record_rx_queue(a, b) do {} while (0)
2621#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues)
2622#ifndef CONFIG_PCI_IOV
2623#undef pci_enable_sriov
2624#define pci_enable_sriov(a, b) -ENOTSUPP
2625#undef pci_disable_sriov
2626#define pci_disable_sriov(a) do {} while (0)
2627#endif /* CONFIG_PCI_IOV */
2628#ifndef pr_cont
2629#define pr_cont(fmt, ...) \
2630	printk(KERN_CONT fmt, ##__VA_ARGS__)
2631#endif /* pr_cont */
/* Wrapper hiding the 2.5.28 API change: synchronize_irq() gained an irq
 * number argument.  The macro below (#define synchronize_irq) routes all
 * driver calls here with a uniform one-argument form.
 */
static inline void _kc_synchronize_irq(unsigned int a)
{
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) )
	synchronize_irq();
#else /* < 2.5.28 */
	synchronize_irq(a);
#endif /* < 2.5.28 */
}
2640#undef synchronize_irq
2641#define synchronize_irq(a) _kc_synchronize_irq(a)
2642
2643#define PCI_EXP_LNKCTL2		48	/* Link Control 2 */
2644
2645#else /* < 2.6.30 */
2646#define HAVE_ASPM_QUIRKS
2647#endif /* < 2.6.30 */
2648
2649/*****************************************************************************/
2650#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) )
2651#define ETH_P_1588 0x88F7
2652#define ETH_P_FIP  0x8914
2653#ifndef netdev_uc_count
2654#define netdev_uc_count(dev) ((dev)->uc_count)
2655#endif
2656#ifndef netdev_for_each_uc_addr
2657#define netdev_for_each_uc_addr(uclist, dev) \
2658	for (uclist = dev->uc_list; uclist; uclist = uclist->next)
2659#endif
2660#ifndef PORT_OTHER
2661#define PORT_OTHER 0xff
2662#endif
2663#ifndef MDIO_PHY_ID_PRTAD
2664#define MDIO_PHY_ID_PRTAD 0x03e0
2665#endif
2666#ifndef MDIO_PHY_ID_DEVAD
2667#define MDIO_PHY_ID_DEVAD 0x001f
2668#endif
2669#ifndef skb_dst
2670#define skb_dst(s) ((s)->dst)
2671#endif
2672
2673#ifndef SUPPORTED_1000baseKX_Full
2674#define SUPPORTED_1000baseKX_Full	(1 << 17)
2675#endif
2676#ifndef SUPPORTED_10000baseKX4_Full
2677#define SUPPORTED_10000baseKX4_Full	(1 << 18)
2678#endif
2679#ifndef SUPPORTED_10000baseKR_Full
2680#define SUPPORTED_10000baseKR_Full	(1 << 19)
2681#endif
2682
2683#ifndef ADVERTISED_1000baseKX_Full
2684#define ADVERTISED_1000baseKX_Full	(1 << 17)
2685#endif
2686#ifndef ADVERTISED_10000baseKX4_Full
2687#define ADVERTISED_10000baseKX4_Full	(1 << 18)
2688#endif
2689#ifndef ADVERTISED_10000baseKR_Full
2690#define ADVERTISED_10000baseKR_Full	(1 << 19)
2691#endif
2692
2693#else /* < 2.6.31 */
2694#ifndef HAVE_NETDEV_STORAGE_ADDRESS
2695#define HAVE_NETDEV_STORAGE_ADDRESS
2696#endif
2697#ifndef HAVE_NETDEV_HW_ADDR
2698#define HAVE_NETDEV_HW_ADDR
2699#endif
2700#ifndef HAVE_TRANS_START_IN_QUEUE
2701#define HAVE_TRANS_START_IN_QUEUE
2702#endif
2703#ifndef HAVE_INCLUDE_LINUX_MDIO_H
2704#define HAVE_INCLUDE_LINUX_MDIO_H
2705#endif
2706#endif /* < 2.6.31 */
2707
2708/*****************************************************************************/
2709#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) )
2710#undef netdev_tx_t
2711#define netdev_tx_t int
2712#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2713#ifndef NETIF_F_FCOE_MTU
2714#define NETIF_F_FCOE_MTU       (1 << 26)
2715#endif
2716#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2717
2718#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* Stub for kernels before 2.6.0, where runtime PM does not exist; always
 * reports 1 (the macro below discards the dev argument).
 * Declared with (void): an empty parameter list in C declares a function
 * with unspecified arguments and defeats compile-time argument checking.
 */
static inline int _kc_pm_runtime_get_sync(void)
{
	return 1;
}
2723#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync()
2724#else /* 2.6.0 => 2.6.32 */
/* Stub: runtime PM is unavailable before 2.6.32; always returns 1. */
static inline int _kc_pm_runtime_get_sync(struct device *dev)
{
	return 1;
}
2729#ifndef pm_runtime_get_sync
2730#define pm_runtime_get_sync(dev)	_kc_pm_runtime_get_sync(dev)
2731#endif
2732#endif /* 2.6.0 => 2.6.32 */
2733#ifndef pm_runtime_put
2734#define pm_runtime_put(dev)		do {} while (0)
2735#endif
2736#ifndef pm_runtime_put_sync
2737#define pm_runtime_put_sync(dev)	do {} while (0)
2738#endif
2739#ifndef pm_runtime_resume
2740#define pm_runtime_resume(dev)		do {} while (0)
2741#endif
2742#ifndef pm_schedule_suspend
2743#define pm_schedule_suspend(dev, t)	do {} while (0)
2744#endif
2745#ifndef pm_runtime_set_suspended
2746#define pm_runtime_set_suspended(dev)	do {} while (0)
2747#endif
2748#ifndef pm_runtime_disable
2749#define pm_runtime_disable(dev)		do {} while (0)
2750#endif
2751#ifndef pm_runtime_put_noidle
2752#define pm_runtime_put_noidle(dev)	do {} while (0)
2753#endif
2754#ifndef pm_runtime_set_active
2755#define pm_runtime_set_active(dev)	do {} while (0)
2756#endif
2757#ifndef pm_runtime_enable
2758#define pm_runtime_enable(dev)	do {} while (0)
2759#endif
2760#ifndef pm_runtime_get_noresume
2761#define pm_runtime_get_noresume(dev)	do {} while (0)
2762#endif
2763#else /* < 2.6.32 */
2764#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2765#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE
2766#define HAVE_NETDEV_OPS_FCOE_ENABLE
2767#endif
2768#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2769#ifdef CONFIG_DCB
2770#ifndef HAVE_DCBNL_OPS_GETAPP
2771#define HAVE_DCBNL_OPS_GETAPP
2772#endif
2773#endif /* CONFIG_DCB */
2774#include <linux/pm_runtime.h>
2775/* IOV bad DMA target work arounds require at least this kernel rev support */
2776#define HAVE_PCIE_TYPE
2777#endif /* < 2.6.32 */
2778
2779/*****************************************************************************/
2780#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) )
2781#ifndef pci_pcie_cap
2782#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP)
2783#endif
2784#ifndef IPV4_FLOW
2785#define IPV4_FLOW 0x10
2786#endif /* IPV4_FLOW */
2787#ifndef IPV6_FLOW
2788#define IPV6_FLOW 0x11
2789#endif /* IPV6_FLOW */
2790/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */
2791#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \
2792      (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) )
2793#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2794#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2795#define HAVE_NETDEV_OPS_FCOE_GETWWN
2796#endif
2797#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2798#endif /* RHEL6 or SLES11 SP1 */
2799#ifndef __percpu
2800#define __percpu
2801#endif /* __percpu */
2802#ifndef PORT_DA
2803#define PORT_DA PORT_OTHER
2804#endif
2805#ifndef PORT_NONE
2806#define PORT_NONE PORT_OTHER
2807#endif
2808
2809#if ((RHEL_RELEASE_CODE && \
2810     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \
2811     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))
2812#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE)
2813#undef DEFINE_DMA_UNMAP_ADDR
2814#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
2815#undef DEFINE_DMA_UNMAP_LEN
2816#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
2817#undef dma_unmap_addr
2818#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
2819#undef dma_unmap_addr_set
2820#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
2821#undef dma_unmap_len
2822#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
2823#undef dma_unmap_len_set
2824#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
2825#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */
2826#endif /* RHEL_RELEASE_CODE */
2827
2828#if (!(RHEL_RELEASE_CODE && \
2829       (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \
2830         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \
2831        ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \
2832         (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))))))
2833static inline bool pci_is_pcie(struct pci_dev *dev)
2834{
2835	return !!pci_pcie_cap(dev);
2836}
2837#endif /* RHEL_RELEASE_CODE */
2838
2839#ifndef __always_unused
2840#define __always_unused __attribute__((__unused__))
2841#endif
2842#ifndef __maybe_unused
2843#define __maybe_unused __attribute__((__unused__))
2844#endif
2845
2846#if (!(RHEL_RELEASE_CODE && \
2847      (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))))
2848#define sk_tx_queue_get(_sk) (-1)
2849#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0)
2850#endif /* !(RHEL >= 6.2) */
2851
2852#if (RHEL_RELEASE_CODE && \
2853     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \
2854     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
2855#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT
2856#define HAVE_ETHTOOL_SET_PHYS_ID
2857#define HAVE_ETHTOOL_GET_TS_INFO
2858#endif /* RHEL >= 6.4 && RHEL < 7.0 */
2859
2860#if (RHEL_RELEASE_CODE && \
2861     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \
2862     (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))
2863#define HAVE_RHEL6_NETDEV_OPS_EXT_FDB
2864#endif /* RHEL >= 6.5 && RHEL < 7.0 */
2865
2866#else /* < 2.6.33 */
2867#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
2868#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN
2869#define HAVE_NETDEV_OPS_FCOE_GETWWN
2870#endif
2871#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
2872#endif /* < 2.6.33 */
2873
2874/*****************************************************************************/
2875#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) )
2876#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))
2877#ifndef pci_num_vf
2878#define pci_num_vf(pdev) _kc_pci_num_vf(pdev)
2879extern int _kc_pci_num_vf(struct pci_dev *dev);
2880#endif
2881#endif /* RHEL_RELEASE_CODE */
2882
2883#ifndef ETH_FLAG_NTUPLE
2884#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE
2885#endif
2886
2887#ifndef netdev_mc_count
2888#define netdev_mc_count(dev) ((dev)->mc_count)
2889#endif
2890#ifndef netdev_mc_empty
2891#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
2892#endif
2893#ifndef netdev_for_each_mc_addr
2894#define netdev_for_each_mc_addr(mclist, dev) \
2895	for (mclist = dev->mc_list; mclist; mclist = mclist->next)
2896#endif
2897#ifndef netdev_uc_count
2898#define netdev_uc_count(dev) ((dev)->uc.count)
2899#endif
2900#ifndef netdev_uc_empty
2901#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0)
2902#endif
2903#ifndef netdev_for_each_uc_addr
2904#define netdev_for_each_uc_addr(ha, dev) \
2905	list_for_each_entry(ha, &dev->uc.list, list)
2906#endif
2907#ifndef dma_set_coherent_mask
2908#define dma_set_coherent_mask(dev,mask) \
2909	pci_set_consistent_dma_mask(to_pci_dev(dev),(mask))
2910#endif
2911#ifndef pci_dev_run_wake
2912#define pci_dev_run_wake(pdev)	(0)
2913#endif
2914
2915/* netdev logging taken from include/linux/netdevice.h */
2916#ifndef netdev_name
2917static inline const char *_kc_netdev_name(const struct net_device *dev)
2918{
2919	if (dev->reg_state != NETREG_REGISTERED)
2920		return "(unregistered net_device)";
2921	return dev->name;
2922}
2923#define netdev_name(netdev)	_kc_netdev_name(netdev)
2924#endif /* netdev_name */
2925
2926#undef netdev_printk
2927#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
2928#define netdev_printk(level, netdev, format, args...)		\
2929do {								\
2930	struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);	\
2931	printk(level "%s: " format, pci_name(pdev), ##args);	\
2932} while(0)
2933#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) )
2934#define netdev_printk(level, netdev, format, args...)		\
2935do {								\
2936	struct pci_dev *pdev = _kc_netdev_to_pdev(netdev);	\
2937	struct device *dev = pci_dev_to_dev(pdev);		\
2938	dev_printk(level, dev, "%s: " format,			\
2939		   netdev_name(netdev), ##args);		\
2940} while(0)
2941#else /* 2.6.21 => 2.6.34 */
2942#define netdev_printk(level, netdev, format, args...)		\
2943	dev_printk(level, (netdev)->dev.parent,			\
2944		   "%s: " format,				\
2945		   netdev_name(netdev), ##args)
2946#endif /* <2.6.0 <2.6.21 <2.6.34 */
2947#undef netdev_emerg
2948#define netdev_emerg(dev, format, args...)			\
2949	netdev_printk(KERN_EMERG, dev, format, ##args)
2950#undef netdev_alert
2951#define netdev_alert(dev, format, args...)			\
2952	netdev_printk(KERN_ALERT, dev, format, ##args)
2953#undef netdev_crit
2954#define netdev_crit(dev, format, args...)			\
2955	netdev_printk(KERN_CRIT, dev, format, ##args)
2956#undef netdev_err
2957#define netdev_err(dev, format, args...)			\
2958	netdev_printk(KERN_ERR, dev, format, ##args)
2959#undef netdev_warn
2960#define netdev_warn(dev, format, args...)			\
2961	netdev_printk(KERN_WARNING, dev, format, ##args)
2962#undef netdev_notice
2963#define netdev_notice(dev, format, args...)			\
2964	netdev_printk(KERN_NOTICE, dev, format, ##args)
2965#undef netdev_info
2966#define netdev_info(dev, format, args...)			\
2967	netdev_printk(KERN_INFO, dev, format, ##args)
2968#undef netdev_dbg
2969#if defined(DEBUG)
2970#define netdev_dbg(__dev, format, args...)			\
2971	netdev_printk(KERN_DEBUG, __dev, format, ##args)
2972#elif defined(CONFIG_DYNAMIC_DEBUG)
2973#define netdev_dbg(__dev, format, args...)			\
2974do {								\
2975	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
2976			netdev_name(__dev), ##args);		\
2977} while (0)
2978#else /* DEBUG */
2979#define netdev_dbg(__dev, format, args...)			\
2980({								\
2981	if (0)							\
2982		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2983	0;							\
2984})
2985#endif /* DEBUG */
2986
2987#undef netif_printk
2988#define netif_printk(priv, type, level, dev, fmt, args...)	\
2989do {								\
2990	if (netif_msg_##type(priv))				\
2991		netdev_printk(level, (dev), fmt, ##args);	\
2992} while (0)
2993
2994#undef netif_emerg
2995#define netif_emerg(priv, type, dev, fmt, args...)		\
2996	netif_level(emerg, priv, type, dev, fmt, ##args)
2997#undef netif_alert
2998#define netif_alert(priv, type, dev, fmt, args...)		\
2999	netif_level(alert, priv, type, dev, fmt, ##args)
3000#undef netif_crit
3001#define netif_crit(priv, type, dev, fmt, args...)		\
3002	netif_level(crit, priv, type, dev, fmt, ##args)
3003#undef netif_err
3004#define netif_err(priv, type, dev, fmt, args...)		\
3005	netif_level(err, priv, type, dev, fmt, ##args)
3006#undef netif_warn
3007#define netif_warn(priv, type, dev, fmt, args...)		\
3008	netif_level(warn, priv, type, dev, fmt, ##args)
3009#undef netif_notice
3010#define netif_notice(priv, type, dev, fmt, args...)		\
3011	netif_level(notice, priv, type, dev, fmt, ##args)
3012#undef netif_info
3013#define netif_info(priv, type, dev, fmt, args...)		\
3014	netif_level(info, priv, type, dev, fmt, ##args)
3015#undef netif_dbg
3016#define netif_dbg(priv, type, dev, fmt, args...)		\
3017	netif_level(dbg, priv, type, dev, fmt, ##args)
3018
3019#ifdef SET_SYSTEM_SLEEP_PM_OPS
3020#define HAVE_SYSTEM_SLEEP_PM_OPS
3021#endif
3022
3023#ifndef for_each_set_bit
3024#define for_each_set_bit(bit, addr, size) \
3025	for ((bit) = find_first_bit((addr), (size)); \
3026		(bit) < (size); \
3027		(bit) = find_next_bit((addr), (size), (bit) + 1))
3028#endif /* for_each_set_bit */
3029
3030#ifndef DEFINE_DMA_UNMAP_ADDR
3031#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR
3032#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN
3033#define dma_unmap_addr pci_unmap_addr
3034#define dma_unmap_addr_set pci_unmap_addr_set
3035#define dma_unmap_len pci_unmap_len
3036#define dma_unmap_len_set pci_unmap_len_set
3037#endif /* DEFINE_DMA_UNMAP_ADDR */
3038
3039#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3))
3040#ifdef IGB_HWMON
3041#ifdef CONFIG_DEBUG_LOCK_ALLOC
3042#define sysfs_attr_init(attr)				\
3043	do {						\
3044		static struct lock_class_key __key;	\
3045		(attr)->key = &__key;			\
3046	} while (0)
3047#else
3048#define sysfs_attr_init(attr) do {} while (0)
3049#endif /* CONFIG_DEBUG_LOCK_ALLOC */
3050#endif /* IGB_HWMON */
3051#endif /* RHEL_RELEASE_CODE */
3052
3053#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
/* Runtime-PM stub for pre-2.6.0 kernels: runtime PM does not exist
 * there, so the device is never runtime-suspended.
 *
 * Fix: declare the parameter list as (void).  An empty () in C is an
 * old-style declaration with *unspecified* parameters, which defeats
 * argument checking at call sites; (void) makes "no arguments" explicit.
 */
static inline bool _kc_pm_runtime_suspended(void)
{
	return false;
}
3058#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended()
3059#else /* 2.6.0 => 2.6.34 */
/* Runtime-PM stub for 2.6.0..2.6.33 kernels without pm_runtime_suspended():
 * always report the device as active (not runtime-suspended).
 */
static inline bool _kc_pm_runtime_suspended(struct device *dev)
{
	return false;
}
3064#ifndef pm_runtime_suspended
3065#define pm_runtime_suspended(dev)	_kc_pm_runtime_suspended(dev)
3066#endif
3067#endif /* 2.6.0 => 2.6.34 */
3068
3069#else /* < 2.6.34 */
3070#define HAVE_SYSTEM_SLEEP_PM_OPS
3071#ifndef HAVE_SET_RX_MODE
3072#define HAVE_SET_RX_MODE
3073#endif
3074
3075#endif /* < 2.6.34 */
3076
3077/*****************************************************************************/
3078#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
3079
3080ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos,
3081				   const void __user *from, size_t count);
3082#define simple_write_to_buffer _kc_simple_write_to_buffer
3083
3084#ifndef numa_node_id
3085#define numa_node_id() 0
3086#endif
3087#ifdef HAVE_TX_MQ
3088#include <net/sch_generic.h>
3089#ifndef CONFIG_NETDEVICES_MULTIQUEUE
3090#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)))
3091void _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int);
3092#define netif_set_real_num_tx_queues  _kc_netif_set_real_num_tx_queues
3093#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */
3094#else /* CONFIG_NETDEVICES_MULTI_QUEUE */
3095#define netif_set_real_num_tx_queues(_netdev, _count) \
3096	do { \
3097		(_netdev)->egress_subqueue_count = _count; \
3098	} while (0)
3099#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */
3100#else /* HAVE_TX_MQ */
3101#define netif_set_real_num_tx_queues(_netdev, _count) do {} while(0)
3102#endif /* HAVE_TX_MQ */
3103#ifndef ETH_FLAG_RXHASH
3104#define ETH_FLAG_RXHASH (1<<28)
3105#endif /* ETH_FLAG_RXHASH */
3106#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))
3107#define HAVE_IRQ_AFFINITY_HINT
3108#endif
3109#else /* < 2.6.35 */
3110#define HAVE_PM_QOS_REQUEST_LIST
3111#define HAVE_IRQ_AFFINITY_HINT
3112#endif /* < 2.6.35 */
3113
3114/*****************************************************************************/
3115#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
3116extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32);
3117#define ethtool_op_set_flags _kc_ethtool_op_set_flags
3118extern u32 _kc_ethtool_op_get_flags(struct net_device *);
3119#define ethtool_op_get_flags _kc_ethtool_op_get_flags
3120
3121#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3122#ifdef NET_IP_ALIGN
3123#undef NET_IP_ALIGN
3124#endif
3125#define NET_IP_ALIGN 0
3126#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
3127
3128#ifdef NET_SKB_PAD
3129#undef NET_SKB_PAD
3130#endif
3131
3132#if (L1_CACHE_BYTES > 32)
3133#define NET_SKB_PAD L1_CACHE_BYTES
3134#else
3135#define NET_SKB_PAD 32
3136#endif
3137
/* Backport of netdev_alloc_skb_ip_align(): atomically allocate an skb
 * for @dev sized @length plus NET_SKB_PAD + NET_IP_ALIGN extra bytes,
 * and reserve that extra space as headroom.  Returns NULL when the
 * GFP_ATOMIC allocation fails.
 */
static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev,
							    unsigned int length)
{
	struct sk_buff *skb;

	skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC);
	if (skb) {
#if (NET_IP_ALIGN + NET_SKB_PAD)
		/* Only reserve headroom when the combined pad is non-zero
		 * (NET_IP_ALIGN may be redefined to 0 above). */
		skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
#endif
		skb->dev = dev;
	}
	return skb;
}
3152
3153#ifdef netdev_alloc_skb_ip_align
3154#undef netdev_alloc_skb_ip_align
3155#endif
3156#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l)
3157
3158#undef netif_level
3159#define netif_level(level, priv, type, dev, fmt, args...)	\
3160do {								\
3161	if (netif_msg_##type(priv))				\
3162		netdev_##level(dev, fmt, ##args);		\
3163} while (0)
3164
3165#undef usleep_range
3166#define usleep_range(min, max)	msleep(DIV_ROUND_UP(min, 1000))
3167
3168#define u64_stats_update_begin(a) do { } while(0)
3169#define u64_stats_update_end(a) do { } while(0)
3170#define u64_stats_fetch_begin(a) do { } while(0)
3171#define u64_stats_fetch_retry_bh(a) (0)
3172#define u64_stats_fetch_begin_bh(a) (0)
3173
3174#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1))
3175#define HAVE_8021P_SUPPORT
3176#endif
3177
3178#else /* < 2.6.36 */
3179
3180
3181#define HAVE_PM_QOS_REQUEST_ACTIVE
3182#define HAVE_8021P_SUPPORT
3183#define HAVE_NDO_GET_STATS64
3184#endif /* < 2.6.36 */
3185
3186/*****************************************************************************/
3187#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) )
3188#ifndef netif_set_real_num_rx_queues
/* Stub for kernels lacking netif_set_real_num_rx_queues(): nothing to
 * configure on these kernels, so simply report success.
 */
static inline int __kc_netif_set_real_num_rx_queues(struct net_device *dev,
						    unsigned int rxq)
{
	return 0;
}
3194#define netif_set_real_num_rx_queues(dev, rxq) \
3195	__kc_netif_set_real_num_rx_queues((dev), (rxq))
3196#endif
3197#ifndef ETHTOOL_RXNTUPLE_ACTION_CLEAR
3198#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2)
3199#endif
3200#ifndef VLAN_N_VID
3201#define VLAN_N_VID	VLAN_GROUP_ARRAY_LEN
3202#endif /* VLAN_N_VID */
3203#ifndef ETH_FLAG_TXVLAN
3204#define ETH_FLAG_TXVLAN (1 << 7)
3205#endif /* ETH_FLAG_TXVLAN */
3206#ifndef ETH_FLAG_RXVLAN
3207#define ETH_FLAG_RXVLAN (1 << 8)
3208#endif /* ETH_FLAG_RXVLAN */
3209
/* Backport of skb_checksum_none_assert(): warn if the skb's ip_summed
 * state is anything other than CHECKSUM_NONE.
 */
static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb)
{
	WARN_ON(skb->ip_summed != CHECKSUM_NONE);
}
3214#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb)
3215
/* Backport of vzalloc_node(): NUMA-node-local vmalloc with the buffer
 * cleared to zero.  Returns NULL on allocation failure.
 */
static inline void *_kc_vzalloc_node(unsigned long size, int node)
{
	void *buf = vmalloc_node(size, node);

	if (buf)
		memset(buf, 0, size);
	return buf;
}
3223#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node)
3224
/* Backport of vzalloc(): vmalloc followed by zeroing of the buffer.
 * Returns NULL on allocation failure.
 */
static inline void *_kc_vzalloc(unsigned long size)
{
	void *buf = vmalloc(size);

	if (buf)
		memset(buf, 0, size);
	return buf;
}
3232#define vzalloc(_size) _kc_vzalloc(_size)
3233
3234#ifndef vlan_get_protocol
/* Backport of vlan_get_protocol(): return @skb's upper-layer protocol.
 * If the VLAN tag is carried out of band (hardware-accelerated) or the
 * frame is not 802.1Q-tagged, skb->protocol already holds the answer;
 * otherwise the encapsulated protocol is read from the in-band VLAN
 * header.  Returns 0 if the VLAN header is not fully in linear data.
 */
static inline __be16 __kc_vlan_get_protocol(const struct sk_buff *skb)
{
	/* Out-of-band tag or not an 802.1Q frame: no unwrapping needed. */
	if (vlan_tx_tag_present(skb) ||
	    skb->protocol != cpu_to_be16(ETH_P_8021Q))
		return skb->protocol;

	/* Not enough linear data to read the VLAN ethernet header. */
	if (skb_headlen(skb) < sizeof(struct vlan_ethhdr))
		return 0;

	return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto;
}
3246#define vlan_get_protocol(_skb) __kc_vlan_get_protocol(_skb)
3247#endif
3248#ifdef HAVE_HW_TIME_STAMP
3249#define SKBTX_HW_TSTAMP (1 << 0)
3250#define SKBTX_IN_PROGRESS (1 << 2)
3251#define SKB_SHARED_TX_IS_UNION
3252#endif
3253
3254#ifndef device_wakeup_enable
3255#define device_wakeup_enable(dev)	device_set_wakeup_enable(dev, true)
3256#endif
3257
3258#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) )
3259#ifndef HAVE_VLAN_RX_REGISTER
3260#define HAVE_VLAN_RX_REGISTER
3261#endif
3262#endif /* > 2.4.18 */
3263#endif /* < 2.6.37 */
3264
3265/*****************************************************************************/
3266#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) )
3267#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
3268#define skb_checksum_start_offset(skb) skb_transport_offset(skb)
3269#else /* 2.6.22 -> 2.6.37 */
3270static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb)
3271{
3272        return skb->csum_start - skb_headroom(skb);
3273}
3274#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb)
3275#endif /* 2.6.22 -> 2.6.37 */
3276#ifdef CONFIG_DCB
3277#ifndef IEEE_8021QAZ_MAX_TCS
3278#define IEEE_8021QAZ_MAX_TCS 8
3279#endif
3280#ifndef DCB_CAP_DCBX_HOST
3281#define DCB_CAP_DCBX_HOST		0x01
3282#endif
3283#ifndef DCB_CAP_DCBX_LLD_MANAGED
3284#define DCB_CAP_DCBX_LLD_MANAGED	0x02
3285#endif
3286#ifndef DCB_CAP_DCBX_VER_CEE
3287#define DCB_CAP_DCBX_VER_CEE		0x04
3288#endif
3289#ifndef DCB_CAP_DCBX_VER_IEEE
3290#define DCB_CAP_DCBX_VER_IEEE		0x08
3291#endif
3292#ifndef DCB_CAP_DCBX_STATIC
3293#define DCB_CAP_DCBX_STATIC		0x10
3294#endif
3295#endif /* CONFIG_DCB */
3296#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2))
3297#define CONFIG_XPS
3298#endif /* RHEL_RELEASE_VERSION(6,2) */
3299#endif /* < 2.6.38 */
3300
3301/*****************************************************************************/
3302#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
3303#ifndef NETIF_F_RXCSUM
3304#define NETIF_F_RXCSUM		(1 << 29)
3305#endif
3306#ifndef skb_queue_reverse_walk_safe
3307#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
3308		for (skb = (queue)->prev, tmp = skb->prev;			\
3309		     skb != (struct sk_buff *)(queue);				\
3310		     skb = tmp, tmp = skb->prev)
3311#endif
3312#else /* < 2.6.39 */
3313#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
3314#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
3315#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET
3316#endif
3317#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */
3318#ifndef HAVE_MQPRIO
3319#define HAVE_MQPRIO
3320#endif
3321#ifndef HAVE_SETUP_TC
3322#define HAVE_SETUP_TC
3323#endif
3324#ifdef CONFIG_DCB
3325#ifndef HAVE_DCBNL_IEEE
3326#define HAVE_DCBNL_IEEE
3327#endif
3328#endif /* CONFIG_DCB */
3329#ifndef HAVE_NDO_SET_FEATURES
3330#define HAVE_NDO_SET_FEATURES
3331#endif
3332#endif /* < 2.6.39 */
3333
3334/*****************************************************************************/
3335/* use < 2.6.40 because of a Fedora 15 kernel update where they
3336 * updated the kernel version to 2.6.40.x and they back-ported 3.0 features
3337 * like set_phys_id for ethtool.
3338 */
3339#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) )
3340#ifdef ETHTOOL_GRXRINGS
3341#ifndef FLOW_EXT
3342#define	FLOW_EXT	0x80000000
/* Backported ethtool RX flow-classification structures, providing the
 * FLOW_EXT layout on kernels whose ethtool.h predates it.
 */
union _kc_ethtool_flow_union {
	struct ethtool_tcpip4_spec		tcp_ip4_spec;
	struct ethtool_usrip4_spec		usr_ip4_spec;
	__u8					hdata[60];
};
/* Extended match fields used when FLOW_EXT is set in flow_type. */
struct _kc_ethtool_flow_ext {
	__be16	vlan_etype;
	__be16	vlan_tci;
	__be32	data[2];
};
/* Classification rule: h_* are match values, m_* are the corresponding
 * masks, per the ethtool naming convention. */
struct _kc_ethtool_rx_flow_spec {
	__u32		flow_type;
	union _kc_ethtool_flow_union h_u;
	struct _kc_ethtool_flow_ext h_ext;
	union _kc_ethtool_flow_union m_u;
	struct _kc_ethtool_flow_ext m_ext;
	__u64		ring_cookie;
	__u32		location;
};
3362#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec
3363#endif /* FLOW_EXT */
3364#endif
3365
3366#define pci_disable_link_state_locked pci_disable_link_state
3367
3368#ifndef PCI_LTR_VALUE_MASK
3369#define  PCI_LTR_VALUE_MASK	0x000003ff
3370#endif
3371#ifndef PCI_LTR_SCALE_MASK
3372#define  PCI_LTR_SCALE_MASK	0x00001c00
3373#endif
3374#ifndef PCI_LTR_SCALE_SHIFT
3375#define  PCI_LTR_SCALE_SHIFT	10
3376#endif
3377
3378#else /* < 2.6.40 */
3379#define HAVE_ETHTOOL_SET_PHYS_ID
3380#endif /* < 2.6.40 */
3381
3382/*****************************************************************************/
3383#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) )
3384#define USE_LEGACY_PM_SUPPORT
3385#endif /* < 3.0.0 */
3386
3387/*****************************************************************************/
3388#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) )
3389#ifndef __netdev_alloc_skb_ip_align
3390#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l)
3391#endif /* __netdev_alloc_skb_ip_align */
3392#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app)
3393#define dcb_ieee_delapp(dev, app) 0
3394#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority)
3395
3396/* 1000BASE-T Control register */
3397#define CTL1000_AS_MASTER	0x0800
3398#define CTL1000_ENABLE_MASTER	0x1000
3399
3400#else /* < 3.1.0 */
3401#ifndef HAVE_DCBNL_IEEE_DELAPP
3402#define HAVE_DCBNL_IEEE_DELAPP
3403#endif
3404#endif /* < 3.1.0 */
3405
3406/*****************************************************************************/
3407#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) )
3408#ifdef ETHTOOL_GRXRINGS
3409#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS
3410#endif /* ETHTOOL_GRXRINGS */
3411
3412#ifndef skb_frag_size
3413#define skb_frag_size(frag)	_kc_skb_frag_size(frag)
/* Backport of skb_frag_size(): number of bytes in the paged fragment. */
static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}
3418#endif /* skb_frag_size */
3419
3420#ifndef skb_frag_size_sub
3421#define skb_frag_size_sub(frag, delta)	_kc_skb_frag_size_sub(frag, delta)
/* Backport of skb_frag_size_sub(): shrink the fragment by @delta bytes. */
static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}
3426#endif /* skb_frag_size_sub */
3427
3428#ifndef skb_frag_page
3429#define skb_frag_page(frag)	_kc_skb_frag_page(frag)
/* Backport of skb_frag_page(): the page backing the paged fragment. */
static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag)
{
	return frag->page;
}
3434#endif /* skb_frag_page */
3435
3436#ifndef skb_frag_address
3437#define skb_frag_address(frag)	_kc_skb_frag_address(frag)
/* Backport of skb_frag_address(): kernel virtual address of the
 * fragment data (page address plus the fragment's page offset).
 * NOTE(review): like the upstream helper, this presumes the page is
 * addressable via page_address() (i.e. not unmapped highmem).
 */
static inline void *_kc_skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}
3442#endif /* skb_frag_address */
3443
3444#ifndef skb_frag_dma_map
3445#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
3446#include <linux/dma-mapping.h>
3447#endif
3448#define skb_frag_dma_map(dev,frag,offset,size,dir) \
3449		_kc_skb_frag_dma_map(dev,frag,offset,size,dir)
/* Backport of skb_frag_dma_map(): DMA-map @size bytes of the fragment
 * starting @offset bytes into it, for direction @dir on device @dev.
 * Returns the DMA address from dma_map_page().
 */
static inline dma_addr_t _kc_skb_frag_dma_map(struct device *dev,
					      const skb_frag_t *frag,
					      size_t offset, size_t size,
					      enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
3458#endif /* skb_frag_dma_map */
3459
3460#ifndef __skb_frag_unref
3461#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag)
/* Backport of __skb_frag_unref(): drop one reference on the fragment's
 * backing page.
 */
static inline void __kc_skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}
3466#endif /* __skb_frag_unref */
3467
3468#ifndef SPEED_UNKNOWN
3469#define SPEED_UNKNOWN	-1
3470#endif
3471#ifndef DUPLEX_UNKNOWN
3472#define DUPLEX_UNKNOWN	0xff
3473#endif
3474#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))
3475#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
3476#define HAVE_PCI_DEV_FLAGS_ASSIGNED
3477#endif
3478#endif
3479#else /* < 3.2.0 */
3480#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED
3481#define HAVE_PCI_DEV_FLAGS_ASSIGNED
3482#define HAVE_VF_SPOOFCHK_CONFIGURE
3483#endif
3484#endif /* < 3.2.0 */
3485
3486#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2))
3487#undef ixgbe_get_netdev_tc_txq
3488#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc])
3489#endif
3490/*****************************************************************************/
3491#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) )
3492typedef u32 kni_netdev_features_t;
3493#undef PCI_EXP_TYPE_RC_EC
3494#define  PCI_EXP_TYPE_RC_EC	0xa	/* Root Complex Event Collector */
3495#ifndef CONFIG_BQL
3496#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0)
3497#define netdev_completed_queue(_n, _p, _b) do {} while (0)
3498#define netdev_tx_sent_queue(_q, _b) do {} while (0)
3499#define netdev_sent_queue(_n, _b) do {} while (0)
3500#define netdev_tx_reset_queue(_q) do {} while (0)
3501#define netdev_reset_queue(_n) do {} while (0)
3502#endif
3503#else /* ! < 3.3.0 */
3504typedef netdev_features_t kni_netdev_features_t;
3505#define HAVE_INT_NDO_VLAN_RX_ADD_VID
3506#ifdef ETHTOOL_SRXNTUPLE
3507#undef ETHTOOL_SRXNTUPLE
3508#endif
3509#endif /* < 3.3.0 */
3510
3511/*****************************************************************************/
3512#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
3513#ifndef NETIF_F_RXFCS
3514#define NETIF_F_RXFCS	0
3515#endif /* NETIF_F_RXFCS */
3516#ifndef NETIF_F_RXALL
3517#define NETIF_F_RXALL	0
3518#endif /* NETIF_F_RXALL */
3519
3520#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
3521#define NUMTCS_RETURNS_U8
3522
3523int _kc_simple_open(struct inode *inode, struct file *file);
3524#define simple_open _kc_simple_open
3525#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
3526
3527
3528#ifndef skb_add_rx_frag
3529#define skb_add_rx_frag _kc_skb_add_rx_frag
3530extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *,
3531				int, int, unsigned int);
3532#endif
3533#ifdef NET_ADDR_RANDOM
3534#define eth_hw_addr_random(N) do { \
3535	random_ether_addr(N->dev_addr); \
3536	N->addr_assign_type |= NET_ADDR_RANDOM; \
3537	} while (0)
3538#else /* NET_ADDR_RANDOM */
3539#define eth_hw_addr_random(N) random_ether_addr(N->dev_addr)
3540#endif /* NET_ADDR_RANDOM */
3541#else /* < 3.4.0 */
3542#include <linux/kconfig.h>
3543#endif /* >= 3.4.0 */
3544
3545/*****************************************************************************/
3546#if defined(E1000E_PTP) || defined(IGB_PTP) || defined(IXGBE_PTP) || defined(I40E_PTP)
3547#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) && IS_ENABLED(CONFIG_PTP_1588_CLOCK)
3548#define HAVE_PTP_1588_CLOCK
3549#else
3550#error Cannot enable PTP Hardware Clock support due to a pre-3.0 kernel version or CONFIG_PTP_1588_CLOCK not enabled in the kernel
3551#endif /* > 3.0.0 && IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
3552#endif /* E1000E_PTP || IGB_PTP || IXGBE_PTP || I40E_PTP */
3553
3554/*****************************************************************************/
3555#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) )
3556#define skb_tx_timestamp(skb) do {} while (0)
3557static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2)
3558{
3559	return !compare_ether_addr(addr1, addr2);
3560}
3561#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2))
3562#else
3563#define HAVE_FDB_OPS
3564#define HAVE_ETHTOOL_GET_TS_INFO
3565#endif /* < 3.5.0 */
3566
3567/*****************************************************************************/
3568#include <linux/mdio.h>
3569#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) )
3570#define PCI_EXP_LNKCAP2		44	/* Link Capability 2 */
3571
3572#ifndef MDIO_EEE_100TX
3573#define MDIO_EEE_100TX		0x0002	/* 100TX EEE cap */
3574#endif
3575#ifndef MDIO_EEE_1000T
3576#define MDIO_EEE_1000T		0x0004	/* 1000T EEE cap */
3577#endif
3578#ifndef MDIO_EEE_10GT
3579#define MDIO_EEE_10GT		0x0008	/* 10GT EEE cap */
3580#endif
3581#ifndef MDIO_EEE_1000KX
3582#define MDIO_EEE_1000KX		0x0010	/* 1000KX EEE cap */
3583#endif
3584#ifndef MDIO_EEE_10GKX4
3585#define MDIO_EEE_10GKX4		0x0020	/* 10G KX4 EEE cap */
3586#endif
3587#ifndef MDIO_EEE_10GKR
3588#define MDIO_EEE_10GKR		0x0040	/* 10G KR EEE cap */
3589#endif
3590#endif /* < 3.6.0 */
3591
3592/******************************************************************************/
3593#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) )
3594#ifndef ADVERTISED_40000baseKR4_Full
3595/* these defines were all added in one commit, so should be safe
3596 * to trigger activiation on one define
3597 */
3598#define SUPPORTED_40000baseKR4_Full	(1 << 23)
3599#define SUPPORTED_40000baseCR4_Full	(1 << 24)
3600#define SUPPORTED_40000baseSR4_Full	(1 << 25)
3601#define SUPPORTED_40000baseLR4_Full	(1 << 26)
3602#define ADVERTISED_40000baseKR4_Full	(1 << 23)
3603#define ADVERTISED_40000baseCR4_Full	(1 << 24)
3604#define ADVERTISED_40000baseSR4_Full	(1 << 25)
3605#define ADVERTISED_40000baseLR4_Full	(1 << 26)
3606#endif
3607
3608/**
3609 * mmd_eee_cap_to_ethtool_sup_t
3610 * @eee_cap: value of the MMD EEE Capability register
3611 *
3612 * A small helper function that translates MMD EEE Capability (3.20) bits
3613 * to ethtool supported settings.
3614 */
3615static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap)
3616{
3617	u32 supported = 0;
3618
3619	if (eee_cap & MDIO_EEE_100TX)
3620		supported |= SUPPORTED_100baseT_Full;
3621	if (eee_cap & MDIO_EEE_1000T)
3622		supported |= SUPPORTED_1000baseT_Full;
3623	if (eee_cap & MDIO_EEE_10GT)
3624		supported |= SUPPORTED_10000baseT_Full;
3625	if (eee_cap & MDIO_EEE_1000KX)
3626		supported |= SUPPORTED_1000baseKX_Full;
3627	if (eee_cap & MDIO_EEE_10GKX4)
3628		supported |= SUPPORTED_10000baseKX4_Full;
3629	if (eee_cap & MDIO_EEE_10GKR)
3630		supported |= SUPPORTED_10000baseKR_Full;
3631
3632	return supported;
3633}
3634#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \
3635	__kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap)
3636
3637/**
3638 * mmd_eee_adv_to_ethtool_adv_t
3639 * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
3640 *
3641 * A small helper function that translates the MMD EEE Advertisement (7.60)
3642 * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
3643 * settings.
3644 */
3645static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv)
3646{
3647	u32 adv = 0;
3648
3649	if (eee_adv & MDIO_EEE_100TX)
3650		adv |= ADVERTISED_100baseT_Full;
3651	if (eee_adv & MDIO_EEE_1000T)
3652		adv |= ADVERTISED_1000baseT_Full;
3653	if (eee_adv & MDIO_EEE_10GT)
3654		adv |= ADVERTISED_10000baseT_Full;
3655	if (eee_adv & MDIO_EEE_1000KX)
3656		adv |= ADVERTISED_1000baseKX_Full;
3657	if (eee_adv & MDIO_EEE_10GKX4)
3658		adv |= ADVERTISED_10000baseKX4_Full;
3659	if (eee_adv & MDIO_EEE_10GKR)
3660		adv |= ADVERTISED_10000baseKR_Full;
3661
3662	return adv;
3663}
3664#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \
3665	__kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv)
3666
3667/**
3668 * ethtool_adv_to_mmd_eee_adv_t
3669 * @adv: the ethtool advertisement settings
3670 *
3671 * A small helper function that translates ethtool advertisement settings
3672 * to EEE advertisements for the MMD EEE Advertisement (7.60) and
3673 * MMD EEE Link Partner Ability (7.61) registers.
3674 */
3675static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv)
3676{
3677	u16 reg = 0;
3678
3679	if (adv & ADVERTISED_100baseT_Full)
3680		reg |= MDIO_EEE_100TX;
3681	if (adv & ADVERTISED_1000baseT_Full)
3682		reg |= MDIO_EEE_1000T;
3683	if (adv & ADVERTISED_10000baseT_Full)
3684		reg |= MDIO_EEE_10GT;
3685	if (adv & ADVERTISED_1000baseKX_Full)
3686		reg |= MDIO_EEE_1000KX;
3687	if (adv & ADVERTISED_10000baseKX4_Full)
3688		reg |= MDIO_EEE_10GKX4;
3689	if (adv & ADVERTISED_10000baseKR_Full)
3690		reg |= MDIO_EEE_10GKR;
3691
3692	return reg;
3693}
3694#define ethtool_adv_to_mmd_eee_adv_t(adv) \
3695	__kc_ethtool_adv_to_mmd_eee_adv_t(adv)
3696
3697#ifndef pci_pcie_type
3698#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
/* Backport of pci_pcie_type() for kernels without pdev->pcie_type:
 * read the device/port type field (PCI_EXP_FLAGS_TYPE) from the PCIe
 * Flags register.  BUG()s if the device has no PCIe capability, so
 * only call this on devices known to be PCI Express.
 */
static inline u8 pci_pcie_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		BUG();
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	/* Type field masked out of the flags word and shifted into place. */
	return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
}
3710#else /* < 2.6.24 */
3711#define pci_pcie_type(x)	(x)->pcie_type
3712#endif /* < 2.6.24 */
3713#endif /* pci_pcie_type */
3714
3715#define ptp_clock_register(caps, args...) ptp_clock_register(caps)
3716
3717#ifndef PCI_EXP_LNKSTA2
3718int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
3719#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v)
3720int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
3721#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v)
3722int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
3723					    u16 clear, u16 set);
3724#define pcie_capability_clear_and_set_word(d,p,c,s) \
3725	__kc_pcie_capability_clear_and_set_word(d,p,c,s)
3726
3727#define PCI_EXP_LNKSTA2		50	/* Link Status 2 */
3728
/* Clear @clear bits in the PCIe capability register at @pos: a
 * clear-and-set operation with an empty set mask.
 */
static inline int pcie_capability_clear_word(struct pci_dev *dev, int pos,
					     u16 clear)
{
	return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0);
}
3734#endif /* !PCI_EXP_LNKSTA2 */
3735
3736#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))
3737#define USE_CONST_DEV_UC_CHAR
3738#endif
3739
3740#else /* >= 3.7.0 */
3741#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS
3742#define USE_CONST_DEV_UC_CHAR
3743#endif /* >= 3.7.0 */
3744
3745/*****************************************************************************/
3746#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) )
3747#ifndef PCI_EXP_LNKCTL_ASPM_L0S
3748#define  PCI_EXP_LNKCTL_ASPM_L0S  0x01	/* L0s Enable */
3749#endif
3750#ifndef PCI_EXP_LNKCTL_ASPM_L1
3751#define  PCI_EXP_LNKCTL_ASPM_L1   0x02	/* L1 Enable */
3752#endif
3753#define HAVE_CONFIG_HOTPLUG
3754/* Reserved Ethernet Addresses per IEEE 802.1Q */
static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = {
	0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };	/* 01-80-C2-00-00-00 */
3757#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) &&\
3758    !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5))
/* True when @addr is one of the reserved 802.1Q link-local addresses
 * 01:80:C2:00:00:00 - 01:80:C2:00:00:0F: the 0xfff0 mask on the third
 * 16-bit word ignores the low nibble of the final octet.  Compares the
 * address as three 16-bit words; NOTE(review): like upstream, this
 * presumes @addr is at least 2-byte aligned.
 */
static inline bool is_link_local_ether_addr(const u8 *addr)
{
	__be16 *a = (__be16 *)addr;
	static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
	static const __be16 m = cpu_to_be16(0xfff0);

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
}
3767#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */
3768#else /* >= 3.8.0 */
3769#ifndef __devinit
3770#define __devinit
3771#define HAVE_ENCAP_CSUM_OFFLOAD
3772#endif
3773
3774#ifndef __devinitdata
3775#define __devinitdata
3776#endif
3777
3778#ifndef __devexit
3779#define __devexit
3780#endif
3781
3782#ifndef __devexit_p
3783#define __devexit_p
3784#endif
3785
3786#ifndef HAVE_SRIOV_CONFIGURE
3787#define HAVE_SRIOV_CONFIGURE
3788#endif
3789
3790#define HAVE_BRIDGE_ATTRIBS
3791#ifndef BRIDGE_MODE_VEB
3792#define BRIDGE_MODE_VEB		0	/* Default loopback mode */
3793#endif /* BRIDGE_MODE_VEB */
3794#ifndef BRIDGE_MODE_VEPA
3795#define BRIDGE_MODE_VEPA	1	/* 802.1Qbg defined VEPA mode */
3796#endif /* BRIDGE_MODE_VEPA */
3797#endif /* >= 3.8.0 */
3798
3799/*****************************************************************************/
3800#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) )
3801
/* Backported hlist iteration helpers: kernels before 3.9 used the
 * two-argument (pos/node) iterator style, so redefine the modern
 * single-cursor forms here. */
#undef hlist_entry
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)

#undef hlist_entry_safe
/*
 * Evaluate @ptr exactly once.  The previous form expanded @ptr twice,
 * which is unsafe when the argument is a lockless load such as
 * (pos)->member.next used by the iterators below: the pointer could be
 * non-NULL at the NULL test yet different (or NULL) when dereferenced,
 * leading to a crash under concurrent list updates.  This matches the
 * upstream kernel's single-evaluation definition.
 */
#define hlist_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
	})

#undef hlist_for_each_entry
/* Iterate over an hlist of the given type; @pos is the loop cursor. */
#define hlist_for_each_entry(pos, head, member)                             \
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
	     pos;                                                           \
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

#undef hlist_for_each_entry_safe
/* As above, but safe against removal of @pos; @n caches the next node. */
#define hlist_for_each_entry_safe(pos, n, head, member)		    \
	for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);   \
	     pos && ({ n = pos->member.next; 1; });			    \
	     pos = hlist_entry_safe(n, typeof(*pos), member))
3820
3821#ifdef CONFIG_XPS
3822extern int __kc_netif_set_xps_queue(struct net_device *, struct cpumask *, u16);
3823#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx))
3824#else /* CONFIG_XPS */
3825#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0)
3826#endif /* CONFIG_XPS */
3827
3828#ifdef HAVE_NETDEV_SELECT_QUEUE
3829#define _kc_hashrnd 0xd631614b /* not so random hash salt */
3830extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
3831#define __netdev_pick_tx __kc_netdev_pick_tx
3832#endif /* HAVE_NETDEV_SELECT_QUEUE */
3833#else
3834#define HAVE_BRIDGE_FILTER
3835#define USE_DEFAULT_FDB_DEL_DUMP
3836#endif /* < 3.9.0 */
3837
3838/*****************************************************************************/
3839#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
3840#ifdef CONFIG_PCI_IOV
3841extern int __kc_pci_vfs_assigned(struct pci_dev *dev);
3842#else
/* Stub for pci_vfs_assigned() when CONFIG_PCI_IOV is disabled: with no
 * SR-IOV support no VFs can exist, so report zero assigned VFs. */
static inline int __kc_pci_vfs_assigned(struct pci_dev *dev)
{
	return 0;
}
3847#endif
3848#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev)
3849
3850#ifndef VLAN_TX_COOKIE_MAGIC
3851static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb,
3852							 u16 vlan_tci)
3853{
3854#ifdef VLAN_TAG_PRESENT
3855	vlan_tci |= VLAN_TAG_PRESENT;
3856#endif
3857	skb->vlan_tci = vlan_tci;
3858        return skb;
3859}
3860#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \
3861	__kc__vlan_hwaccel_put_tag(skb, vlan_tci)
3862#endif
3863
3864#else /* >= 3.10.0 */
3865#define HAVE_ENCAP_TSO_OFFLOAD
3866#endif /* >= 3.10.0 */
3867
3868#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) )
3869#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)))
3870#if (!(UBUNTU_KERNEL_CODE >= UBUNTU_KERNEL_VERSION(3,13,0,30,0) \
3871    && (UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(12,4) \
3872     || UBUNTU_RELEASE_CODE == UBUNTU_RELEASE_VERSION(14,4))))
3873#if (!(SLE_VERSION_CODE == SLE_VERSION(12,0,0)))
3874#ifdef NETIF_F_RXHASH
3875#define PKT_HASH_TYPE_L3 0
/* Backport of skb_set_hash() for pre-3.14 kernels: the PKT_HASH_TYPE_*
 * classification did not exist yet, so @type is accepted and ignored and
 * the hash is stored in the old rxhash field. */
static inline void
skb_set_hash(struct sk_buff *skb, __u32 hash, __always_unused int type)
{
	skb->rxhash = hash;
}
3881#endif /* NETIF_F_RXHASH */
3882#endif /* < SLES12 */
3883#endif /* < 3.13.0-30.54 (Ubuntu 14.04) */
3884#endif /* < RHEL7 */
3885#endif /* < 3.14.0 */
3886
3887#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) ) \
3888    || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3889#undef SET_ETHTOOL_OPS
3890#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))
3891#define HAVE_VF_MIN_MAX_TXRATE 1
3892#endif /* >= 3.16.0 */
3893
3894#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) ) \
3895    || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
3896#define HAVE_NDO_DFLT_BRIDGE_ADD_MASK
3897#if ( RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(7,2) )
3898#define HAVE_NDO_FDB_ADD_VID
3899#endif /* !RHEL 7.2 */
3900#endif /* >= 3.19.0 */
3901
3902#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0) ) \
3903    || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2) ))
/* the vlan_tx_tag_* helpers were renamed to skb_vlan_tag_* in 4.0 */
3905#define vlan_tx_tag_get skb_vlan_tag_get
3906#define vlan_tx_tag_present skb_vlan_tag_present
3907#if ( RHEL_RELEASE_CODE != RHEL_RELEASE_VERSION(7,2) )
3908#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS
3909#endif /* !RHEL 7.2 */
3910#endif /* 4.0.0 */
3911
3912#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0) ) \
3913    || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3) ))
3914/* ndo_bridge_getlink adds new nlflags parameter */
3915#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS
3916#endif /* >= 4.1.0 */
3917
3918#if (( LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) ) \
3919    || ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4) ))
3920/* ndo_bridge_getlink adds new filter_mask and vlan_fill parameters */
3921#define HAVE_NDO_BRIDGE_GETLINK_FILTER_MASK_VLAN_FILL
3922#endif /* >= 4.2.0 */
3923
3924/*
3925 * vlan_tx_tag_* macros renamed to skb_vlan_tag_* (Linux commit: df8a39defad4)
3926 * For older kernels backported this commit, need to use renamed functions.
3927 * This fix is specific to RedHat/CentOS kernels.
3928 */
3929#if (defined(RHEL_RELEASE_CODE) && \
3930	(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6, 8)) && \
3931	(LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)))
3932#define vlan_tx_tag_get skb_vlan_tag_get
3933#define vlan_tx_tag_present skb_vlan_tag_present
3934#endif
3935
3936#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)) || \
3937     (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12, 3, 0)) || \
3938     (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4)))
3939#define HAVE_VF_VLAN_PROTO
3940#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 4))
3941/* In RHEL/Centos 7.4, the "new" version of ndo_set_vf_vlan
3942 * is in the struct net_device_ops_extended */
3943#define ndo_set_vf_vlan extended.ndo_set_vf_vlan
3944#endif
3945#endif
3946
3947#if (defined(RHEL_RELEASE_CODE) && \
3948	(RHEL_RELEASE_VERSION(7, 5) <= RHEL_RELEASE_CODE))
3949#define ndo_change_mtu ndo_change_mtu_rh74
3950#endif
3951
3952#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0)
3953#define HAVE_PCI_ENABLE_MSIX
3954#endif
3955
3956#if defined(timer_setup) && defined(from_timer)
3957#define HAVE_TIMER_SETUP
3958#endif
3959
3960#endif /* _KCOMPAT_H_ */
3961