1/*******************************************************************************
2
3Copyright (c) 2001-2015, Intel Corporation
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10    this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13    notice, this list of conditions and the following disclaimer in the
14    documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17    contributors may be used to endorse or promote products derived from
18    this software without specific prior written permission.
19
20THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30POSSIBILITY OF SUCH DAMAGE.
31
32***************************************************************************/
33
34/* 82562G 10/100 Network Connection
35 * 82562G-2 10/100 Network Connection
36 * 82562GT 10/100 Network Connection
37 * 82562GT-2 10/100 Network Connection
38 * 82562V 10/100 Network Connection
39 * 82562V-2 10/100 Network Connection
40 * 82566DC-2 Gigabit Network Connection
41 * 82566DC Gigabit Network Connection
42 * 82566DM-2 Gigabit Network Connection
43 * 82566DM Gigabit Network Connection
44 * 82566MC Gigabit Network Connection
45 * 82566MM Gigabit Network Connection
46 * 82567LM Gigabit Network Connection
47 * 82567LF Gigabit Network Connection
48 * 82567V Gigabit Network Connection
49 * 82567LM-2 Gigabit Network Connection
50 * 82567LF-2 Gigabit Network Connection
51 * 82567V-2 Gigabit Network Connection
52 * 82567LF-3 Gigabit Network Connection
53 * 82567LM-3 Gigabit Network Connection
54 * 82567LM-4 Gigabit Network Connection
55 * 82577LM Gigabit Network Connection
56 * 82577LC Gigabit Network Connection
57 * 82578DM Gigabit Network Connection
58 * 82578DC Gigabit Network Connection
59 * 82579LM Gigabit Network Connection
60 * 82579V Gigabit Network Connection
61 * Ethernet Connection I217-LM
62 * Ethernet Connection I217-V
63 * Ethernet Connection I218-V
64 * Ethernet Connection I218-LM
65 * Ethernet Connection (2) I218-LM
66 * Ethernet Connection (2) I218-V
67 * Ethernet Connection (3) I218-LM
68 * Ethernet Connection (3) I218-V
69 */
70
71#include "e1000_api.h"
72
73STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
74STATIC s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76STATIC s32  e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80STATIC int  e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81STATIC int  e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
84STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
85					      u8 *mc_addr_list,
86					      u32 mc_addr_count);
87#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
88STATIC s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
89STATIC s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
90STATIC s32  e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
91STATIC s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
92					    bool active);
93STATIC s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
94					    bool active);
95STATIC s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
96				   u16 words, u16 *data);
97STATIC s32  e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
98			       u16 *data);
99STATIC s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
100				    u16 words, u16 *data);
101STATIC s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
102STATIC s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
103STATIC s32  e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
104STATIC s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
105					    u16 *data);
106STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
107STATIC s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
108STATIC s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
109STATIC s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
110STATIC s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
111STATIC s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
112STATIC s32  e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
113STATIC s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
114					   u16 *speed, u16 *duplex);
115STATIC s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
116STATIC s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
117STATIC s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
118STATIC s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
119STATIC s32  e1000_setup_led_pchlan(struct e1000_hw *hw);
120STATIC s32  e1000_cleanup_led_pchlan(struct e1000_hw *hw);
121STATIC s32  e1000_led_on_pchlan(struct e1000_hw *hw);
122STATIC s32  e1000_led_off_pchlan(struct e1000_hw *hw);
123STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
124STATIC s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
125STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
126STATIC s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
127STATIC s32  e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
128					  u32 offset, u8 *data);
129STATIC s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
130					  u8 size, u16 *data);
131STATIC s32  e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
132					    u32 *data);
133STATIC s32  e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
134					   u32 offset, u32 *data);
135STATIC s32  e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
136					     u32 offset, u32 data);
137STATIC s32  e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
138						  u32 offset, u32 dword);
139STATIC s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
140					  u32 offset, u16 *data);
141STATIC s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
142						 u32 offset, u8 byte);
143STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
144STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
145STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
146STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
147STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
148STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
149
150/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
151/* Offset 04h HSFSTS */
152union ich8_hws_flash_status {
153	struct ich8_hsfsts {
154		u16 flcdone:1; /* bit 0 Flash Cycle Done */
155		u16 flcerr:1; /* bit 1 Flash Cycle Error */
156		u16 dael:1; /* bit 2 Direct Access error Log */
157		u16 berasesz:2; /* bit 4:3 Sector Erase Size */
158		u16 flcinprog:1; /* bit 5 flash cycle in Progress */
159		u16 reserved1:2; /* bit 13:6 Reserved */
160		u16 reserved2:6; /* bit 13:6 Reserved */
161		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
162		u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
163	} hsf_status;
164	u16 regval;
165};
166
167/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
168/* Offset 06h FLCTL */
169union ich8_hws_flash_ctrl {
170	struct ich8_hsflctl {
171		u16 flcgo:1;   /* 0 Flash Cycle Go */
172		u16 flcycle:2;   /* 2:1 Flash Cycle */
173		u16 reserved:5;   /* 7:3 Reserved  */
174		u16 fldbcount:2;   /* 9:8 Flash Data Byte Count */
175		u16 flockdn:6;   /* 15:10 Reserved */
176	} hsf_ctrl;
177	u16 regval;
178};
179
180/* ICH Flash Region Access Permissions */
181union ich8_hws_flash_regacc {
182	struct ich8_flracc {
183		u32 grra:8; /* 0:7 GbE region Read Access */
184		u32 grwa:8; /* 8:15 GbE region Write Access */
185		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
186		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
187	} hsf_flregacc;
188	u16 regval;
189};
190
191/**
192 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
193 *  @hw: pointer to the HW structure
194 *
195 *  Test access to the PHY registers by reading the PHY ID registers.  If
196 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
197 *  otherwise assume the read PHY ID is correct if it is valid.
198 *
199 *  Assumes the sw/fw/hw semaphore is already acquired.
200 **/
201STATIC bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
202{
203	u16 phy_reg = 0;
204	u32 phy_id = 0;
205	s32 ret_val = 0;
206	u16 retry_count;
207	u32 mac_reg = 0;
208
209	for (retry_count = 0; retry_count < 2; retry_count++) {
210		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
211		if (ret_val || (phy_reg == 0xFFFF))
212			continue;
213		phy_id = (u32)(phy_reg << 16);
214
215		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
216		if (ret_val || (phy_reg == 0xFFFF)) {
217			phy_id = 0;
218			continue;
219		}
220		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
221		break;
222	}
223
224	if (hw->phy.id) {
225		if  (hw->phy.id == phy_id)
226			goto out;
227	} else if (phy_id) {
228		hw->phy.id = phy_id;
229		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
230		goto out;
231	}
232
233	/* In case the PHY needs to be in mdio slow mode,
234	 * set slow mode and try to get the PHY id again.
235	 */
236	if (hw->mac.type < e1000_pch_lpt) {
237		hw->phy.ops.release(hw);
238		ret_val = e1000_set_mdio_slow_mode_hv(hw);
239		if (!ret_val)
240			ret_val = e1000_get_phy_id(hw);
241		hw->phy.ops.acquire(hw);
242	}
243
244	if (ret_val)
245		return false;
246out:
247	if (hw->mac.type >= e1000_pch_lpt) {
248		/* Only unforce SMBus if ME is not active */
249		if (!(E1000_READ_REG(hw, E1000_FWSM) &
250		    E1000_ICH_FWSM_FW_VALID)) {
251			/* Unforce SMBus mode in PHY */
252			hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253			phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254			hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255
256			/* Unforce SMBus mode in MAC */
257			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260		}
261	}
262
263	return true;
264}
265
266/**
267 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268 *  @hw: pointer to the HW structure
269 *
270 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271 *  used to reset the PHY to a quiescent state when necessary.
272 **/
273STATIC void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274{
275	u32 mac_reg;
276
277	DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278
279	/* Set Phy Config Counter to 50msec */
280	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283	E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284
285	/* Toggle LANPHYPC Value bit */
286	mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290	E1000_WRITE_FLUSH(hw);
291	msec_delay(1);
292	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293	E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294	E1000_WRITE_FLUSH(hw);
295
296	if (hw->mac.type < e1000_pch_lpt) {
297		msec_delay(50);
298	} else {
299		u16 count = 20;
300
301		do {
302			msec_delay(5);
303		} while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304			   E1000_CTRL_EXT_LPCD) && count--);
305
306		msec_delay(30);
307	}
308}
309
310/**
311 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312 *  @hw: pointer to the HW structure
313 *
314 *  Workarounds/flow necessary for PHY initialization during driver load
315 *  and resume paths.
316 **/
317STATIC s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318{
319	u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320	s32 ret_val;
321
322	DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323
324	/* Gate automatic PHY configuration by hardware on managed and
325	 * non-managed 82579 and newer adapters.
326	 */
327	e1000_gate_hw_phy_config_ich8lan(hw, true);
328
329#ifdef ULP_SUPPORT
330	/* It is not possible to be certain of the current state of ULP
331	 * so forcibly disable it.
332	 */
333	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
334
335#endif /* ULP_SUPPORT */
336	ret_val = hw->phy.ops.acquire(hw);
337	if (ret_val) {
338		DEBUGOUT("Failed to initialize PHY flow\n");
339		goto out;
340	}
341
342	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
343	 * inaccessible and resetting the PHY is not blocked, toggle the
344	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
345	 */
346	switch (hw->mac.type) {
347	case e1000_pch_lpt:
348	case e1000_pch_spt:
349	case e1000_pch_cnp:
350		if (e1000_phy_is_accessible_pchlan(hw))
351			break;
352
353		/* Before toggling LANPHYPC, see if PHY is accessible by
354		 * forcing MAC to SMBus mode first.
355		 */
356		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
357		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
358		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
359
360		/* Wait 50 milliseconds for MAC to finish any retries
361		 * that it might be trying to perform from previous
362		 * attempts to acknowledge any phy read requests.
363		 */
364		 msec_delay(50);
365
366		/* fall-through */
367	case e1000_pch2lan:
368		if (e1000_phy_is_accessible_pchlan(hw))
369			break;
370
371		/* fall-through */
372	case e1000_pchlan:
373		if ((hw->mac.type == e1000_pchlan) &&
374		    (fwsm & E1000_ICH_FWSM_FW_VALID))
375			break;
376
377		if (hw->phy.ops.check_reset_block(hw)) {
378			DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
379			ret_val = -E1000_ERR_PHY;
380			break;
381		}
382
383		/* Toggle LANPHYPC Value bit */
384		e1000_toggle_lanphypc_pch_lpt(hw);
385		if (hw->mac.type >= e1000_pch_lpt) {
386			if (e1000_phy_is_accessible_pchlan(hw))
387				break;
388
389			/* Toggling LANPHYPC brings the PHY out of SMBus mode
390			 * so ensure that the MAC is also out of SMBus mode
391			 */
392			mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
393			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
394			E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
395
396			if (e1000_phy_is_accessible_pchlan(hw))
397				break;
398
399			ret_val = -E1000_ERR_PHY;
400		}
401		break;
402	default:
403		break;
404	}
405
406	hw->phy.ops.release(hw);
407	if (!ret_val) {
408
409		/* Check to see if able to reset PHY.  Print error if not */
410		if (hw->phy.ops.check_reset_block(hw)) {
411			ERROR_REPORT("Reset blocked by ME\n");
412			goto out;
413		}
414
415		/* Reset the PHY before any access to it.  Doing so, ensures
416		 * that the PHY is in a known good state before we read/write
417		 * PHY registers.  The generic reset is sufficient here,
418		 * because we haven't determined the PHY type yet.
419		 */
420		ret_val = e1000_phy_hw_reset_generic(hw);
421		if (ret_val)
422			goto out;
423
424		/* On a successful reset, possibly need to wait for the PHY
425		 * to quiesce to an accessible state before returning control
426		 * to the calling function.  If the PHY does not quiesce, then
427		 * return E1000E_BLK_PHY_RESET, as this is the condition that
428		 *  the PHY is in.
429		 */
430		ret_val = hw->phy.ops.check_reset_block(hw);
431		if (ret_val)
432			ERROR_REPORT("ME blocked access to PHY after reset\n");
433	}
434
435out:
436	/* Ungate automatic PHY configuration on non-managed 82579 */
437	if ((hw->mac.type == e1000_pch2lan) &&
438	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
439		msec_delay(10);
440		e1000_gate_hw_phy_config_ich8lan(hw, false);
441	}
442
443	return ret_val;
444}
445
446/**
447 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
448 *  @hw: pointer to the HW structure
449 *
450 *  Initialize family-specific PHY parameters and function pointers.
451 **/
452STATIC s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
453{
454	struct e1000_phy_info *phy = &hw->phy;
455	s32 ret_val;
456
457	DEBUGFUNC("e1000_init_phy_params_pchlan");
458
459	phy->addr		= 1;
460	phy->reset_delay_us	= 100;
461
462	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
463	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
464	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
465	phy->ops.set_page	= e1000_set_page_igp;
466	phy->ops.read_reg	= e1000_read_phy_reg_hv;
467	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
468	phy->ops.read_reg_page	= e1000_read_phy_reg_page_hv;
469	phy->ops.release	= e1000_release_swflag_ich8lan;
470	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
471	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
472	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
473	phy->ops.write_reg	= e1000_write_phy_reg_hv;
474	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
475	phy->ops.write_reg_page	= e1000_write_phy_reg_page_hv;
476	phy->ops.power_up	= e1000_power_up_phy_copper;
477	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
478	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
479
480	phy->id = e1000_phy_unknown;
481
482	ret_val = e1000_init_phy_workarounds_pchlan(hw);
483	if (ret_val)
484		return ret_val;
485
486	if (phy->id == e1000_phy_unknown)
487		switch (hw->mac.type) {
488		default:
489			ret_val = e1000_get_phy_id(hw);
490			if (ret_val)
491				return ret_val;
492			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
493				break;
494			/* fall-through */
495		case e1000_pch2lan:
496		case e1000_pch_lpt:
497		case e1000_pch_spt:
498		case e1000_pch_cnp:
499			/* In case the PHY needs to be in mdio slow mode,
500			 * set slow mode and try to get the PHY id again.
501			 */
502			ret_val = e1000_set_mdio_slow_mode_hv(hw);
503			if (ret_val)
504				return ret_val;
505			ret_val = e1000_get_phy_id(hw);
506			if (ret_val)
507				return ret_val;
508			break;
509		}
510	phy->type = e1000_get_phy_type_from_id(phy->id);
511
512	switch (phy->type) {
513	case e1000_phy_82577:
514	case e1000_phy_82579:
515	case e1000_phy_i217:
516		phy->ops.check_polarity = e1000_check_polarity_82577;
517		phy->ops.force_speed_duplex =
518			e1000_phy_force_speed_duplex_82577;
519		phy->ops.get_cable_length = e1000_get_cable_length_82577;
520		phy->ops.get_info = e1000_get_phy_info_82577;
521		phy->ops.commit = e1000_phy_sw_reset_generic;
522		break;
523	case e1000_phy_82578:
524		phy->ops.check_polarity = e1000_check_polarity_m88;
525		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
526		phy->ops.get_cable_length = e1000_get_cable_length_m88;
527		phy->ops.get_info = e1000_get_phy_info_m88;
528		break;
529	default:
530		ret_val = -E1000_ERR_PHY;
531		break;
532	}
533
534	return ret_val;
535}
536
537/**
538 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
539 *  @hw: pointer to the HW structure
540 *
541 *  Initialize family-specific PHY parameters and function pointers.
542 **/
543STATIC s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
544{
545	struct e1000_phy_info *phy = &hw->phy;
546	s32 ret_val;
547	u16 i = 0;
548
549	DEBUGFUNC("e1000_init_phy_params_ich8lan");
550
551	phy->addr		= 1;
552	phy->reset_delay_us	= 100;
553
554	phy->ops.acquire	= e1000_acquire_swflag_ich8lan;
555	phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
556	phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
557	phy->ops.get_cfg_done	= e1000_get_cfg_done_ich8lan;
558	phy->ops.read_reg	= e1000_read_phy_reg_igp;
559	phy->ops.release	= e1000_release_swflag_ich8lan;
560	phy->ops.reset		= e1000_phy_hw_reset_ich8lan;
561	phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
562	phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
563	phy->ops.write_reg	= e1000_write_phy_reg_igp;
564	phy->ops.power_up	= e1000_power_up_phy_copper;
565	phy->ops.power_down	= e1000_power_down_phy_copper_ich8lan;
566
567	/* We may need to do this twice - once for IGP and if that fails,
568	 * we'll set BM func pointers and try again
569	 */
570	ret_val = e1000_determine_phy_address(hw);
571	if (ret_val) {
572		phy->ops.write_reg = e1000_write_phy_reg_bm;
573		phy->ops.read_reg  = e1000_read_phy_reg_bm;
574		ret_val = e1000_determine_phy_address(hw);
575		if (ret_val) {
576			DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
577			return ret_val;
578		}
579	}
580
581	phy->id = 0;
582	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
583	       (i++ < 100)) {
584		msec_delay(1);
585		ret_val = e1000_get_phy_id(hw);
586		if (ret_val)
587			return ret_val;
588	}
589
590	/* Verify phy id */
591	switch (phy->id) {
592	case IGP03E1000_E_PHY_ID:
593		phy->type = e1000_phy_igp_3;
594		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
595		phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
596		phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
597		phy->ops.get_info = e1000_get_phy_info_igp;
598		phy->ops.check_polarity = e1000_check_polarity_igp;
599		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
600		break;
601	case IFE_E_PHY_ID:
602	case IFE_PLUS_E_PHY_ID:
603	case IFE_C_E_PHY_ID:
604		phy->type = e1000_phy_ife;
605		phy->autoneg_mask = E1000_ALL_NOT_GIG;
606		phy->ops.get_info = e1000_get_phy_info_ife;
607		phy->ops.check_polarity = e1000_check_polarity_ife;
608		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
609		break;
610	case BME1000_E_PHY_ID:
611		phy->type = e1000_phy_bm;
612		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
613		phy->ops.read_reg = e1000_read_phy_reg_bm;
614		phy->ops.write_reg = e1000_write_phy_reg_bm;
615		phy->ops.commit = e1000_phy_sw_reset_generic;
616		phy->ops.get_info = e1000_get_phy_info_m88;
617		phy->ops.check_polarity = e1000_check_polarity_m88;
618		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
619		break;
620	default:
621		return -E1000_ERR_PHY;
622		break;
623	}
624
625	return E1000_SUCCESS;
626}
627
628/**
629 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
630 *  @hw: pointer to the HW structure
631 *
632 *  Initialize family-specific NVM parameters and function
633 *  pointers.
634 **/
635STATIC s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
636{
637	struct e1000_nvm_info *nvm = &hw->nvm;
638	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
639	u32 gfpreg, sector_base_addr, sector_end_addr;
640	u16 i;
641	u32 nvm_size;
642
643	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
644
645	nvm->type = e1000_nvm_flash_sw;
646
647	if (hw->mac.type >= e1000_pch_spt) {
648		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
649		 * STRAP register. This is because in SPT the GbE Flash region
650		 * is no longer accessed through the flash registers. Instead,
651		 * the mechanism has changed, and the Flash region access
652		 * registers are now implemented in GbE memory space.
653		 */
654		nvm->flash_base_addr = 0;
655		nvm_size =
656		    (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
657		    * NVM_SIZE_MULTIPLIER;
658		nvm->flash_bank_size = nvm_size / 2;
659		/* Adjust to word count */
660		nvm->flash_bank_size /= sizeof(u16);
661		/* Set the base address for flash register access */
662		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
663	} else {
664		/* Can't read flash registers if register set isn't mapped. */
665		if (!hw->flash_address) {
666			DEBUGOUT("ERROR: Flash registers not mapped\n");
667			return -E1000_ERR_CONFIG;
668		}
669
670		gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
671
672		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
673		 * Add 1 to sector_end_addr since this sector is included in
674		 * the overall size.
675		 */
676		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
677		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
678
679		/* flash_base_addr is byte-aligned */
680		nvm->flash_base_addr = sector_base_addr
681				       << FLASH_SECTOR_ADDR_SHIFT;
682
683		/* find total size of the NVM, then cut in half since the total
684		 * size represents two separate NVM banks.
685		 */
686		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
687					<< FLASH_SECTOR_ADDR_SHIFT);
688		nvm->flash_bank_size /= 2;
689		/* Adjust to word count */
690		nvm->flash_bank_size /= sizeof(u16);
691	}
692
693	nvm->word_size = E1000_SHADOW_RAM_WORDS;
694
695	/* Clear shadow ram */
696	for (i = 0; i < nvm->word_size; i++) {
697		dev_spec->shadow_ram[i].modified = false;
698		dev_spec->shadow_ram[i].value    = 0xFFFF;
699	}
700
701	E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
702	E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
703
704	/* Function Pointers */
705	nvm->ops.acquire	= e1000_acquire_nvm_ich8lan;
706	nvm->ops.release	= e1000_release_nvm_ich8lan;
707	if (hw->mac.type >= e1000_pch_spt) {
708		nvm->ops.read	= e1000_read_nvm_spt;
709		nvm->ops.update	= e1000_update_nvm_checksum_spt;
710	} else {
711		nvm->ops.read	= e1000_read_nvm_ich8lan;
712		nvm->ops.update	= e1000_update_nvm_checksum_ich8lan;
713	}
714	nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
715	nvm->ops.validate	= e1000_validate_nvm_checksum_ich8lan;
716	nvm->ops.write		= e1000_write_nvm_ich8lan;
717
718	return E1000_SUCCESS;
719}
720
721/**
722 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
723 *  @hw: pointer to the HW structure
724 *
725 *  Initialize family-specific MAC parameters and function
726 *  pointers.
727 **/
728STATIC s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
729{
730	struct e1000_mac_info *mac = &hw->mac;
731#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
732	u16 pci_cfg;
733#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
734
735	DEBUGFUNC("e1000_init_mac_params_ich8lan");
736
737	/* Set media type function pointer */
738	hw->phy.media_type = e1000_media_type_copper;
739
740	/* Set mta register count */
741	mac->mta_reg_count = 32;
742	/* Set rar entry count */
743	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
744	if (mac->type == e1000_ich8lan)
745		mac->rar_entry_count--;
746	/* Set if part includes ASF firmware */
747	mac->asf_firmware_present = true;
748	/* FWSM register */
749	mac->has_fwsm = true;
750	/* ARC subsystem not supported */
751	mac->arc_subsystem_valid = false;
752	/* Adaptive IFS supported */
753	mac->adaptive_ifs = true;
754
755	/* Function pointers */
756
757	/* bus type/speed/width */
758	mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
759	/* function id */
760	mac->ops.set_lan_id = e1000_set_lan_id_single_port;
761	/* reset */
762	mac->ops.reset_hw = e1000_reset_hw_ich8lan;
763	/* hw initialization */
764	mac->ops.init_hw = e1000_init_hw_ich8lan;
765	/* link setup */
766	mac->ops.setup_link = e1000_setup_link_ich8lan;
767	/* physical interface setup */
768	mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
769	/* check for link */
770	mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
771	/* link info */
772	mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
773	/* multicast address update */
774	mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
775	/* clear hardware counters */
776	mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
777
778	/* LED and other operations */
779	switch (mac->type) {
780	case e1000_ich8lan:
781	case e1000_ich9lan:
782	case e1000_ich10lan:
783		/* check management mode */
784		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
785		/* ID LED init */
786		mac->ops.id_led_init = e1000_id_led_init_generic;
787		/* blink LED */
788		mac->ops.blink_led = e1000_blink_led_generic;
789		/* setup LED */
790		mac->ops.setup_led = e1000_setup_led_generic;
791		/* cleanup LED */
792		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
793		/* turn on/off LED */
794		mac->ops.led_on = e1000_led_on_ich8lan;
795		mac->ops.led_off = e1000_led_off_ich8lan;
796		break;
797	case e1000_pch2lan:
798		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
799		mac->ops.rar_set = e1000_rar_set_pch2lan;
800		/* fall-through */
801	case e1000_pch_lpt:
802	case e1000_pch_spt:
803	case e1000_pch_cnp:
804#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
805		/* multicast address update for pch2 */
806		mac->ops.update_mc_addr_list =
807			e1000_update_mc_addr_list_pch2lan;
808		/* fall-through */
809#endif
810	case e1000_pchlan:
811#if defined(QV_RELEASE) || !defined(NO_PCH_LPT_B0_SUPPORT)
812		/* save PCH revision_id */
813		e1000_read_pci_cfg(hw, E1000_PCI_REVISION_ID_REG, &pci_cfg);
814		/* SPT uses full byte for revision ID,
815		 * as opposed to previous generations
816		 */
817		if (hw->mac.type >= e1000_pch_spt)
818			hw->revision_id = (u8)(pci_cfg &= 0x00FF);
819		else
820			hw->revision_id = (u8)(pci_cfg &= 0x000F);
821#endif /* QV_RELEASE || !defined(NO_PCH_LPT_B0_SUPPORT) */
822		/* check management mode */
823		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
824		/* ID LED init */
825		mac->ops.id_led_init = e1000_id_led_init_pchlan;
826		/* setup LED */
827		mac->ops.setup_led = e1000_setup_led_pchlan;
828		/* cleanup LED */
829		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
830		/* turn on/off LED */
831		mac->ops.led_on = e1000_led_on_pchlan;
832		mac->ops.led_off = e1000_led_off_pchlan;
833		break;
834	default:
835		break;
836	}
837
838	if (mac->type >= e1000_pch_lpt) {
839		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
840		mac->ops.rar_set = e1000_rar_set_pch_lpt;
841		mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
842	}
843
844	/* Enable PCS Lock-loss workaround for ICH8 */
845	if (mac->type == e1000_ich8lan)
846		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
847
848	return E1000_SUCCESS;
849}
850
851/**
852 *  __e1000_access_emi_reg_locked - Read/write EMI register
853 *  @hw: pointer to the HW structure
854 *  @addr: EMI address to program
855 *  @data: pointer to value to read/write from/to the EMI address
856 *  @read: boolean flag to indicate read or write
857 *
858 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
859 **/
860STATIC s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
861					 u16 *data, bool read)
862{
863	s32 ret_val;
864
865	DEBUGFUNC("__e1000_access_emi_reg_locked");
866
867	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
868	if (ret_val)
869		return ret_val;
870
871	if (read)
872		ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
873						      data);
874	else
875		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
876						       *data);
877
878	return ret_val;
879}
880
881/**
882 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
883 *  @hw: pointer to the HW structure
884 *  @addr: EMI address to program
885 *  @data: value to be read from the EMI address
886 *
887 *  Assumes the SW/FW/HW Semaphore is already acquired.
888 **/
889s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
890{
891	DEBUGFUNC("e1000_read_emi_reg_locked");
892
893	return __e1000_access_emi_reg_locked(hw, addr, data, true);
894}
895
896/**
897 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
898 *  @hw: pointer to the HW structure
899 *  @addr: EMI address to program
900 *  @data: value to be written to the EMI address
901 *
902 *  Assumes the SW/FW/HW Semaphore is already acquired.
903 **/
904s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
905{
906	DEBUGFUNC("e1000_read_emi_reg_locked");
907
908	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
909}
910
911/**
912 *  e1000_set_eee_pchlan - Enable/disable EEE support
913 *  @hw: pointer to the HW structure
914 *
915 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
916 *  the link and the EEE capabilities of the link partner.  The LPI Control
917 *  register bits will remain set only if/when link is up.
918 *
919 *  EEE LPI must not be asserted earlier than one second after link is up.
920 *  On 82579, EEE LPI should not be enabled until such time otherwise there
921 *  can be link issues with some switches.  Other devices can have EEE LPI
922 *  enabled immediately upon link up since they have a timer in hardware which
923 *  prevents LPI from being asserted too early.
924 **/
925s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
926{
927	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
928	s32 ret_val;
929	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
930
931	DEBUGFUNC("e1000_set_eee_pchlan");
932
933	switch (hw->phy.type) {
934	case e1000_phy_82579:
935		lpa = I82579_EEE_LP_ABILITY;
936		pcs_status = I82579_EEE_PCS_STATUS;
937		adv_addr = I82579_EEE_ADVERTISEMENT;
938		break;
939	case e1000_phy_i217:
940		lpa = I217_EEE_LP_ABILITY;
941		pcs_status = I217_EEE_PCS_STATUS;
942		adv_addr = I217_EEE_ADVERTISEMENT;
943		break;
944	default:
945		return E1000_SUCCESS;
946	}
947
948	ret_val = hw->phy.ops.acquire(hw);
949	if (ret_val)
950		return ret_val;
951
952	ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
953	if (ret_val)
954		goto release;
955
956	/* Clear bits that enable EEE in various speeds */
957	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
958
959	/* Enable EEE if not disabled by user */
960	if (!dev_spec->eee_disable) {
961		/* Save off link partner's EEE ability */
962		ret_val = e1000_read_emi_reg_locked(hw, lpa,
963						    &dev_spec->eee_lp_ability);
964		if (ret_val)
965			goto release;
966
967		/* Read EEE advertisement */
968		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
969		if (ret_val)
970			goto release;
971
972		/* Enable EEE only for speeds in which the link partner is
973		 * EEE capable and for which we advertise EEE.
974		 */
975		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
976			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
977
978		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
979			hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
980			if (data & NWAY_LPAR_100TX_FD_CAPS)
981				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
982			else
983				/* EEE is not supported in 100Half, so ignore
984				 * partner's EEE in 100 ability if full-duplex
985				 * is not advertised.
986				 */
987				dev_spec->eee_lp_ability &=
988				    ~I82579_EEE_100_SUPPORTED;
989		}
990	}
991
992	if (hw->phy.type == e1000_phy_82579) {
993		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
994						    &data);
995		if (ret_val)
996			goto release;
997
998		data &= ~I82579_LPI_100_PLL_SHUT;
999		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
1000						     data);
1001	}
1002
1003	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
1004	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
1005	if (ret_val)
1006		goto release;
1007
1008	ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
1009release:
1010	hw->phy.ops.release(hw);
1011
1012	return ret_val;
1013}
1014
1015/**
1016 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
1017 *  @hw:   pointer to the HW structure
1018 *  @link: link up bool flag
1019 *
1020 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1021 *  preventing further DMA write requests.  Workaround the issue by disabling
1022 *  the de-assertion of the clock request when in 1Gpbs mode.
1023 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1024 *  speeds in order to avoid Tx hangs.
1025 **/
1026STATIC s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1027{
1028	u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1029	u32 status = E1000_READ_REG(hw, E1000_STATUS);
1030	s32 ret_val = E1000_SUCCESS;
1031	u16 reg;
1032
1033	if (link && (status & E1000_STATUS_SPEED_1000)) {
1034		ret_val = hw->phy.ops.acquire(hw);
1035		if (ret_val)
1036			return ret_val;
1037
1038		ret_val =
1039		    e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
1040					       &reg);
1041		if (ret_val)
1042			goto release;
1043
1044		ret_val =
1045		    e1000_write_kmrn_reg_locked(hw,
1046						E1000_KMRNCTRLSTA_K1_CONFIG,
1047						reg &
1048						~E1000_KMRNCTRLSTA_K1_ENABLE);
1049		if (ret_val)
1050			goto release;
1051
1052		usec_delay(10);
1053
1054		E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1055				fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1056
1057		ret_val =
1058		    e1000_write_kmrn_reg_locked(hw,
1059						E1000_KMRNCTRLSTA_K1_CONFIG,
1060						reg);
1061release:
1062		hw->phy.ops.release(hw);
1063	} else {
1064		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
1065		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1066
1067		if ((hw->phy.revision > 5) || !link ||
1068		    ((status & E1000_STATUS_SPEED_100) &&
1069		     (status & E1000_STATUS_FD)))
1070			goto update_fextnvm6;
1071
1072		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1073		if (ret_val)
1074			return ret_val;
1075
1076		/* Clear link status transmit timeout */
1077		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1078
1079		if (status & E1000_STATUS_SPEED_100) {
1080			/* Set inband Tx timeout to 5x10us for 100Half */
1081			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1082
1083			/* Do not extend the K1 entry latency for 100Half */
1084			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1085		} else {
1086			/* Set inband Tx timeout to 50x10us for 10Full/Half */
1087			reg |= 50 <<
1088			       I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1089
1090			/* Extend the K1 entry latency for 10 Mbps */
1091			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1092		}
1093
1094		ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1095		if (ret_val)
1096			return ret_val;
1097
1098update_fextnvm6:
1099		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1100	}
1101
1102	return ret_val;
1103}
1104
1105#ifdef ULP_SUPPORT
1106/**
1107 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1108 *  @hw: pointer to the HW structure
1109 *  @to_sx: boolean indicating a system power state transition to Sx
1110 *
1111 *  When link is down, configure ULP mode to significantly reduce the power
1112 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1113 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1114 *  system, configure the ULP mode by software.
1115 */
1116s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1117{
1118	u32 mac_reg;
1119	s32 ret_val = E1000_SUCCESS;
1120	u16 phy_reg;
1121	u16 oem_reg = 0;
1122
1123	if ((hw->mac.type < e1000_pch_lpt) ||
1124	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1125	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1126	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1127	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1128	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1129		return 0;
1130
1131	if (!to_sx) {
1132		int i = 0;
1133		/* Poll up to 5 seconds for Cable Disconnected indication */
1134		while (!(E1000_READ_REG(hw, E1000_FEXT) &
1135			 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1136			/* Bail if link is re-acquired */
1137			if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1138				return -E1000_ERR_PHY;
1139			if (i++ == 100)
1140				break;
1141
1142			msec_delay(50);
1143		}
1144		DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1145			  (E1000_READ_REG(hw, E1000_FEXT) &
1146			   E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1147			  i * 50);
1148		if (!(E1000_READ_REG(hw, E1000_FEXT) &
1149		    E1000_FEXT_PHY_CABLE_DISCONNECTED))
1150			return 0;
1151	}
1152
1153	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1154		/* Request ME configure ULP mode in the PHY */
1155		mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1156		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1157		E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1158
1159		goto out;
1160	}
1161
1162	ret_val = hw->phy.ops.acquire(hw);
1163	if (ret_val)
1164		goto out;
1165
1166	/* During S0 Idle keep the phy in PCI-E mode */
1167	if (hw->dev_spec.ich8lan.smbus_disable)
1168		goto skip_smbus;
1169
1170	/* Force SMBus mode in PHY */
1171	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1172	if (ret_val)
1173		goto release;
1174	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1175	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1176
1177	/* Force SMBus mode in MAC */
1178	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1179	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1180	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1181
1182	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1183	 * LPLU and disable Gig speed when entering ULP
1184	 */
1185	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1186		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1187						       &oem_reg);
1188		if (ret_val)
1189			goto release;
1190
1191		phy_reg = oem_reg;
1192		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1193
1194		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1195							phy_reg);
1196
1197		if (ret_val)
1198			goto release;
1199	}
1200
1201skip_smbus:
1202	if (!to_sx) {
1203		/* Change the 'Link Status Change' interrupt to trigger
1204		 * on 'Cable Status Change'
1205		 */
1206		ret_val = e1000_read_kmrn_reg_locked(hw,
1207						     E1000_KMRNCTRLSTA_OP_MODES,
1208						     &phy_reg);
1209		if (ret_val)
1210			goto release;
1211		phy_reg |= E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1212		e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1213					    phy_reg);
1214	}
1215
1216	/* Set Inband ULP Exit, Reset to SMBus mode and
1217	 * Disable SMBus Release on PERST# in PHY
1218	 */
1219	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1220	if (ret_val)
1221		goto release;
1222	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1223		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1224	if (to_sx) {
1225		if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1226			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1227		else
1228			phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1229
1230		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1231		phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1232	} else {
1233		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1234		phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1235		phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1236	}
1237	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1238
1239	/* Set Disable SMBus Release on PERST# in MAC */
1240	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1241	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1242	E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1243
1244	/* Commit ULP changes in PHY by starting auto ULP configuration */
1245	phy_reg |= I218_ULP_CONFIG1_START;
1246	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1247
1248	if (!to_sx) {
1249		/* Disable Tx so that the MAC doesn't send any (buffered)
1250		 * packets to the PHY.
1251		 */
1252		mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1253		mac_reg &= ~E1000_TCTL_EN;
1254		E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1255	}
1256
1257	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1258	    to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1259		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1260							oem_reg);
1261		if (ret_val)
1262			goto release;
1263	}
1264
1265release:
1266	hw->phy.ops.release(hw);
1267out:
1268	if (ret_val)
1269		DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1270	else
1271		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1272
1273	return ret_val;
1274}
1275
1276/**
1277 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1278 *  @hw: pointer to the HW structure
1279 *  @force: boolean indicating whether or not to force disabling ULP
1280 *
1281 *  Un-configure ULP mode when link is up, the system is transitioned from
1282 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1283 *  system, poll for an indication from ME that ULP has been un-configured.
1284 *  If not on an ME enabled system, un-configure the ULP mode by software.
1285 *
1286 *  During nominal operation, this function is called when link is acquired
1287 *  to disable ULP mode (force=false); otherwise, for example when unloading
1288 *  the driver or during Sx->S0 transitions, this is called with force=true
1289 *  to forcibly disable ULP.
1290
1291 *  When the cable is plugged in while the device is in D0, a Cable Status
1292 *  Change interrupt is generated which causes this function to be called
1293 *  to partially disable ULP mode and restart autonegotiation.  This function
1294 *  is then called again due to the resulting Link Status Change interrupt
1295 *  to finish cleaning up after the ULP flow.
1296 */
1297s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1298{
1299	s32 ret_val = E1000_SUCCESS;
1300	u32 mac_reg;
1301	u16 phy_reg;
1302	int i = 0;
1303
1304	if ((hw->mac.type < e1000_pch_lpt) ||
1305	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1306	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1307	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1308	    (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1309	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1310		return 0;
1311
1312	if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1313		if (force) {
1314			/* Request ME un-configure ULP mode in the PHY */
1315			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1316			mac_reg &= ~E1000_H2ME_ULP;
1317			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1318			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1319		}
1320
1321		/* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1322		while (E1000_READ_REG(hw, E1000_FWSM) &
1323		       E1000_FWSM_ULP_CFG_DONE) {
1324			if (i++ == 30) {
1325				ret_val = -E1000_ERR_PHY;
1326				goto out;
1327			}
1328
1329			msec_delay(10);
1330		}
1331		DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1332
1333		if (force) {
1334			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1335			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1336			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1337		} else {
1338			/* Clear H2ME.ULP after ME ULP configuration */
1339			mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1340			mac_reg &= ~E1000_H2ME_ULP;
1341			E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1342
1343			/* Restore link speed advertisements and restart
1344			 * Auto-negotiation
1345			 */
1346			if (hw->mac.autoneg) {
1347				ret_val = e1000_phy_setup_autoneg(hw);
1348				if (ret_val)
1349					goto out;
1350			} else {
1351				ret_val = e1000_setup_copper_link_generic(hw);
1352				if (ret_val)
1353					goto out;
1354			}
1355			ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1356		}
1357
1358		goto out;
1359	}
1360
1361	ret_val = hw->phy.ops.acquire(hw);
1362	if (ret_val)
1363		goto out;
1364
1365	/* Revert the change to the 'Link Status Change'
1366	 * interrupt to trigger on 'Cable Status Change'
1367	 */
1368	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES,
1369					     &phy_reg);
1370	if (ret_val)
1371		goto release;
1372	phy_reg &= ~E1000_KMRNCTRLSTA_OP_MODES_LSC2CSC;
1373	e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_OP_MODES, phy_reg);
1374
1375	if (force)
1376		/* Toggle LANPHYPC Value bit */
1377		e1000_toggle_lanphypc_pch_lpt(hw);
1378
1379	/* Unforce SMBus mode in PHY */
1380	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1381	if (ret_val) {
1382		/* The MAC might be in PCIe mode, so temporarily force to
1383		 * SMBus mode in order to access the PHY.
1384		 */
1385		mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1386		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1387		E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1388
1389		msec_delay(50);
1390
1391		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1392						       &phy_reg);
1393		if (ret_val)
1394			goto release;
1395	}
1396	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1397	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1398
1399	/* Unforce SMBus mode in MAC */
1400	mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1401	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1402	E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1403
1404	/* When ULP mode was previously entered, K1 was disabled by the
1405	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1406	 */
1407	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1408	if (ret_val)
1409		goto release;
1410	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1411	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1412
1413	/* Clear ULP enabled configuration */
1414	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1415	if (ret_val)
1416		goto release;
1417	/* CSC interrupt received due to ULP Indication */
1418	if ((phy_reg & I218_ULP_CONFIG1_IND) || force) {
1419		phy_reg &= ~(I218_ULP_CONFIG1_IND |
1420			     I218_ULP_CONFIG1_STICKY_ULP |
1421			     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1422			     I218_ULP_CONFIG1_WOL_HOST |
1423			     I218_ULP_CONFIG1_INBAND_EXIT |
1424			     I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1425			     I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1426			     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1427		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1428
1429		/* Commit ULP changes by starting auto ULP configuration */
1430		phy_reg |= I218_ULP_CONFIG1_START;
1431		e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1432
1433		/* Clear Disable SMBus Release on PERST# in MAC */
1434		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1435		mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1436		E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1437
1438		if (!force) {
1439			hw->phy.ops.release(hw);
1440
1441			if (hw->mac.autoneg)
1442				e1000_phy_setup_autoneg(hw);
1443			else
1444				e1000_setup_copper_link_generic(hw);
1445
1446			e1000_sw_lcd_config_ich8lan(hw);
1447
1448			e1000_oem_bits_config_ich8lan(hw, true);
1449
1450			/* Set ULP state to unknown and return non-zero to
1451			 * indicate no link (yet) and re-enter on the next LSC
1452			 * to finish disabling ULP flow.
1453			 */
1454			hw->dev_spec.ich8lan.ulp_state =
1455			    e1000_ulp_state_unknown;
1456
1457			return 1;
1458		}
1459	}
1460
1461	/* Re-enable Tx */
1462	mac_reg = E1000_READ_REG(hw, E1000_TCTL);
1463	mac_reg |= E1000_TCTL_EN;
1464	E1000_WRITE_REG(hw, E1000_TCTL, mac_reg);
1465
1466release:
1467	hw->phy.ops.release(hw);
1468	if (force) {
1469		hw->phy.ops.reset(hw);
1470		msec_delay(50);
1471	}
1472out:
1473	if (ret_val)
1474		DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1475	else
1476		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1477
1478	return ret_val;
1479}
1480
1481#endif /* ULP_SUPPORT */
1482
1483
1484/**
1485 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1486 *  @hw: pointer to the HW structure
1487 *
1488 *  Checks to see of the link status of the hardware has changed.  If a
1489 *  change in link status has been detected, then we read the PHY registers
1490 *  to get the current speed/duplex if link exists.
1491 **/
1492STATIC s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1493{
1494	struct e1000_mac_info *mac = &hw->mac;
1495	s32 ret_val, tipg_reg = 0;
1496	u16 emi_addr, emi_val = 0;
1497	bool link = false;
1498	u16 phy_reg;
1499
1500	DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1501
1502	/* We only want to go out to the PHY registers to see if Auto-Neg
1503	 * has completed and/or if our link status has changed.  The
1504	 * get_link_status flag is set upon receiving a Link Status
1505	 * Change or Rx Sequence Error interrupt.
1506	 */
1507	if (!mac->get_link_status)
1508		return E1000_SUCCESS;
1509
1510	if ((hw->mac.type < e1000_pch_lpt) ||
1511	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1512	    (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V)) {
1513		/* First we want to see if the MII Status Register reports
1514		 * link.  If so, then we want to get the current speed/duplex
1515		 * of the PHY.
1516		 */
1517		ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1518		if (ret_val)
1519			return ret_val;
1520	} else {
1521		/* Check the MAC's STATUS register to determine link state
1522		 * since the PHY could be inaccessible while in ULP mode.
1523		 */
1524		link = !!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
1525		if (link)
1526			ret_val = e1000_disable_ulp_lpt_lp(hw, false);
1527		else
1528			ret_val = e1000_enable_ulp_lpt_lp(hw, false);
1529		if (ret_val)
1530			return ret_val;
1531	}
1532
1533	if (hw->mac.type == e1000_pchlan) {
1534		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1535		if (ret_val)
1536			return ret_val;
1537	}
1538
1539	/* When connected at 10Mbps half-duplex, some parts are excessively
1540	 * aggressive resulting in many collisions. To avoid this, increase
1541	 * the IPG and reduce Rx latency in the PHY.
1542	 */
1543	if ((hw->mac.type >= e1000_pch2lan) && link) {
1544		u16 speed, duplex;
1545
1546		e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1547		tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1548		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1549
1550		if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1551			tipg_reg |= 0xFF;
1552			/* Reduce Rx latency in analog PHY */
1553			emi_val = 0;
1554		} else if (hw->mac.type >= e1000_pch_spt &&
1555			   duplex == FULL_DUPLEX && speed != SPEED_1000) {
1556			tipg_reg |= 0xC;
1557			emi_val = 1;
1558		} else {
1559			/* Roll back the default values */
1560			tipg_reg |= 0x08;
1561			emi_val = 1;
1562		}
1563
1564		E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1565
1566		ret_val = hw->phy.ops.acquire(hw);
1567		if (ret_val)
1568			return ret_val;
1569
1570		if (hw->mac.type == e1000_pch2lan)
1571			emi_addr = I82579_RX_CONFIG;
1572		else
1573			emi_addr = I217_RX_CONFIG;
1574		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1575
1576
1577		if (hw->mac.type >= e1000_pch_lpt) {
1578			u16 phy_reg;
1579
1580			hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1581						    &phy_reg);
1582			phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1583			if (speed == SPEED_100 || speed == SPEED_10)
1584				phy_reg |= 0x3E8;
1585			else
1586				phy_reg |= 0xFA;
1587			hw->phy.ops.write_reg_locked(hw,
1588						     I217_PLL_CLOCK_GATE_REG,
1589						     phy_reg);
1590
1591			if (speed == SPEED_1000) {
1592				hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1593							    &phy_reg);
1594
1595				phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1596
1597				hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1598							     phy_reg);
1599				}
1600		 }
1601		hw->phy.ops.release(hw);
1602
1603		if (ret_val)
1604			return ret_val;
1605
1606		if (hw->mac.type >= e1000_pch_spt) {
1607			u16 data;
1608			u16 ptr_gap;
1609
1610			if (speed == SPEED_1000) {
1611				ret_val = hw->phy.ops.acquire(hw);
1612				if (ret_val)
1613					return ret_val;
1614
1615				ret_val = hw->phy.ops.read_reg_locked(hw,
1616							      PHY_REG(776, 20),
1617							      &data);
1618				if (ret_val) {
1619					hw->phy.ops.release(hw);
1620					return ret_val;
1621				}
1622
1623				ptr_gap = (data & (0x3FF << 2)) >> 2;
1624				if (ptr_gap < 0x18) {
1625					data &= ~(0x3FF << 2);
1626					data |= (0x18 << 2);
1627					ret_val =
1628						hw->phy.ops.write_reg_locked(hw,
1629							PHY_REG(776, 20), data);
1630				}
1631				hw->phy.ops.release(hw);
1632				if (ret_val)
1633					return ret_val;
1634			} else {
1635				ret_val = hw->phy.ops.acquire(hw);
1636				if (ret_val)
1637					return ret_val;
1638
1639				ret_val = hw->phy.ops.write_reg_locked(hw,
1640							     PHY_REG(776, 20),
1641							     0xC023);
1642				hw->phy.ops.release(hw);
1643				if (ret_val)
1644					return ret_val;
1645
1646			}
1647		}
1648	}
1649
1650	/* I217 Packet Loss issue:
1651	 * ensure that FEXTNVM4 Beacon Duration is set correctly
1652	 * on power up.
1653	 * Set the Beacon Duration for I217 to 8 usec
1654	 */
1655	if (hw->mac.type >= e1000_pch_lpt) {
1656		u32 mac_reg;
1657
1658		mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1659		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1660		mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1661		E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1662	}
1663
1664	/* Work-around I218 hang issue */
1665	if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1666	    (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1667	    (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1668	    (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1669		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1670		if (ret_val)
1671			return ret_val;
1672	}
1673	/* Clear link partner's EEE ability */
1674	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1675
1676	/* Configure K0s minimum time */
1677	if (hw->mac.type >= e1000_pch_lpt) {
1678		e1000_configure_k0s_lpt(hw, K1_ENTRY_LATENCY, K1_MIN_TIME);
1679	}
1680
1681	if (hw->mac.type >= e1000_pch_lpt) {
1682		u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1683
1684		if (hw->mac.type == e1000_pch_spt) {
1685			/* FEXTNVM6 K1-off workaround - for SPT only */
1686			u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1687
1688			if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1689				fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1690			else
1691				fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1692		}
1693
1694		if (hw->dev_spec.ich8lan.disable_k1_off == true)
1695			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1696
1697		E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1698	}
1699
1700	if (!link)
1701		return E1000_SUCCESS; /* No link detected */
1702
1703	mac->get_link_status = false;
1704
1705	switch (hw->mac.type) {
1706	case e1000_pch2lan:
1707		ret_val = e1000_k1_workaround_lv(hw);
1708		if (ret_val)
1709			return ret_val;
1710		/* fall-thru */
1711	case e1000_pchlan:
1712		if (hw->phy.type == e1000_phy_82578) {
1713			ret_val = e1000_link_stall_workaround_hv(hw);
1714			if (ret_val)
1715				return ret_val;
1716		}
1717
1718		/* Workaround for PCHx parts in half-duplex:
1719		 * Set the number of preambles removed from the packet
1720		 * when it is passed from the PHY to the MAC to prevent
1721		 * the MAC from misinterpreting the packet type.
1722		 */
1723		hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1724		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1725
1726		if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1727		    E1000_STATUS_FD)
1728			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1729
1730		hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1731		break;
1732	default:
1733		break;
1734	}
1735
1736	/* Check if there was DownShift, must be checked
1737	 * immediately after link-up
1738	 */
1739	e1000_check_downshift_generic(hw);
1740
1741	/* Enable/Disable EEE after link up */
1742	if (hw->phy.type > e1000_phy_82579) {
1743		ret_val = e1000_set_eee_pchlan(hw);
1744		if (ret_val)
1745			return ret_val;
1746	}
1747
1748	/* If we are forcing speed/duplex, then we simply return since
1749	 * we have already determined whether we have link or not.
1750	 */
1751	if (!mac->autoneg)
1752		return -E1000_ERR_CONFIG;
1753
1754	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1755	 * of MAC speed/duplex configuration.  So we only need to
1756	 * configure Collision Distance in the MAC.
1757	 */
1758	mac->ops.config_collision_dist(hw);
1759
1760	/* Configure Flow Control now that Auto-Neg has completed.
1761	 * First, we need to restore the desired flow control
1762	 * settings because we may have had to re-autoneg with a
1763	 * different link partner.
1764	 */
1765	ret_val = e1000_config_fc_after_link_up_generic(hw);
1766	if (ret_val)
1767		DEBUGOUT("Error configuring flow control\n");
1768
1769	return ret_val;
1770}
1771
1772/**
1773 *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1774 *  @hw: pointer to the HW structure
1775 *
1776 *  Initialize family-specific function pointers for PHY, MAC, and NVM.
1777 **/
1778void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1779{
1780	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1781
1782	hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1783	hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1784	switch (hw->mac.type) {
1785	case e1000_ich8lan:
1786	case e1000_ich9lan:
1787	case e1000_ich10lan:
1788		hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1789		break;
1790	case e1000_pchlan:
1791	case e1000_pch2lan:
1792	case e1000_pch_lpt:
1793	case e1000_pch_spt:
1794	case e1000_pch_cnp:
1795		hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1796		break;
1797	default:
1798		break;
1799	}
1800}
1801
1802/**
1803 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1804 *  @hw: pointer to the HW structure
1805 *
1806 *  Acquires the mutex for performing NVM operations.
1807 **/
1808STATIC s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1809{
1810	DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1811
1812	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1813
1814	return E1000_SUCCESS;
1815}
1816
1817/**
1818 *  e1000_release_nvm_ich8lan - Release NVM mutex
1819 *  @hw: pointer to the HW structure
1820 *
1821 *  Releases the mutex used while performing NVM operations.
1822 **/
1823STATIC void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1824{
1825	DEBUGFUNC("e1000_release_nvm_ich8lan");
1826
1827	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1828
1829	return;
1830}
1831
1832/**
1833 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1834 *  @hw: pointer to the HW structure
1835 *
1836 *  Acquires the software control flag for performing PHY and select
1837 *  MAC CSR accesses.
1838 **/
1839STATIC s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1840{
1841	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1842	s32 ret_val = E1000_SUCCESS;
1843
1844	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1845
1846	E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1847
1848	while (timeout) {
1849		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1850		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1851			break;
1852
1853		msec_delay_irq(1);
1854		timeout--;
1855	}
1856
1857	if (!timeout) {
1858		DEBUGOUT("SW has already locked the resource.\n");
1859		ret_val = -E1000_ERR_CONFIG;
1860		goto out;
1861	}
1862
1863	timeout = SW_FLAG_TIMEOUT;
1864
1865	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1866	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1867
1868	while (timeout) {
1869		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1870		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1871			break;
1872
1873		msec_delay_irq(1);
1874		timeout--;
1875	}
1876
1877	if (!timeout) {
1878		DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1879			  E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1880		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1881		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1882		ret_val = -E1000_ERR_CONFIG;
1883		goto out;
1884	}
1885
1886out:
1887	if (ret_val)
1888		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1889
1890	return ret_val;
1891}
1892
1893/**
1894 *  e1000_release_swflag_ich8lan - Release software control flag
1895 *  @hw: pointer to the HW structure
1896 *
1897 *  Releases the software control flag for performing PHY and select
1898 *  MAC CSR accesses.
1899 **/
1900STATIC void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1901{
1902	u32 extcnf_ctrl;
1903
1904	DEBUGFUNC("e1000_release_swflag_ich8lan");
1905
1906	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1907
1908	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1909		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1910		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1911	} else {
1912		DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1913	}
1914
1915	E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1916
1917	return;
1918}
1919
1920/**
1921 *  e1000_check_mng_mode_ich8lan - Checks management mode
1922 *  @hw: pointer to the HW structure
1923 *
1924 *  This checks if the adapter has any manageability enabled.
1925 *  This is a function pointer entry point only called by read/write
1926 *  routines for the PHY and NVM parts.
1927 **/
1928STATIC bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1929{
1930	u32 fwsm;
1931
1932	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1933
1934	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1935
1936	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1937	       ((fwsm & E1000_FWSM_MODE_MASK) ==
1938		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1939}
1940
1941/**
1942 *  e1000_check_mng_mode_pchlan - Checks management mode
1943 *  @hw: pointer to the HW structure
1944 *
1945 *  This checks if the adapter has iAMT enabled.
1946 *  This is a function pointer entry point only called by read/write
1947 *  routines for the PHY and NVM parts.
1948 **/
1949STATIC bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1950{
1951	u32 fwsm;
1952
1953	DEBUGFUNC("e1000_check_mng_mode_pchlan");
1954
1955	fwsm = E1000_READ_REG(hw, E1000_FWSM);
1956
1957	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1958	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1959}
1960
1961/**
1962 *  e1000_rar_set_pch2lan - Set receive address register
1963 *  @hw: pointer to the HW structure
1964 *  @addr: pointer to the receive address
1965 *  @index: receive address array register
1966 *
1967 *  Sets the receive address array register at index to the address passed
1968 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1969 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1970 *  Use SHRA[0-3] in place of those reserved for ME.
1971 **/
1972STATIC int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1973{
1974	u32 rar_low, rar_high;
1975
1976	DEBUGFUNC("e1000_rar_set_pch2lan");
1977
1978	/* HW expects these in little endian so we reverse the byte order
1979	 * from network order (big endian) to little endian
1980	 */
1981	rar_low = ((u32) addr[0] |
1982		   ((u32) addr[1] << 8) |
1983		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
1984
1985	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
1986
1987	/* If MAC address zero, no need to set the AV bit */
1988	if (rar_low || rar_high)
1989		rar_high |= E1000_RAH_AV;
1990
1991	if (index == 0) {
1992		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
1993		E1000_WRITE_FLUSH(hw);
1994		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
1995		E1000_WRITE_FLUSH(hw);
1996		return E1000_SUCCESS;
1997	}
1998
1999	/* RAR[1-6] are owned by manageability.  Skip those and program the
2000	 * next address into the SHRA register array.
2001	 */
2002	if (index < (u32) (hw->mac.rar_entry_count)) {
2003		s32 ret_val;
2004
2005		ret_val = e1000_acquire_swflag_ich8lan(hw);
2006		if (ret_val)
2007			goto out;
2008
2009		E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2010		E1000_WRITE_FLUSH(hw);
2011		E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2012		E1000_WRITE_FLUSH(hw);
2013
2014		e1000_release_swflag_ich8lan(hw);
2015
2016		/* verify the register updates */
2017		if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2018		    (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2019			return E1000_SUCCESS;
2020
2021		DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2022			 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2023	}
2024
2025out:
2026	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2027	return -E1000_ERR_CONFIG;
2028}
2029
2030/**
2031 *  e1000_rar_set_pch_lpt - Set receive address registers
2032 *  @hw: pointer to the HW structure
2033 *  @addr: pointer to the receive address
2034 *  @index: receive address array register
2035 *
2036 *  Sets the receive address register array at index to the address passed
2037 *  in by addr. For LPT, RAR[0] is the base address register that is to
2038 *  contain the MAC address. SHRA[0-10] are the shared receive address
2039 *  registers that are shared between the Host and manageability engine (ME).
2040 **/
2041STATIC int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2042{
2043	u32 rar_low, rar_high;
2044	u32 wlock_mac;
2045
2046	DEBUGFUNC("e1000_rar_set_pch_lpt");
2047
2048	/* HW expects these in little endian so we reverse the byte order
2049	 * from network order (big endian) to little endian
2050	 */
2051	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2052		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2053
2054	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2055
2056	/* If MAC address zero, no need to set the AV bit */
2057	if (rar_low || rar_high)
2058		rar_high |= E1000_RAH_AV;
2059
2060	if (index == 0) {
2061		E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2062		E1000_WRITE_FLUSH(hw);
2063		E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2064		E1000_WRITE_FLUSH(hw);
2065		return E1000_SUCCESS;
2066	}
2067
2068	/* The manageability engine (ME) can lock certain SHRAR registers that
2069	 * it is using - those registers are unavailable for use.
2070	 */
2071	if (index < hw->mac.rar_entry_count) {
2072		wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2073			    E1000_FWSM_WLOCK_MAC_MASK;
2074		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2075
2076		/* Check if all SHRAR registers are locked */
2077		if (wlock_mac == 1)
2078			goto out;
2079
2080		if ((wlock_mac == 0) || (index <= wlock_mac)) {
2081			s32 ret_val;
2082
2083			ret_val = e1000_acquire_swflag_ich8lan(hw);
2084
2085			if (ret_val)
2086				goto out;
2087
2088			E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2089					rar_low);
2090			E1000_WRITE_FLUSH(hw);
2091			E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2092					rar_high);
2093			E1000_WRITE_FLUSH(hw);
2094
2095			e1000_release_swflag_ich8lan(hw);
2096
2097			/* verify the register updates */
2098			if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2099			    (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2100				return E1000_SUCCESS;
2101		}
2102	}
2103
2104out:
2105	DEBUGOUT1("Failed to write receive address at index %d\n", index);
2106	return -E1000_ERR_CONFIG;
2107}
2108
2109#ifndef NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT
2110/**
2111 *  e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2112 *  @hw: pointer to the HW structure
2113 *  @mc_addr_list: array of multicast addresses to program
2114 *  @mc_addr_count: number of multicast addresses to program
2115 *
2116 *  Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2117 *  The caller must have a packed mc_addr_list of multicast addresses.
2118 **/
2119STATIC void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2120					      u8 *mc_addr_list,
2121					      u32 mc_addr_count)
2122{
2123	u16 phy_reg = 0;
2124	int i;
2125	s32 ret_val;
2126
2127	DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2128
2129	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2130
2131	ret_val = hw->phy.ops.acquire(hw);
2132	if (ret_val)
2133		return;
2134
2135	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2136	if (ret_val)
2137		goto release;
2138
2139	for (i = 0; i < hw->mac.mta_reg_count; i++) {
2140		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2141					   (u16)(hw->mac.mta_shadow[i] &
2142						 0xFFFF));
2143		hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2144					   (u16)((hw->mac.mta_shadow[i] >> 16) &
2145						 0xFFFF));
2146	}
2147
2148	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2149
2150release:
2151	hw->phy.ops.release(hw);
2152}
2153
2154#endif /* NO_NON_BLOCKING_PHY_MTA_UPDATE_SUPPORT */
2155/**
2156 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2157 *  @hw: pointer to the HW structure
2158 *
2159 *  Checks if firmware is blocking the reset of the PHY.
2160 *  This is a function pointer entry point only called by
2161 *  reset routines.
2162 **/
2163STATIC s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2164{
2165	u32 fwsm;
2166	bool blocked = false;
2167	int i = 0;
2168
2169	DEBUGFUNC("e1000_check_reset_block_ich8lan");
2170
2171	do {
2172		fwsm = E1000_READ_REG(hw, E1000_FWSM);
2173		if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2174			blocked = true;
2175			msec_delay(10);
2176			continue;
2177		}
2178		blocked = false;
2179	} while (blocked && (i++ < 30));
2180	return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2181}
2182
2183/**
2184 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2185 *  @hw: pointer to the HW structure
2186 *
2187 *  Assumes semaphore already acquired.
2188 *
2189 **/
2190STATIC s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2191{
2192	u16 phy_data;
2193	u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2194	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2195		E1000_STRAP_SMT_FREQ_SHIFT;
2196	s32 ret_val;
2197
2198	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2199
2200	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2201	if (ret_val)
2202		return ret_val;
2203
2204	phy_data &= ~HV_SMB_ADDR_MASK;
2205	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2206	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2207
2208	if (hw->phy.type == e1000_phy_i217) {
2209		/* Restore SMBus frequency */
2210		if (freq--) {
2211			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2212			phy_data |= (freq & (1 << 0)) <<
2213				HV_SMB_ADDR_FREQ_LOW_SHIFT;
2214			phy_data |= (freq & (1 << 1)) <<
2215				(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2216		} else {
2217			DEBUGOUT("Unsupported SMB frequency in PHY\n");
2218		}
2219	}
2220
2221	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2222}
2223
2224/**
2225 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2226 *  @hw:   pointer to the HW structure
2227 *
2228 *  SW should configure the LCD from the NVM extended configuration region
2229 *  as a workaround for certain parts.
2230 **/
2231STATIC s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2232{
2233	struct e1000_phy_info *phy = &hw->phy;
2234	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2235	s32 ret_val = E1000_SUCCESS;
2236	u16 word_addr, reg_data, reg_addr, phy_page = 0;
2237
2238	DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2239
2240	/* Initialize the PHY from the NVM on ICH platforms.  This
2241	 * is needed due to an issue where the NVM configuration is
2242	 * not properly autoloaded after power transitions.
2243	 * Therefore, after each PHY reset, we will load the
2244	 * configuration data out of the NVM manually.
2245	 */
2246	switch (hw->mac.type) {
2247	case e1000_ich8lan:
2248		if (phy->type != e1000_phy_igp_3)
2249			return ret_val;
2250
2251		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2252		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2253			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2254			break;
2255		}
2256		/* Fall-thru */
2257	case e1000_pchlan:
2258	case e1000_pch2lan:
2259	case e1000_pch_lpt:
2260	case e1000_pch_spt:
2261	case e1000_pch_cnp:
2262		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2263		break;
2264	default:
2265		return ret_val;
2266	}
2267
2268	ret_val = hw->phy.ops.acquire(hw);
2269	if (ret_val)
2270		return ret_val;
2271
2272	data = E1000_READ_REG(hw, E1000_FEXTNVM);
2273	if (!(data & sw_cfg_mask))
2274		goto release;
2275
2276	/* Make sure HW does not configure LCD from PHY
2277	 * extended configuration before SW configuration
2278	 */
2279	data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2280	if ((hw->mac.type < e1000_pch2lan) &&
2281	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2282			goto release;
2283
2284	cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2285	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2286	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2287	if (!cnf_size)
2288		goto release;
2289
2290	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2291	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2292
2293	if (((hw->mac.type == e1000_pchlan) &&
2294	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2295	    (hw->mac.type > e1000_pchlan)) {
2296		/* HW configures the SMBus address and LEDs when the
2297		 * OEM and LCD Write Enable bits are set in the NVM.
2298		 * When both NVM bits are cleared, SW will configure
2299		 * them instead.
2300		 */
2301		ret_val = e1000_write_smbus_addr(hw);
2302		if (ret_val)
2303			goto release;
2304
2305		data = E1000_READ_REG(hw, E1000_LEDCTL);
2306		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2307							(u16)data);
2308		if (ret_val)
2309			goto release;
2310	}
2311
2312	/* Configure LCD from extended configuration region. */
2313
2314	/* cnf_base_addr is in DWORD */
2315	word_addr = (u16)(cnf_base_addr << 1);
2316
2317	for (i = 0; i < cnf_size; i++) {
2318		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2319					   &reg_data);
2320		if (ret_val)
2321			goto release;
2322
2323		ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2324					   1, &reg_addr);
2325		if (ret_val)
2326			goto release;
2327
2328		/* Save off the PHY page for future writes. */
2329		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2330			phy_page = reg_data;
2331			continue;
2332		}
2333
2334		reg_addr &= PHY_REG_MASK;
2335		reg_addr |= phy_page;
2336
2337		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2338						    reg_data);
2339		if (ret_val)
2340			goto release;
2341	}
2342
2343release:
2344	hw->phy.ops.release(hw);
2345	return ret_val;
2346}
2347
2348/**
2349 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2350 *  @hw:   pointer to the HW structure
2351 *  @link: link up bool flag
2352 *
2353 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2354 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2355 *  If link is down, the function will restore the default K1 setting located
2356 *  in the NVM.
2357 **/
2358STATIC s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2359{
2360	s32 ret_val = E1000_SUCCESS;
2361	u16 status_reg = 0;
2362	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2363
2364	DEBUGFUNC("e1000_k1_gig_workaround_hv");
2365
2366	if (hw->mac.type != e1000_pchlan)
2367		return E1000_SUCCESS;
2368
2369	/* Wrap the whole flow with the sw flag */
2370	ret_val = hw->phy.ops.acquire(hw);
2371	if (ret_val)
2372		return ret_val;
2373
2374	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2375	if (link) {
2376		if (hw->phy.type == e1000_phy_82578) {
2377			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2378							      &status_reg);
2379			if (ret_val)
2380				goto release;
2381
2382			status_reg &= (BM_CS_STATUS_LINK_UP |
2383				       BM_CS_STATUS_RESOLVED |
2384				       BM_CS_STATUS_SPEED_MASK);
2385
2386			if (status_reg == (BM_CS_STATUS_LINK_UP |
2387					   BM_CS_STATUS_RESOLVED |
2388					   BM_CS_STATUS_SPEED_1000))
2389				k1_enable = false;
2390		}
2391
2392		if (hw->phy.type == e1000_phy_82577) {
2393			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2394							      &status_reg);
2395			if (ret_val)
2396				goto release;
2397
2398			status_reg &= (HV_M_STATUS_LINK_UP |
2399				       HV_M_STATUS_AUTONEG_COMPLETE |
2400				       HV_M_STATUS_SPEED_MASK);
2401
2402			if (status_reg == (HV_M_STATUS_LINK_UP |
2403					   HV_M_STATUS_AUTONEG_COMPLETE |
2404					   HV_M_STATUS_SPEED_1000))
2405				k1_enable = false;
2406		}
2407
2408		/* Link stall fix for link up */
2409		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2410						       0x0100);
2411		if (ret_val)
2412			goto release;
2413
2414	} else {
2415		/* Link stall fix for link down */
2416		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2417						       0x4100);
2418		if (ret_val)
2419			goto release;
2420	}
2421
2422	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2423
2424release:
2425	hw->phy.ops.release(hw);
2426
2427	return ret_val;
2428}
2429
2430/**
2431 *  e1000_configure_k1_ich8lan - Configure K1 power state
2432 *  @hw: pointer to the HW structure
2433 *  @enable: K1 state to configure
2434 *
2435 *  Configure the K1 power state based on the provided parameter.
2436 *  Assumes semaphore already acquired.
2437 *
2438 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2439 **/
2440s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2441{
2442	s32 ret_val;
2443	u32 ctrl_reg = 0;
2444	u32 ctrl_ext = 0;
2445	u32 reg = 0;
2446	u16 kmrn_reg = 0;
2447
2448	DEBUGFUNC("e1000_configure_k1_ich8lan");
2449
2450	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2451					     &kmrn_reg);
2452	if (ret_val)
2453		return ret_val;
2454
2455	if (k1_enable)
2456		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2457	else
2458		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2459
2460	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2461					      kmrn_reg);
2462	if (ret_val)
2463		return ret_val;
2464
2465	usec_delay(20);
2466	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2467	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2468
2469	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2470	reg |= E1000_CTRL_FRCSPD;
2471	E1000_WRITE_REG(hw, E1000_CTRL, reg);
2472
2473	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2474	E1000_WRITE_FLUSH(hw);
2475	usec_delay(20);
2476	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2477	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2478	E1000_WRITE_FLUSH(hw);
2479	usec_delay(20);
2480
2481	return E1000_SUCCESS;
2482}
2483
2484/**
2485 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2486 *  @hw:       pointer to the HW structure
2487 *  @d0_state: boolean if entering d0 or d3 device state
2488 *
2489 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2490 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2491 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2492 **/
2493STATIC s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2494{
2495	s32 ret_val = 0;
2496	u32 mac_reg;
2497	u16 oem_reg;
2498
2499	DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2500
2501	if (hw->mac.type < e1000_pchlan)
2502		return ret_val;
2503
2504	ret_val = hw->phy.ops.acquire(hw);
2505	if (ret_val)
2506		return ret_val;
2507
2508	if (hw->mac.type == e1000_pchlan) {
2509		mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2510		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2511			goto release;
2512	}
2513
2514	mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2515	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2516		goto release;
2517
2518	mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2519
2520	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2521	if (ret_val)
2522		goto release;
2523
2524	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2525
2526	if (d0_state) {
2527		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2528			oem_reg |= HV_OEM_BITS_GBE_DIS;
2529
2530		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2531			oem_reg |= HV_OEM_BITS_LPLU;
2532	} else {
2533		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2534		    E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2535			oem_reg |= HV_OEM_BITS_GBE_DIS;
2536
2537		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2538		    E1000_PHY_CTRL_NOND0A_LPLU))
2539			oem_reg |= HV_OEM_BITS_LPLU;
2540	}
2541
2542	/* Set Restart auto-neg to activate the bits */
2543	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2544	    !hw->phy.ops.check_reset_block(hw))
2545		oem_reg |= HV_OEM_BITS_RESTART_AN;
2546
2547	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2548
2549release:
2550	hw->phy.ops.release(hw);
2551
2552	return ret_val;
2553}
2554
2555
2556/**
2557 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2558 *  @hw:   pointer to the HW structure
2559 **/
2560STATIC s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2561{
2562	s32 ret_val;
2563	u16 data;
2564
2565	DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2566
2567	ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2568	if (ret_val)
2569		return ret_val;
2570
2571	data |= HV_KMRN_MDIO_SLOW;
2572
2573	ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2574
2575	return ret_val;
2576}
2577
2578/**
2579 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2580 *  done after every PHY reset.
2581 **/
2582STATIC s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2583{
2584	s32 ret_val = E1000_SUCCESS;
2585	u16 phy_data;
2586
2587	DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2588
2589	if (hw->mac.type != e1000_pchlan)
2590		return E1000_SUCCESS;
2591
2592	/* Set MDIO slow mode before any other MDIO access */
2593	if (hw->phy.type == e1000_phy_82577) {
2594		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2595		if (ret_val)
2596			return ret_val;
2597	}
2598
2599	if (((hw->phy.type == e1000_phy_82577) &&
2600	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2601	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2602		/* Disable generation of early preamble */
2603		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2604		if (ret_val)
2605			return ret_val;
2606
2607		/* Preamble tuning for SSC */
2608		ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2609						0xA204);
2610		if (ret_val)
2611			return ret_val;
2612	}
2613
2614	if (hw->phy.type == e1000_phy_82578) {
2615		/* Return registers to default by doing a soft reset then
2616		 * writing 0x3140 to the control register.
2617		 */
2618		if (hw->phy.revision < 2) {
2619			e1000_phy_sw_reset_generic(hw);
2620			ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2621							0x3140);
2622		}
2623	}
2624
2625	/* Select page 0 */
2626	ret_val = hw->phy.ops.acquire(hw);
2627	if (ret_val)
2628		return ret_val;
2629
2630	hw->phy.addr = 1;
2631	ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2632	hw->phy.ops.release(hw);
2633	if (ret_val)
2634		return ret_val;
2635
2636	/* Configure the K1 Si workaround during phy reset assuming there is
2637	 * link so that it disables K1 if link is in 1Gbps.
2638	 */
2639	ret_val = e1000_k1_gig_workaround_hv(hw, true);
2640	if (ret_val)
2641		return ret_val;
2642
2643	/* Workaround for link disconnects on a busy hub in half duplex */
2644	ret_val = hw->phy.ops.acquire(hw);
2645	if (ret_val)
2646		return ret_val;
2647	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2648	if (ret_val)
2649		goto release;
2650	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2651					       phy_data & 0x00FF);
2652	if (ret_val)
2653		goto release;
2654
2655	/* set MSE higher to enable link to stay up when noise is high */
2656	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2657release:
2658	hw->phy.ops.release(hw);
2659
2660	return ret_val;
2661}
2662
2663/**
2664 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2665 *  @hw:   pointer to the HW structure
2666 **/
2667void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2668{
2669	u32 mac_reg;
2670	u16 i, phy_reg = 0;
2671	s32 ret_val;
2672
2673	DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2674
2675	ret_val = hw->phy.ops.acquire(hw);
2676	if (ret_val)
2677		return;
2678	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2679	if (ret_val)
2680		goto release;
2681
2682	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2683	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2684		mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2685		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2686					   (u16)(mac_reg & 0xFFFF));
2687		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2688					   (u16)((mac_reg >> 16) & 0xFFFF));
2689
2690		mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2691		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2692					   (u16)(mac_reg & 0xFFFF));
2693		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2694					   (u16)((mac_reg & E1000_RAH_AV)
2695						 >> 16));
2696	}
2697
2698	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2699
2700release:
2701	hw->phy.ops.release(hw);
2702}
2703
2704#ifndef CRC32_OS_SUPPORT
2705STATIC u32 e1000_calc_rx_da_crc(u8 mac[])
2706{
2707	u32 poly = 0xEDB88320;	/* Polynomial for 802.3 CRC calculation */
2708	u32 i, j, mask, crc;
2709
2710	DEBUGFUNC("e1000_calc_rx_da_crc");
2711
2712	crc = 0xffffffff;
2713	for (i = 0; i < 6; i++) {
2714		crc = crc ^ mac[i];
2715		for (j = 8; j > 0; j--) {
2716			mask = (crc & 1) * (-1);
2717			crc = (crc >> 1) ^ (poly & mask);
2718		}
2719	}
2720	return ~crc;
2721}
2722
2723#endif /* CRC32_OS_SUPPORT */
2724/**
2725 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2726 *  with 82579 PHY
2727 *  @hw: pointer to the HW structure
2728 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2729 **/
2730s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2731{
2732	s32 ret_val = E1000_SUCCESS;
2733	u16 phy_reg, data;
2734	u32 mac_reg;
2735	u16 i;
2736
2737	DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2738
2739	if (hw->mac.type < e1000_pch2lan)
2740		return E1000_SUCCESS;
2741
2742	/* disable Rx path while enabling/disabling workaround */
2743	hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2744	ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2745					phy_reg | (1 << 14));
2746	if (ret_val)
2747		return ret_val;
2748
2749	if (enable) {
2750		/* Write Rx addresses (rar_entry_count for RAL/H, and
2751		 * SHRAL/H) and initial CRC values to the MAC
2752		 */
2753		for (i = 0; i < hw->mac.rar_entry_count; i++) {
2754			u8 mac_addr[ETH_ADDR_LEN] = {0};
2755			u32 addr_high, addr_low;
2756
2757			addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2758			if (!(addr_high & E1000_RAH_AV))
2759				continue;
2760			addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2761			mac_addr[0] = (addr_low & 0xFF);
2762			mac_addr[1] = ((addr_low >> 8) & 0xFF);
2763			mac_addr[2] = ((addr_low >> 16) & 0xFF);
2764			mac_addr[3] = ((addr_low >> 24) & 0xFF);
2765			mac_addr[4] = (addr_high & 0xFF);
2766			mac_addr[5] = ((addr_high >> 8) & 0xFF);
2767
2768#ifndef CRC32_OS_SUPPORT
2769			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2770					e1000_calc_rx_da_crc(mac_addr));
2771#else /* CRC32_OS_SUPPORT */
2772			E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2773					E1000_CRC32(ETH_ADDR_LEN, mac_addr));
2774#endif /* CRC32_OS_SUPPORT */
2775		}
2776
2777		/* Write Rx addresses to the PHY */
2778		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2779
2780		/* Enable jumbo frame workaround in the MAC */
2781		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2782		mac_reg &= ~(1 << 14);
2783		mac_reg |= (7 << 15);
2784		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2785
2786		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2787		mac_reg |= E1000_RCTL_SECRC;
2788		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2789
2790		ret_val = e1000_read_kmrn_reg_generic(hw,
2791						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2792						&data);
2793		if (ret_val)
2794			return ret_val;
2795		ret_val = e1000_write_kmrn_reg_generic(hw,
2796						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2797						data | (1 << 0));
2798		if (ret_val)
2799			return ret_val;
2800		ret_val = e1000_read_kmrn_reg_generic(hw,
2801						E1000_KMRNCTRLSTA_HD_CTRL,
2802						&data);
2803		if (ret_val)
2804			return ret_val;
2805		data &= ~(0xF << 8);
2806		data |= (0xB << 8);
2807		ret_val = e1000_write_kmrn_reg_generic(hw,
2808						E1000_KMRNCTRLSTA_HD_CTRL,
2809						data);
2810		if (ret_val)
2811			return ret_val;
2812
2813		/* Enable jumbo frame workaround in the PHY */
2814		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2815		data &= ~(0x7F << 5);
2816		data |= (0x37 << 5);
2817		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2818		if (ret_val)
2819			return ret_val;
2820		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2821		data &= ~(1 << 13);
2822		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2823		if (ret_val)
2824			return ret_val;
2825		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2826		data &= ~(0x3FF << 2);
2827		data |= (E1000_TX_PTR_GAP << 2);
2828		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2829		if (ret_val)
2830			return ret_val;
2831		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2832		if (ret_val)
2833			return ret_val;
2834		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2835		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2836						(1 << 10));
2837		if (ret_val)
2838			return ret_val;
2839	} else {
2840		/* Write MAC register values back to h/w defaults */
2841		mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2842		mac_reg &= ~(0xF << 14);
2843		E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2844
2845		mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2846		mac_reg &= ~E1000_RCTL_SECRC;
2847		E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2848
2849		ret_val = e1000_read_kmrn_reg_generic(hw,
2850						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2851						&data);
2852		if (ret_val)
2853			return ret_val;
2854		ret_val = e1000_write_kmrn_reg_generic(hw,
2855						E1000_KMRNCTRLSTA_CTRL_OFFSET,
2856						data & ~(1 << 0));
2857		if (ret_val)
2858			return ret_val;
2859		ret_val = e1000_read_kmrn_reg_generic(hw,
2860						E1000_KMRNCTRLSTA_HD_CTRL,
2861						&data);
2862		if (ret_val)
2863			return ret_val;
2864		data &= ~(0xF << 8);
2865		data |= (0xB << 8);
2866		ret_val = e1000_write_kmrn_reg_generic(hw,
2867						E1000_KMRNCTRLSTA_HD_CTRL,
2868						data);
2869		if (ret_val)
2870			return ret_val;
2871
2872		/* Write PHY register values back to h/w defaults */
2873		hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2874		data &= ~(0x7F << 5);
2875		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2876		if (ret_val)
2877			return ret_val;
2878		hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2879		data |= (1 << 13);
2880		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2881		if (ret_val)
2882			return ret_val;
2883		hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2884		data &= ~(0x3FF << 2);
2885		data |= (0x8 << 2);
2886		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2887		if (ret_val)
2888			return ret_val;
2889		ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2890		if (ret_val)
2891			return ret_val;
2892		hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2893		ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2894						~(1 << 10));
2895		if (ret_val)
2896			return ret_val;
2897	}
2898
2899	/* re-enable Rx path after enabling/disabling workaround */
2900	return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2901				     ~(1 << 14));
2902}
2903
2904/**
2905 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2906 *  done after every PHY reset.
2907 **/
2908STATIC s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2909{
2910	s32 ret_val = E1000_SUCCESS;
2911
2912	DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2913
2914	if (hw->mac.type != e1000_pch2lan)
2915		return E1000_SUCCESS;
2916
2917	/* Set MDIO slow mode before any other MDIO access */
2918	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2919	if (ret_val)
2920		return ret_val;
2921
2922	ret_val = hw->phy.ops.acquire(hw);
2923	if (ret_val)
2924		return ret_val;
2925	/* set MSE higher to enable link to stay up when noise is high */
2926	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2927	if (ret_val)
2928		goto release;
2929	/* drop link after 5 times MSE threshold was reached */
2930	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2931release:
2932	hw->phy.ops.release(hw);
2933
2934	return ret_val;
2935}
2936
2937/**
2938 *  e1000_k1_gig_workaround_lv - K1 Si workaround
2939 *  @hw:   pointer to the HW structure
2940 *
2941 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2942 *  Disable K1 for 1000 and 100 speeds
2943 **/
2944STATIC s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2945{
2946	s32 ret_val = E1000_SUCCESS;
2947	u16 status_reg = 0;
2948
2949	DEBUGFUNC("e1000_k1_workaround_lv");
2950
2951	if (hw->mac.type != e1000_pch2lan)
2952		return E1000_SUCCESS;
2953
2954	/* Set K1 beacon duration based on 10Mbs speed */
2955	ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2956	if (ret_val)
2957		return ret_val;
2958
2959	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2960	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2961		if (status_reg &
2962		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2963			u16 pm_phy_reg;
2964
2965			/* LV 1G/100 Packet drop issue wa  */
2966			ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2967						       &pm_phy_reg);
2968			if (ret_val)
2969				return ret_val;
2970			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
2971			ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
2972							pm_phy_reg);
2973			if (ret_val)
2974				return ret_val;
2975		} else {
2976			u32 mac_reg;
2977			mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
2978			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
2979			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
2980			E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
2981		}
2982	}
2983
2984	return ret_val;
2985}
2986
2987/**
2988 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2989 *  @hw:   pointer to the HW structure
2990 *  @gate: boolean set to true to gate, false to ungate
2991 *
2992 *  Gate/ungate the automatic PHY configuration via hardware; perform
2993 *  the configuration via software instead.
2994 **/
2995STATIC void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2996{
2997	u32 extcnf_ctrl;
2998
2999	DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3000
3001	if (hw->mac.type < e1000_pch2lan)
3002		return;
3003
3004	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3005
3006	if (gate)
3007		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3008	else
3009		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3010
3011	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3012}
3013
3014/**
3015 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
3016 *  @hw: pointer to the HW structure
3017 *
3018 *  Check the appropriate indication the MAC has finished configuring the
3019 *  PHY after a software reset.
3020 **/
3021STATIC void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3022{
3023	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3024
3025	DEBUGFUNC("e1000_lan_init_done_ich8lan");
3026
3027	/* Wait for basic configuration completes before proceeding */
3028	do {
3029		data = E1000_READ_REG(hw, E1000_STATUS);
3030		data &= E1000_STATUS_LAN_INIT_DONE;
3031		usec_delay(100);
3032	} while ((!data) && --loop);
3033
3034	/* If basic configuration is incomplete before the above loop
3035	 * count reaches 0, loading the configuration from NVM will
3036	 * leave the PHY in a bad state possibly resulting in no link.
3037	 */
3038	if (loop == 0)
3039		DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3040
3041	/* Clear the Init Done bit for the next init event */
3042	data = E1000_READ_REG(hw, E1000_STATUS);
3043	data &= ~E1000_STATUS_LAN_INIT_DONE;
3044	E1000_WRITE_REG(hw, E1000_STATUS, data);
3045}
3046
3047/**
3048 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3049 *  @hw: pointer to the HW structure
3050 **/
3051STATIC s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3052{
3053	s32 ret_val = E1000_SUCCESS;
3054	u16 reg;
3055
3056	DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3057
3058	if (hw->phy.ops.check_reset_block(hw))
3059		return E1000_SUCCESS;
3060
3061	/* Allow time for h/w to get to quiescent state after reset */
3062	msec_delay(10);
3063
3064	/* Perform any necessary post-reset workarounds */
3065	switch (hw->mac.type) {
3066	case e1000_pchlan:
3067		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3068		if (ret_val)
3069			return ret_val;
3070		break;
3071	case e1000_pch2lan:
3072		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3073		if (ret_val)
3074			return ret_val;
3075		break;
3076	default:
3077		break;
3078	}
3079
3080	/* Clear the host wakeup bit after lcd reset */
3081	if (hw->mac.type >= e1000_pchlan) {
3082		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3083		reg &= ~BM_WUC_HOST_WU_BIT;
3084		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3085	}
3086
3087	/* Configure the LCD with the extended configuration region in NVM */
3088	ret_val = e1000_sw_lcd_config_ich8lan(hw);
3089	if (ret_val)
3090		return ret_val;
3091
3092	/* Configure the LCD with the OEM bits in NVM */
3093	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
3094
3095	if (hw->mac.type == e1000_pch2lan) {
3096		/* Ungate automatic PHY configuration on non-managed 82579 */
3097		if (!(E1000_READ_REG(hw, E1000_FWSM) &
3098		    E1000_ICH_FWSM_FW_VALID)) {
3099			msec_delay(10);
3100			e1000_gate_hw_phy_config_ich8lan(hw, false);
3101		}
3102
3103		/* Set EEE LPI Update Timer to 200usec */
3104		ret_val = hw->phy.ops.acquire(hw);
3105		if (ret_val)
3106			return ret_val;
3107		ret_val = e1000_write_emi_reg_locked(hw,
3108						     I82579_LPI_UPDATE_TIMER,
3109						     0x1387);
3110		hw->phy.ops.release(hw);
3111	}
3112
3113	return ret_val;
3114}
3115
3116/**
3117 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3118 *  @hw: pointer to the HW structure
3119 *
3120 *  Resets the PHY
3121 *  This is a function pointer entry point called by drivers
3122 *  or other shared routines.
3123 **/
3124STATIC s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3125{
3126	s32 ret_val = E1000_SUCCESS;
3127
3128	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3129
3130	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
3131	if ((hw->mac.type == e1000_pch2lan) &&
3132	    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3133		e1000_gate_hw_phy_config_ich8lan(hw, true);
3134
3135	ret_val = e1000_phy_hw_reset_generic(hw);
3136	if (ret_val)
3137		return ret_val;
3138
3139	return e1000_post_phy_reset_ich8lan(hw);
3140}
3141
3142/**
3143 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3144 *  @hw: pointer to the HW structure
3145 *  @active: true to enable LPLU, false to disable
3146 *
3147 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
3148 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
3149 *  the phy speed. This function will manually set the LPLU bit and restart
3150 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
3151 *  since it configures the same bit.
3152 **/
3153STATIC s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3154{
3155	s32 ret_val;
3156	u16 oem_reg;
3157
3158	DEBUGFUNC("e1000_set_lplu_state_pchlan");
3159	ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3160	if (ret_val)
3161		return ret_val;
3162
3163	if (active)
3164		oem_reg |= HV_OEM_BITS_LPLU;
3165	else
3166		oem_reg &= ~HV_OEM_BITS_LPLU;
3167
3168	if (!hw->phy.ops.check_reset_block(hw))
3169		oem_reg |= HV_OEM_BITS_RESTART_AN;
3170
3171	return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3172}
3173
3174/**
3175 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3176 *  @hw: pointer to the HW structure
3177 *  @active: true to enable LPLU, false to disable
3178 *
3179 *  Sets the LPLU D0 state according to the active flag.  When
3180 *  activating LPLU this function also disables smart speed
3181 *  and vice versa.  LPLU will not be activated unless the
3182 *  device autonegotiation advertisement meets standards of
3183 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3184 *  This is a function pointer entry point only called by
3185 *  PHY setup routines.
3186 **/
3187STATIC s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3188{
3189	struct e1000_phy_info *phy = &hw->phy;
3190	u32 phy_ctrl;
3191	s32 ret_val = E1000_SUCCESS;
3192	u16 data;
3193
3194	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3195
3196	if (phy->type == e1000_phy_ife)
3197		return E1000_SUCCESS;
3198
3199	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3200
3201	if (active) {
3202		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3203		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3204
3205		if (phy->type != e1000_phy_igp_3)
3206			return E1000_SUCCESS;
3207
3208		/* Call gig speed drop workaround on LPLU before accessing
3209		 * any PHY registers
3210		 */
3211		if (hw->mac.type == e1000_ich8lan)
3212			e1000_gig_downshift_workaround_ich8lan(hw);
3213
3214		/* When LPLU is enabled, we should disable SmartSpeed */
3215		ret_val = phy->ops.read_reg(hw,
3216					    IGP01E1000_PHY_PORT_CONFIG,
3217					    &data);
3218		if (ret_val)
3219			return ret_val;
3220		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3221		ret_val = phy->ops.write_reg(hw,
3222					     IGP01E1000_PHY_PORT_CONFIG,
3223					     data);
3224		if (ret_val)
3225			return ret_val;
3226	} else {
3227		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3228		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3229
3230		if (phy->type != e1000_phy_igp_3)
3231			return E1000_SUCCESS;
3232
3233		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3234		 * during Dx states where the power conservation is most
3235		 * important.  During driver activity we should enable
3236		 * SmartSpeed, so performance is maintained.
3237		 */
3238		if (phy->smart_speed == e1000_smart_speed_on) {
3239			ret_val = phy->ops.read_reg(hw,
3240						    IGP01E1000_PHY_PORT_CONFIG,
3241						    &data);
3242			if (ret_val)
3243				return ret_val;
3244
3245			data |= IGP01E1000_PSCFR_SMART_SPEED;
3246			ret_val = phy->ops.write_reg(hw,
3247						     IGP01E1000_PHY_PORT_CONFIG,
3248						     data);
3249			if (ret_val)
3250				return ret_val;
3251		} else if (phy->smart_speed == e1000_smart_speed_off) {
3252			ret_val = phy->ops.read_reg(hw,
3253						    IGP01E1000_PHY_PORT_CONFIG,
3254						    &data);
3255			if (ret_val)
3256				return ret_val;
3257
3258			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3259			ret_val = phy->ops.write_reg(hw,
3260						     IGP01E1000_PHY_PORT_CONFIG,
3261						     data);
3262			if (ret_val)
3263				return ret_val;
3264		}
3265	}
3266
3267	return E1000_SUCCESS;
3268}
3269
3270/**
3271 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3272 *  @hw: pointer to the HW structure
3273 *  @active: true to enable LPLU, false to disable
3274 *
3275 *  Sets the LPLU D3 state according to the active flag.  When
3276 *  activating LPLU this function also disables smart speed
3277 *  and vice versa.  LPLU will not be activated unless the
3278 *  device autonegotiation advertisement meets standards of
3279 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
3280 *  This is a function pointer entry point only called by
3281 *  PHY setup routines.
3282 **/
3283STATIC s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3284{
3285	struct e1000_phy_info *phy = &hw->phy;
3286	u32 phy_ctrl;
3287	s32 ret_val = E1000_SUCCESS;
3288	u16 data;
3289
3290	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3291
3292	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3293
3294	if (!active) {
3295		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3296		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3297
3298		if (phy->type != e1000_phy_igp_3)
3299			return E1000_SUCCESS;
3300
3301		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
3302		 * during Dx states where the power conservation is most
3303		 * important.  During driver activity we should enable
3304		 * SmartSpeed, so performance is maintained.
3305		 */
3306		if (phy->smart_speed == e1000_smart_speed_on) {
3307			ret_val = phy->ops.read_reg(hw,
3308						    IGP01E1000_PHY_PORT_CONFIG,
3309						    &data);
3310			if (ret_val)
3311				return ret_val;
3312
3313			data |= IGP01E1000_PSCFR_SMART_SPEED;
3314			ret_val = phy->ops.write_reg(hw,
3315						     IGP01E1000_PHY_PORT_CONFIG,
3316						     data);
3317			if (ret_val)
3318				return ret_val;
3319		} else if (phy->smart_speed == e1000_smart_speed_off) {
3320			ret_val = phy->ops.read_reg(hw,
3321						    IGP01E1000_PHY_PORT_CONFIG,
3322						    &data);
3323			if (ret_val)
3324				return ret_val;
3325
3326			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3327			ret_val = phy->ops.write_reg(hw,
3328						     IGP01E1000_PHY_PORT_CONFIG,
3329						     data);
3330			if (ret_val)
3331				return ret_val;
3332		}
3333	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3334		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3335		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3336		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3337		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3338
3339		if (phy->type != e1000_phy_igp_3)
3340			return E1000_SUCCESS;
3341
3342		/* Call gig speed drop workaround on LPLU before accessing
3343		 * any PHY registers
3344		 */
3345		if (hw->mac.type == e1000_ich8lan)
3346			e1000_gig_downshift_workaround_ich8lan(hw);
3347
3348		/* When LPLU is enabled, we should disable SmartSpeed */
3349		ret_val = phy->ops.read_reg(hw,
3350					    IGP01E1000_PHY_PORT_CONFIG,
3351					    &data);
3352		if (ret_val)
3353			return ret_val;
3354
3355		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3356		ret_val = phy->ops.write_reg(hw,
3357					     IGP01E1000_PHY_PORT_CONFIG,
3358					     data);
3359	}
3360
3361	return ret_val;
3362}
3363
3364/**
3365 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3366 *  @hw: pointer to the HW structure
3367 *  @bank:  pointer to the variable that returns the active bank
3368 *
3369 *  Reads signature byte from the NVM using the flash access registers.
3370 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3371 **/
3372STATIC s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3373{
3374	u32 eecd;
3375	struct e1000_nvm_info *nvm = &hw->nvm;
3376	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3377	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3378	u32 nvm_dword = 0;
3379	u8 sig_byte = 0;
3380	s32 ret_val;
3381
3382	DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3383
3384	switch (hw->mac.type) {
3385	case e1000_pch_spt:
3386	case e1000_pch_cnp:
3387		bank1_offset = nvm->flash_bank_size;
3388		act_offset = E1000_ICH_NVM_SIG_WORD;
3389
3390		/* set bank to 0 in case flash read fails */
3391		*bank = 0;
3392
3393		/* Check bank 0 */
3394		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3395							 &nvm_dword);
3396		if (ret_val)
3397			return ret_val;
3398		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3399		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3400		    E1000_ICH_NVM_SIG_VALUE) {
3401			*bank = 0;
3402			return E1000_SUCCESS;
3403		}
3404
3405		/* Check bank 1 */
3406		ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3407							 bank1_offset,
3408							 &nvm_dword);
3409		if (ret_val)
3410			return ret_val;
3411		sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3412		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3413		    E1000_ICH_NVM_SIG_VALUE) {
3414			*bank = 1;
3415			return E1000_SUCCESS;
3416		}
3417
3418		DEBUGOUT("ERROR: No valid NVM bank present\n");
3419		return -E1000_ERR_NVM;
3420	case e1000_ich8lan:
3421	case e1000_ich9lan:
3422		eecd = E1000_READ_REG(hw, E1000_EECD);
3423		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3424		    E1000_EECD_SEC1VAL_VALID_MASK) {
3425			if (eecd & E1000_EECD_SEC1VAL)
3426				*bank = 1;
3427			else
3428				*bank = 0;
3429
3430			return E1000_SUCCESS;
3431		}
3432		DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3433		/* fall-thru */
3434	default:
3435		/* set bank to 0 in case flash read fails */
3436		*bank = 0;
3437
3438		/* Check bank 0 */
3439		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3440							&sig_byte);
3441		if (ret_val)
3442			return ret_val;
3443		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3444		    E1000_ICH_NVM_SIG_VALUE) {
3445			*bank = 0;
3446			return E1000_SUCCESS;
3447		}
3448
3449		/* Check bank 1 */
3450		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3451							bank1_offset,
3452							&sig_byte);
3453		if (ret_val)
3454			return ret_val;
3455		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3456		    E1000_ICH_NVM_SIG_VALUE) {
3457			*bank = 1;
3458			return E1000_SUCCESS;
3459		}
3460
3461		DEBUGOUT("ERROR: No valid NVM bank present\n");
3462		return -E1000_ERR_NVM;
3463	}
3464}
3465
3466/**
3467 *  e1000_read_nvm_spt - NVM access for SPT
3468 *  @hw: pointer to the HW structure
3469 *  @offset: The offset (in bytes) of the word(s) to read.
3470 *  @words: Size of data to read in words.
3471 *  @data: pointer to the word(s) to read at offset.
3472 *
3473 *  Reads a word(s) from the NVM
3474 **/
3475STATIC s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3476			      u16 *data)
3477{
3478	struct e1000_nvm_info *nvm = &hw->nvm;
3479	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3480	u32 act_offset;
3481	s32 ret_val = E1000_SUCCESS;
3482	u32 bank = 0;
3483	u32 dword = 0;
3484	u16 offset_to_read;
3485	u16 i;
3486
3487	DEBUGFUNC("e1000_read_nvm_spt");
3488
3489	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3490	    (words == 0)) {
3491		DEBUGOUT("nvm parameter(s) out of bounds\n");
3492		ret_val = -E1000_ERR_NVM;
3493		goto out;
3494	}
3495
3496	nvm->ops.acquire(hw);
3497
3498	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3499	if (ret_val != E1000_SUCCESS) {
3500		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3501		bank = 0;
3502	}
3503
3504	act_offset = (bank) ? nvm->flash_bank_size : 0;
3505	act_offset += offset;
3506
3507	ret_val = E1000_SUCCESS;
3508
3509	for (i = 0; i < words; i += 2) {
3510		if (words - i == 1) {
3511			if (dev_spec->shadow_ram[offset+i].modified) {
3512				data[i] = dev_spec->shadow_ram[offset+i].value;
3513			} else {
3514				offset_to_read = act_offset + i -
3515						 ((act_offset + i) % 2);
3516				ret_val =
3517				   e1000_read_flash_dword_ich8lan(hw,
3518								 offset_to_read,
3519								 &dword);
3520				if (ret_val)
3521					break;
3522				if ((act_offset + i) % 2 == 0)
3523					data[i] = (u16)(dword & 0xFFFF);
3524				else
3525					data[i] = (u16)((dword >> 16) & 0xFFFF);
3526			}
3527		} else {
3528			offset_to_read = act_offset + i;
3529			if (!(dev_spec->shadow_ram[offset+i].modified) ||
3530			    !(dev_spec->shadow_ram[offset+i+1].modified)) {
3531				ret_val =
3532				   e1000_read_flash_dword_ich8lan(hw,
3533								 offset_to_read,
3534								 &dword);
3535				if (ret_val)
3536					break;
3537			}
3538			if (dev_spec->shadow_ram[offset+i].modified)
3539				data[i] = dev_spec->shadow_ram[offset+i].value;
3540			else
3541				data[i] = (u16) (dword & 0xFFFF);
3542			if (dev_spec->shadow_ram[offset+i].modified)
3543				data[i+1] =
3544				   dev_spec->shadow_ram[offset+i+1].value;
3545			else
3546				data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3547		}
3548	}
3549
3550	nvm->ops.release(hw);
3551
3552out:
3553	if (ret_val)
3554		DEBUGOUT1("NVM read error: %d\n", ret_val);
3555
3556	return ret_val;
3557}
3558
3559/**
3560 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3561 *  @hw: pointer to the HW structure
3562 *  @offset: The offset (in bytes) of the word(s) to read.
3563 *  @words: Size of data to read in words
3564 *  @data: Pointer to the word(s) to read at offset.
3565 *
3566 *  Reads a word(s) from the NVM using the flash access registers.
3567 **/
3568STATIC s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3569				  u16 *data)
3570{
3571	struct e1000_nvm_info *nvm = &hw->nvm;
3572	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3573	u32 act_offset;
3574	s32 ret_val = E1000_SUCCESS;
3575	u32 bank = 0;
3576	u16 i, word;
3577
3578	DEBUGFUNC("e1000_read_nvm_ich8lan");
3579
3580	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3581	    (words == 0)) {
3582		DEBUGOUT("nvm parameter(s) out of bounds\n");
3583		ret_val = -E1000_ERR_NVM;
3584		goto out;
3585	}
3586
3587	nvm->ops.acquire(hw);
3588
3589	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3590	if (ret_val != E1000_SUCCESS) {
3591		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3592		bank = 0;
3593	}
3594
3595	act_offset = (bank) ? nvm->flash_bank_size : 0;
3596	act_offset += offset;
3597
3598	ret_val = E1000_SUCCESS;
3599	for (i = 0; i < words; i++) {
3600		if (dev_spec->shadow_ram[offset+i].modified) {
3601			data[i] = dev_spec->shadow_ram[offset+i].value;
3602		} else {
3603			ret_val = e1000_read_flash_word_ich8lan(hw,
3604								act_offset + i,
3605								&word);
3606			if (ret_val)
3607				break;
3608			data[i] = word;
3609		}
3610	}
3611
3612	nvm->ops.release(hw);
3613
3614out:
3615	if (ret_val)
3616		DEBUGOUT1("NVM read error: %d\n", ret_val);
3617
3618	return ret_val;
3619}
3620
3621/**
3622 *  e1000_flash_cycle_init_ich8lan - Initialize flash
3623 *  @hw: pointer to the HW structure
3624 *
3625 *  This function does initial flash setup so that a new read/write/erase cycle
3626 *  can be started.
3627 **/
3628STATIC s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3629{
3630	union ich8_hws_flash_status hsfsts;
3631	s32 ret_val = -E1000_ERR_NVM;
3632
3633	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3634
3635	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3636
3637	/* Check if the flash descriptor is valid */
3638	if (!hsfsts.hsf_status.fldesvalid) {
3639		DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.\n");
3640		return -E1000_ERR_NVM;
3641	}
3642
3643	/* Clear FCERR and DAEL in hw status by writing 1 */
3644	hsfsts.hsf_status.flcerr = 1;
3645	hsfsts.hsf_status.dael = 1;
3646	if (hw->mac.type >= e1000_pch_spt)
3647		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3648				      hsfsts.regval & 0xFFFF);
3649	else
3650		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3651
3652	/* Either we should have a hardware SPI cycle in progress
3653	 * bit to check against, in order to start a new cycle or
3654	 * FDONE bit should be changed in the hardware so that it
3655	 * is 1 after hardware reset, which can then be used as an
3656	 * indication whether a cycle is in progress or has been
3657	 * completed.
3658	 */
3659
3660	if (!hsfsts.hsf_status.flcinprog) {
3661		/* There is no cycle running at present,
3662		 * so we can start a cycle.
3663		 * Begin by setting Flash Cycle Done.
3664		 */
3665		hsfsts.hsf_status.flcdone = 1;
3666		if (hw->mac.type >= e1000_pch_spt)
3667			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3668					      hsfsts.regval & 0xFFFF);
3669		else
3670			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3671						hsfsts.regval);
3672		ret_val = E1000_SUCCESS;
3673	} else {
3674		s32 i;
3675
3676		/* Otherwise poll for sometime so the current
3677		 * cycle has a chance to end before giving up.
3678		 */
3679		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3680			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3681							      ICH_FLASH_HSFSTS);
3682			if (!hsfsts.hsf_status.flcinprog) {
3683				ret_val = E1000_SUCCESS;
3684				break;
3685			}
3686			usec_delay(1);
3687		}
3688		if (ret_val == E1000_SUCCESS) {
3689			/* Successful in waiting for previous cycle to timeout,
3690			 * now set the Flash Cycle Done.
3691			 */
3692			hsfsts.hsf_status.flcdone = 1;
3693			if (hw->mac.type >= e1000_pch_spt)
3694				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3695						      hsfsts.regval & 0xFFFF);
3696			else
3697				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3698							hsfsts.regval);
3699		} else {
3700			DEBUGOUT("Flash controller busy, cannot get access\n");
3701		}
3702	}
3703
3704	return ret_val;
3705}
3706
3707/**
3708 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3709 *  @hw: pointer to the HW structure
3710 *  @timeout: maximum time to wait for completion
3711 *
3712 *  This function starts a flash cycle and waits for its completion.
3713 **/
3714STATIC s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3715{
3716	union ich8_hws_flash_ctrl hsflctl;
3717	union ich8_hws_flash_status hsfsts;
3718	u32 i = 0;
3719
3720	DEBUGFUNC("e1000_flash_cycle_ich8lan");
3721
3722	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3723	if (hw->mac.type >= e1000_pch_spt)
3724		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3725	else
3726		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3727	hsflctl.hsf_ctrl.flcgo = 1;
3728
3729	if (hw->mac.type >= e1000_pch_spt)
3730		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3731				      hsflctl.regval << 16);
3732	else
3733		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3734
3735	/* wait till FDONE bit is set to 1 */
3736	do {
3737		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3738		if (hsfsts.hsf_status.flcdone)
3739			break;
3740		usec_delay(1);
3741	} while (i++ < timeout);
3742
3743	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3744		return E1000_SUCCESS;
3745
3746	return -E1000_ERR_NVM;
3747}
3748
3749/**
3750 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3751 *  @hw: pointer to the HW structure
3752 *  @offset: offset to data location
3753 *  @data: pointer to the location for storing the data
3754 *
3755 *  Reads the flash dword at offset into data.  Offset is converted
3756 *  to bytes before read.
3757 **/
3758STATIC s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3759					  u32 *data)
3760{
3761	DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3762
3763	if (!data)
3764		return -E1000_ERR_NVM;
3765
3766	/* Must convert word offset into bytes. */
3767	offset <<= 1;
3768
3769	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3770}
3771
3772/**
3773 *  e1000_read_flash_word_ich8lan - Read word from flash
3774 *  @hw: pointer to the HW structure
3775 *  @offset: offset to data location
3776 *  @data: pointer to the location for storing the data
3777 *
3778 *  Reads the flash word at offset into data.  Offset is converted
3779 *  to bytes before read.
3780 **/
3781STATIC s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3782					 u16 *data)
3783{
3784	DEBUGFUNC("e1000_read_flash_word_ich8lan");
3785
3786	if (!data)
3787		return -E1000_ERR_NVM;
3788
3789	/* Must convert offset into bytes. */
3790	offset <<= 1;
3791
3792	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3793}
3794
3795/**
3796 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3797 *  @hw: pointer to the HW structure
3798 *  @offset: The offset of the byte to read.
3799 *  @data: Pointer to a byte to store the value read.
3800 *
3801 *  Reads a single byte from the NVM using the flash access registers.
3802 **/
3803STATIC s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3804					 u8 *data)
3805{
3806	s32 ret_val;
3807	u16 word = 0;
3808
3809	/* In SPT, only 32 bits access is supported,
3810	 * so this function should not be called.
3811	 */
3812	if (hw->mac.type >= e1000_pch_spt)
3813		return -E1000_ERR_NVM;
3814	else
3815		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3816
3817	if (ret_val)
3818		return ret_val;
3819
3820	*data = (u8)word;
3821
3822	return E1000_SUCCESS;
3823}
3824
3825/**
3826 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
3827 *  @hw: pointer to the HW structure
3828 *  @offset: The offset (in bytes) of the byte or word to read.
3829 *  @size: Size of data to read, 1=byte 2=word
3830 *  @data: Pointer to the word to store the value read.
3831 *
3832 *  Reads a byte or word from the NVM using the flash access registers.
3833 **/
3834STATIC s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3835					 u8 size, u16 *data)
3836{
3837	union ich8_hws_flash_status hsfsts;
3838	union ich8_hws_flash_ctrl hsflctl;
3839	u32 flash_linear_addr;
3840	u32 flash_data = 0;
3841	s32 ret_val = -E1000_ERR_NVM;
3842	u8 count = 0;
3843
3844	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3845
3846	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3847		return -E1000_ERR_NVM;
3848	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3849			     hw->nvm.flash_base_addr);
3850
3851	do {
3852		usec_delay(1);
3853		/* Steps */
3854		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3855		if (ret_val != E1000_SUCCESS)
3856			break;
3857		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3858
3859		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3860		hsflctl.hsf_ctrl.fldbcount = size - 1;
3861		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3862		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3863		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3864
3865		ret_val = e1000_flash_cycle_ich8lan(hw,
3866						ICH_FLASH_READ_COMMAND_TIMEOUT);
3867
3868		/* Check if FCERR is set to 1, if set to 1, clear it
3869		 * and try the whole sequence a few more times, else
3870		 * read in (shift in) the Flash Data0, the order is
3871		 * least significant byte first msb to lsb
3872		 */
3873		if (ret_val == E1000_SUCCESS) {
3874			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3875			if (size == 1)
3876				*data = (u8)(flash_data & 0x000000FF);
3877			else if (size == 2)
3878				*data = (u16)(flash_data & 0x0000FFFF);
3879			break;
3880		} else {
3881			/* If we've gotten here, then things are probably
3882			 * completely hosed, but if the error condition is
3883			 * detected, it won't hurt to give it another try...
3884			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3885			 */
3886			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3887							      ICH_FLASH_HSFSTS);
3888			if (hsfsts.hsf_status.flcerr) {
3889				/* Repeat for some time before giving up. */
3890				continue;
3891			} else if (!hsfsts.hsf_status.flcdone) {
3892				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3893				break;
3894			}
3895		}
3896	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3897
3898	return ret_val;
3899}
3900
3901/**
3902 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
3903 *  @hw: pointer to the HW structure
3904 *  @offset: The offset (in bytes) of the dword to read.
3905 *  @data: Pointer to the dword to store the value read.
3906 *
3907 *  Reads a byte or word from the NVM using the flash access registers.
3908 **/
3909STATIC s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3910					   u32 *data)
3911{
3912	union ich8_hws_flash_status hsfsts;
3913	union ich8_hws_flash_ctrl hsflctl;
3914	u32 flash_linear_addr;
3915	s32 ret_val = -E1000_ERR_NVM;
3916	u8 count = 0;
3917
3918	DEBUGFUNC("e1000_read_flash_data_ich8lan");
3919
3920		if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3921		    hw->mac.type < e1000_pch_spt)
3922			return -E1000_ERR_NVM;
3923	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3924			     hw->nvm.flash_base_addr);
3925
3926	do {
3927		usec_delay(1);
3928		/* Steps */
3929		ret_val = e1000_flash_cycle_init_ich8lan(hw);
3930		if (ret_val != E1000_SUCCESS)
3931			break;
3932		/* In SPT, This register is in Lan memory space, not flash.
3933		 * Therefore, only 32 bit access is supported
3934		 */
3935		hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3936
3937		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3938		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3939		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3940		/* In SPT, This register is in Lan memory space, not flash.
3941		 * Therefore, only 32 bit access is supported
3942		 */
3943		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3944				      (u32)hsflctl.regval << 16);
3945		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3946
3947		ret_val = e1000_flash_cycle_ich8lan(hw,
3948						ICH_FLASH_READ_COMMAND_TIMEOUT);
3949
3950		/* Check if FCERR is set to 1, if set to 1, clear it
3951		 * and try the whole sequence a few more times, else
3952		 * read in (shift in) the Flash Data0, the order is
3953		 * least significant byte first msb to lsb
3954		 */
3955		if (ret_val == E1000_SUCCESS) {
3956			*data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3957			break;
3958		} else {
3959			/* If we've gotten here, then things are probably
3960			 * completely hosed, but if the error condition is
3961			 * detected, it won't hurt to give it another try...
3962			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3963			 */
3964			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3965							      ICH_FLASH_HSFSTS);
3966			if (hsfsts.hsf_status.flcerr) {
3967				/* Repeat for some time before giving up. */
3968				continue;
3969			} else if (!hsfsts.hsf_status.flcdone) {
3970				DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3971				break;
3972			}
3973		}
3974	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3975
3976	return ret_val;
3977}
3978
3979/**
3980 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3981 *  @hw: pointer to the HW structure
3982 *  @offset: The offset (in bytes) of the word(s) to write.
3983 *  @words: Size of data to write in words
3984 *  @data: Pointer to the word(s) to write at offset.
3985 *
3986 *  Writes a byte or word to the NVM using the flash access registers.
3987 **/
3988STATIC s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3989				   u16 *data)
3990{
3991	struct e1000_nvm_info *nvm = &hw->nvm;
3992	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3993	u16 i;
3994
3995	DEBUGFUNC("e1000_write_nvm_ich8lan");
3996
3997	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3998	    (words == 0)) {
3999		DEBUGOUT("nvm parameter(s) out of bounds\n");
4000		return -E1000_ERR_NVM;
4001	}
4002
4003	nvm->ops.acquire(hw);
4004
4005	for (i = 0; i < words; i++) {
4006		dev_spec->shadow_ram[offset+i].modified = true;
4007		dev_spec->shadow_ram[offset+i].value = data[i];
4008	}
4009
4010	nvm->ops.release(hw);
4011
4012	return E1000_SUCCESS;
4013}
4014
4015/**
4016 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
4017 *  @hw: pointer to the HW structure
4018 *
4019 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4020 *  which writes the checksum to the shadow ram.  The changes in the shadow
4021 *  ram are then committed to the EEPROM by processing each bank at a time
4022 *  checking for the modified bit and writing only the pending changes.
4023 *  After a successful commit, the shadow ram is cleared and is ready for
4024 *  future writes.
4025 **/
4026STATIC s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4027{
4028	struct e1000_nvm_info *nvm = &hw->nvm;
4029	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4030	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4031	s32 ret_val;
4032	u32 dword = 0;
4033
4034	DEBUGFUNC("e1000_update_nvm_checksum_spt");
4035
4036	ret_val = e1000_update_nvm_checksum_generic(hw);
4037	if (ret_val)
4038		goto out;
4039
4040	if (nvm->type != e1000_nvm_flash_sw)
4041		goto out;
4042
4043	nvm->ops.acquire(hw);
4044
4045	/* We're writing to the opposite bank so if we're on bank 1,
4046	 * write to bank 0 etc.  We also need to erase the segment that
4047	 * is going to be written
4048	 */
4049	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4050	if (ret_val != E1000_SUCCESS) {
4051		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4052		bank = 0;
4053	}
4054
4055	if (bank == 0) {
4056		new_bank_offset = nvm->flash_bank_size;
4057		old_bank_offset = 0;
4058		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4059		if (ret_val)
4060			goto release;
4061	} else {
4062		old_bank_offset = nvm->flash_bank_size;
4063		new_bank_offset = 0;
4064		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4065		if (ret_val)
4066			goto release;
4067	}
4068	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4069		/* Determine whether to write the value stored
4070		 * in the other NVM bank or a modified value stored
4071		 * in the shadow RAM
4072		 */
4073		ret_val = e1000_read_flash_dword_ich8lan(hw,
4074							 i + old_bank_offset,
4075							 &dword);
4076
4077		if (dev_spec->shadow_ram[i].modified) {
4078			dword &= 0xffff0000;
4079			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4080		}
4081		if (dev_spec->shadow_ram[i + 1].modified) {
4082			dword &= 0x0000ffff;
4083			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4084				  << 16);
4085		}
4086		if (ret_val)
4087			break;
4088
4089		/* If the word is 0x13, then make sure the signature bits
4090		 * (15:14) are 11b until the commit has completed.
4091		 * This will allow us to write 10b which indicates the
4092		 * signature is valid.  We want to do this after the write
4093		 * has completed so that we don't mark the segment valid
4094		 * while the write is still in progress
4095		 */
4096		if (i == E1000_ICH_NVM_SIG_WORD - 1)
4097			dword |= E1000_ICH_NVM_SIG_MASK << 16;
4098
4099		/* Convert offset to bytes. */
4100		act_offset = (i + new_bank_offset) << 1;
4101
4102		usec_delay(100);
4103
4104		/* Write the data to the new bank. Offset in words*/
4105		act_offset = i + new_bank_offset;
4106		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4107								dword);
4108		if (ret_val)
4109			break;
4110	 }
4111
4112	/* Don't bother writing the segment valid bits if sector
4113	 * programming failed.
4114	 */
4115	if (ret_val) {
4116		DEBUGOUT("Flash commit failed.\n");
4117		goto release;
4118	}
4119
4120	/* Finally validate the new segment by setting bit 15:14
4121	 * to 10b in word 0x13 , this can be done without an
4122	 * erase as well since these bits are 11 to start with
4123	 * and we need to change bit 14 to 0b
4124	 */
4125	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4126
4127	/*offset in words but we read dword*/
4128	--act_offset;
4129	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4130
4131	if (ret_val)
4132		goto release;
4133
4134	dword &= 0xBFFFFFFF;
4135	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4136
4137	if (ret_val)
4138		goto release;
4139
4140	/* And invalidate the previously valid segment by setting
4141	 * its signature word (0x13) high_byte to 0b. This can be
4142	 * done without an erase because flash erase sets all bits
4143	 * to 1's. We can write 1's to 0's without an erase
4144	 */
4145	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4146
4147	/* offset in words but we read dword*/
4148	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4149	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4150
4151	if (ret_val)
4152		goto release;
4153
4154	dword &= 0x00FFFFFF;
4155	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4156
4157	if (ret_val)
4158		goto release;
4159
4160	/* Great!  Everything worked, we can now clear the cached entries. */
4161	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4162		dev_spec->shadow_ram[i].modified = false;
4163		dev_spec->shadow_ram[i].value = 0xFFFF;
4164	}
4165
4166release:
4167	nvm->ops.release(hw);
4168
4169	/* Reload the EEPROM, or else modifications will not appear
4170	 * until after the next adapter reset.
4171	 */
4172	if (!ret_val) {
4173		nvm->ops.reload(hw);
4174		msec_delay(10);
4175	}
4176
4177out:
4178	if (ret_val)
4179		DEBUGOUT1("NVM update error: %d\n", ret_val);
4180
4181	return ret_val;
4182}
4183
4184/**
4185 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4186 *  @hw: pointer to the HW structure
4187 *
4188 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
4189 *  which writes the checksum to the shadow ram.  The changes in the shadow
4190 *  ram are then committed to the EEPROM by processing each bank at a time
4191 *  checking for the modified bit and writing only the pending changes.
4192 *  After a successful commit, the shadow ram is cleared and is ready for
4193 *  future writes.
4194 **/
4195STATIC s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4196{
4197	struct e1000_nvm_info *nvm = &hw->nvm;
4198	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4199	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4200	s32 ret_val;
4201	u16 data = 0;
4202
4203	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4204
4205	ret_val = e1000_update_nvm_checksum_generic(hw);
4206	if (ret_val)
4207		goto out;
4208
4209	if (nvm->type != e1000_nvm_flash_sw)
4210		goto out;
4211
4212	nvm->ops.acquire(hw);
4213
4214	/* We're writing to the opposite bank so if we're on bank 1,
4215	 * write to bank 0 etc.  We also need to erase the segment that
4216	 * is going to be written
4217	 */
4218	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4219	if (ret_val != E1000_SUCCESS) {
4220		DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4221		bank = 0;
4222	}
4223
4224	if (bank == 0) {
4225		new_bank_offset = nvm->flash_bank_size;
4226		old_bank_offset = 0;
4227		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4228		if (ret_val)
4229			goto release;
4230	} else {
4231		old_bank_offset = nvm->flash_bank_size;
4232		new_bank_offset = 0;
4233		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4234		if (ret_val)
4235			goto release;
4236	}
4237	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4238		if (dev_spec->shadow_ram[i].modified) {
4239			data = dev_spec->shadow_ram[i].value;
4240		} else {
4241			ret_val = e1000_read_flash_word_ich8lan(hw, i +
4242								old_bank_offset,
4243								&data);
4244			if (ret_val)
4245				break;
4246		}
4247		/* If the word is 0x13, then make sure the signature bits
4248		 * (15:14) are 11b until the commit has completed.
4249		 * This will allow us to write 10b which indicates the
4250		 * signature is valid.  We want to do this after the write
4251		 * has completed so that we don't mark the segment valid
4252		 * while the write is still in progress
4253		 */
4254		if (i == E1000_ICH_NVM_SIG_WORD)
4255			data |= E1000_ICH_NVM_SIG_MASK;
4256
4257		/* Convert offset to bytes. */
4258		act_offset = (i + new_bank_offset) << 1;
4259
4260		usec_delay(100);
4261
4262		/* Write the bytes to the new bank. */
4263		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4264							       act_offset,
4265							       (u8)data);
4266		if (ret_val)
4267			break;
4268
4269		usec_delay(100);
4270		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4271							  act_offset + 1,
4272							  (u8)(data >> 8));
4273		if (ret_val)
4274			break;
4275	}
4276
4277	/* Don't bother writing the segment valid bits if sector
4278	 * programming failed.
4279	 */
4280	if (ret_val) {
4281		DEBUGOUT("Flash commit failed.\n");
4282		goto release;
4283	}
4284
4285	/* Finally validate the new segment by setting bit 15:14
4286	 * to 10b in word 0x13 , this can be done without an
4287	 * erase as well since these bits are 11 to start with
4288	 * and we need to change bit 14 to 0b
4289	 */
4290	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4291	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4292	if (ret_val)
4293		goto release;
4294
4295	data &= 0xBFFF;
4296	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4297						       (u8)(data >> 8));
4298	if (ret_val)
4299		goto release;
4300
4301	/* And invalidate the previously valid segment by setting
4302	 * its signature word (0x13) high_byte to 0b. This can be
4303	 * done without an erase because flash erase sets all bits
4304	 * to 1's. We can write 1's to 0's without an erase
4305	 */
4306	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4307
4308	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4309
4310	if (ret_val)
4311		goto release;
4312
4313	/* Great!  Everything worked, we can now clear the cached entries. */
4314	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4315		dev_spec->shadow_ram[i].modified = false;
4316		dev_spec->shadow_ram[i].value = 0xFFFF;
4317	}
4318
4319release:
4320	nvm->ops.release(hw);
4321
4322	/* Reload the EEPROM, or else modifications will not appear
4323	 * until after the next adapter reset.
4324	 */
4325	if (!ret_val) {
4326		nvm->ops.reload(hw);
4327		msec_delay(10);
4328	}
4329
4330out:
4331	if (ret_val)
4332		DEBUGOUT1("NVM update error: %d\n", ret_val);
4333
4334	return ret_val;
4335}
4336
4337/**
4338 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4339 *  @hw: pointer to the HW structure
4340 *
4341 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
4342 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
4343 *  calculated, in which case we need to calculate the checksum and set bit 6.
4344 **/
4345STATIC s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4346{
4347	s32 ret_val;
4348	u16 data;
4349	u16 word;
4350	u16 valid_csum_mask;
4351
4352	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4353
4354	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
4355	 * the checksum needs to be fixed.  This bit is an indication that
4356	 * the NVM was prepared by OEM software and did not calculate
4357	 * the checksum...a likely scenario.
4358	 */
4359	switch (hw->mac.type) {
4360	case e1000_pch_lpt:
4361	case e1000_pch_spt:
4362	case e1000_pch_cnp:
4363		word = NVM_COMPAT;
4364		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4365		break;
4366	default:
4367		word = NVM_FUTURE_INIT_WORD1;
4368		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4369		break;
4370	}
4371
4372	ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4373	if (ret_val)
4374		return ret_val;
4375
4376	if (!(data & valid_csum_mask)) {
4377		data |= valid_csum_mask;
4378		ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4379		if (ret_val)
4380			return ret_val;
4381		ret_val = hw->nvm.ops.update(hw);
4382		if (ret_val)
4383			return ret_val;
4384	}
4385
4386	return e1000_validate_nvm_checksum_generic(hw);
4387}
4388
4389/**
4390 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4391 *  @hw: pointer to the HW structure
4392 *  @offset: The offset (in bytes) of the byte/word to read.
4393 *  @size: Size of data to read, 1=byte 2=word
4394 *  @data: The byte(s) to write to the NVM.
4395 *
4396 *  Writes one/two bytes to the NVM using the flash access registers.
4397 **/
4398STATIC s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4399					  u8 size, u16 data)
4400{
4401	union ich8_hws_flash_status hsfsts;
4402	union ich8_hws_flash_ctrl hsflctl;
4403	u32 flash_linear_addr;
4404	u32 flash_data = 0;
4405	s32 ret_val;
4406	u8 count = 0;
4407
4408	DEBUGFUNC("e1000_write_ich8_data");
4409
4410	if (hw->mac.type >= e1000_pch_spt) {
4411		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4412			return -E1000_ERR_NVM;
4413	} else {
4414		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4415			return -E1000_ERR_NVM;
4416	}
4417
4418	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4419			     hw->nvm.flash_base_addr);
4420
4421	do {
4422		usec_delay(1);
4423		/* Steps */
4424		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4425		if (ret_val != E1000_SUCCESS)
4426			break;
4427		/* In SPT, This register is in Lan memory space, not
4428		 * flash.  Therefore, only 32 bit access is supported
4429		 */
4430		if (hw->mac.type >= e1000_pch_spt)
4431			hsflctl.regval =
4432			    E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4433		else
4434			hsflctl.regval =
4435			    E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4436
4437		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4438		hsflctl.hsf_ctrl.fldbcount = size - 1;
4439		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4440		/* In SPT, This register is in Lan memory space,
4441		 * not flash.  Therefore, only 32 bit access is
4442		 * supported
4443		 */
4444		if (hw->mac.type >= e1000_pch_spt)
4445			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4446					      hsflctl.regval << 16);
4447		else
4448			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4449						hsflctl.regval);
4450
4451		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4452
4453		if (size == 1)
4454			flash_data = (u32)data & 0x00FF;
4455		else
4456			flash_data = (u32)data;
4457
4458		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4459
4460		/* check if FCERR is set to 1 , if set to 1, clear it
4461		 * and try the whole sequence a few more times else done
4462		 */
4463		ret_val =
4464		    e1000_flash_cycle_ich8lan(hw,
4465					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4466		if (ret_val == E1000_SUCCESS)
4467			break;
4468
4469		/* If we're here, then things are most likely
4470		 * completely hosed, but if the error condition
4471		 * is detected, it won't hurt to give it another
4472		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4473		 */
4474		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4475		if (hsfsts.hsf_status.flcerr)
4476			/* Repeat for some time before giving up. */
4477			continue;
4478		if (!hsfsts.hsf_status.flcdone) {
4479			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4480			break;
4481		}
4482	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4483
4484	return ret_val;
4485}
4486
4487/**
4488*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4489*  @hw: pointer to the HW structure
4490*  @offset: The offset (in bytes) of the dwords to read.
4491*  @data: The 4 bytes to write to the NVM.
4492*
4493*  Writes one/two/four bytes to the NVM using the flash access registers.
4494**/
4495STATIC s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4496					    u32 data)
4497{
4498	union ich8_hws_flash_status hsfsts;
4499	union ich8_hws_flash_ctrl hsflctl;
4500	u32 flash_linear_addr;
4501	s32 ret_val;
4502	u8 count = 0;
4503
4504	DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4505
4506	if (hw->mac.type >= e1000_pch_spt) {
4507		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4508			return -E1000_ERR_NVM;
4509	}
4510	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4511			     hw->nvm.flash_base_addr);
4512	do {
4513		usec_delay(1);
4514		/* Steps */
4515		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4516		if (ret_val != E1000_SUCCESS)
4517			break;
4518
4519		/* In SPT, This register is in Lan memory space, not
4520		 * flash.  Therefore, only 32 bit access is supported
4521		 */
4522		if (hw->mac.type >= e1000_pch_spt)
4523			hsflctl.regval = E1000_READ_FLASH_REG(hw,
4524							      ICH_FLASH_HSFSTS)
4525					 >> 16;
4526		else
4527			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4528							      ICH_FLASH_HSFCTL);
4529
4530		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4531		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4532
4533		/* In SPT, This register is in Lan memory space,
4534		 * not flash.  Therefore, only 32 bit access is
4535		 * supported
4536		 */
4537		if (hw->mac.type >= e1000_pch_spt)
4538			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4539					      hsflctl.regval << 16);
4540		else
4541			E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4542						hsflctl.regval);
4543
4544		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4545
4546		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4547
4548		/* check if FCERR is set to 1 , if set to 1, clear it
4549		 * and try the whole sequence a few more times else done
4550		 */
4551		ret_val = e1000_flash_cycle_ich8lan(hw,
4552					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4553
4554		if (ret_val == E1000_SUCCESS)
4555			break;
4556
4557		/* If we're here, then things are most likely
4558		 * completely hosed, but if the error condition
4559		 * is detected, it won't hurt to give it another
4560		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4561		 */
4562		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4563
4564		if (hsfsts.hsf_status.flcerr)
4565			/* Repeat for some time before giving up. */
4566			continue;
4567		if (!hsfsts.hsf_status.flcdone) {
4568			DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4569			break;
4570		}
4571	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4572
4573	return ret_val;
4574}
4575
4576/**
4577 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4578 *  @hw: pointer to the HW structure
4579 *  @offset: The index of the byte to read.
4580 *  @data: The byte to write to the NVM.
4581 *
4582 *  Writes a single byte to the NVM using the flash access registers.
4583 **/
4584STATIC s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4585					  u8 data)
4586{
4587	u16 word = (u16)data;
4588
4589	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4590
4591	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4592}
4593
4594/**
4595*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4596*  @hw: pointer to the HW structure
4597*  @offset: The offset of the word to write.
4598*  @dword: The dword to write to the NVM.
4599*
4600*  Writes a single dword to the NVM using the flash access registers.
4601*  Goes through a retry algorithm before giving up.
4602**/
4603STATIC s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4604						 u32 offset, u32 dword)
4605{
4606	s32 ret_val;
4607	u16 program_retries;
4608
4609	DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4610
4611	/* Must convert word offset into bytes. */
4612	offset <<= 1;
4613
4614	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4615
4616	if (!ret_val)
4617		return ret_val;
4618	for (program_retries = 0; program_retries < 100; program_retries++) {
4619		DEBUGOUT2("Retrying Byte %8.8X at offset %u\n", dword, offset);
4620		usec_delay(100);
4621		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4622		if (ret_val == E1000_SUCCESS)
4623			break;
4624	}
4625	if (program_retries == 100)
4626		return -E1000_ERR_NVM;
4627
4628	return E1000_SUCCESS;
4629}
4630
4631/**
4632 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4633 *  @hw: pointer to the HW structure
4634 *  @offset: The offset of the byte to write.
4635 *  @byte: The byte to write to the NVM.
4636 *
4637 *  Writes a single byte to the NVM using the flash access registers.
4638 *  Goes through a retry algorithm before giving up.
4639 **/
4640STATIC s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4641						u32 offset, u8 byte)
4642{
4643	s32 ret_val;
4644	u16 program_retries;
4645
4646	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4647
4648	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4649	if (!ret_val)
4650		return ret_val;
4651
4652	for (program_retries = 0; program_retries < 100; program_retries++) {
4653		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4654		usec_delay(100);
4655		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4656		if (ret_val == E1000_SUCCESS)
4657			break;
4658	}
4659	if (program_retries == 100)
4660		return -E1000_ERR_NVM;
4661
4662	return E1000_SUCCESS;
4663}
4664
4665/**
4666 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4667 *  @hw: pointer to the HW structure
4668 *  @bank: 0 for first bank, 1 for second bank, etc.
4669 *
4670 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4671 *  bank N is 4096 * N + flash_reg_addr.
4672 **/
4673STATIC s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4674{
4675	struct e1000_nvm_info *nvm = &hw->nvm;
4676	union ich8_hws_flash_status hsfsts;
4677	union ich8_hws_flash_ctrl hsflctl;
4678	u32 flash_linear_addr;
4679	/* bank size is in 16bit words - adjust to bytes */
4680	u32 flash_bank_size = nvm->flash_bank_size * 2;
4681	s32 ret_val;
4682	s32 count = 0;
4683	s32 j, iteration, sector_size;
4684
4685	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4686
4687	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4688
4689	/* Determine HW Sector size: Read BERASE bits of hw flash status
4690	 * register
4691	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4692	 *     consecutive sectors.  The start index for the nth Hw sector
4693	 *     can be calculated as = bank * 4096 + n * 256
4694	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4695	 *     The start index for the nth Hw sector can be calculated
4696	 *     as = bank * 4096
4697	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4698	 *     (ich9 only, otherwise error condition)
4699	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4700	 */
4701	switch (hsfsts.hsf_status.berasesz) {
4702	case 0:
4703		/* Hw sector size 256 */
4704		sector_size = ICH_FLASH_SEG_SIZE_256;
4705		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4706		break;
4707	case 1:
4708		sector_size = ICH_FLASH_SEG_SIZE_4K;
4709		iteration = 1;
4710		break;
4711	case 2:
4712		sector_size = ICH_FLASH_SEG_SIZE_8K;
4713		iteration = 1;
4714		break;
4715	case 3:
4716		sector_size = ICH_FLASH_SEG_SIZE_64K;
4717		iteration = 1;
4718		break;
4719	default:
4720		return -E1000_ERR_NVM;
4721	}
4722
4723	/* Start with the base address, then add the sector offset. */
4724	flash_linear_addr = hw->nvm.flash_base_addr;
4725	flash_linear_addr += (bank) ? flash_bank_size : 0;
4726
4727	for (j = 0; j < iteration; j++) {
4728		do {
4729			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4730
4731			/* Steps */
4732			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4733			if (ret_val)
4734				return ret_val;
4735
4736			/* Write a value 11 (block Erase) in Flash
4737			 * Cycle field in hw flash control
4738			 */
4739			if (hw->mac.type >= e1000_pch_spt)
4740				hsflctl.regval =
4741				    E1000_READ_FLASH_REG(hw,
4742							 ICH_FLASH_HSFSTS)>>16;
4743			else
4744				hsflctl.regval =
4745				    E1000_READ_FLASH_REG16(hw,
4746							   ICH_FLASH_HSFCTL);
4747
4748			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4749			if (hw->mac.type >= e1000_pch_spt)
4750				E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4751						      hsflctl.regval << 16);
4752			else
4753				E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4754							hsflctl.regval);
4755
4756			/* Write the last 24 bits of an index within the
4757			 * block into Flash Linear address field in Flash
4758			 * Address.
4759			 */
4760			flash_linear_addr += (j * sector_size);
4761			E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4762					      flash_linear_addr);
4763
4764			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4765			if (ret_val == E1000_SUCCESS)
4766				break;
4767
4768			/* Check if FCERR is set to 1.  If 1,
4769			 * clear it and try the whole sequence
4770			 * a few more times else Done
4771			 */
4772			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4773						      ICH_FLASH_HSFSTS);
4774			if (hsfsts.hsf_status.flcerr)
4775				/* repeat for some time before giving up */
4776				continue;
4777			else if (!hsfsts.hsf_status.flcdone)
4778				return ret_val;
4779		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4780	}
4781
4782	return E1000_SUCCESS;
4783}
4784
4785/**
4786 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4787 *  @hw: pointer to the HW structure
4788 *  @data: Pointer to the LED settings
4789 *
4790 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4791 *  settings is all 0's or F's, set the LED default to a valid LED default
4792 *  setting.
4793 **/
4794STATIC s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4795{
4796	s32 ret_val;
4797
4798	DEBUGFUNC("e1000_valid_led_default_ich8lan");
4799
4800	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4801	if (ret_val) {
4802		DEBUGOUT("NVM Read Error\n");
4803		return ret_val;
4804	}
4805
4806	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4807		*data = ID_LED_DEFAULT_ICH8LAN;
4808
4809	return E1000_SUCCESS;
4810}
4811
4812/**
4813 *  e1000_id_led_init_pchlan - store LED configurations
4814 *  @hw: pointer to the HW structure
4815 *
4816 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4817 *  the PHY LED configuration register.
4818 *
4819 *  PCH also does not have an "always on" or "always off" mode which
4820 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4821 *  in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4822 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4823 *  link based on logic in e1000_led_[on|off]_pchlan().
4824 **/
4825STATIC s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4826{
4827	struct e1000_mac_info *mac = &hw->mac;
4828	s32 ret_val;
4829	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4830	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4831	u16 data, i, temp, shift;
4832
4833	DEBUGFUNC("e1000_id_led_init_pchlan");
4834
4835	/* Get default ID LED modes */
4836	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4837	if (ret_val)
4838		return ret_val;
4839
4840	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4841	mac->ledctl_mode1 = mac->ledctl_default;
4842	mac->ledctl_mode2 = mac->ledctl_default;
4843
4844	for (i = 0; i < 4; i++) {
4845		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4846		shift = (i * 5);
4847		switch (temp) {
4848		case ID_LED_ON1_DEF2:
4849		case ID_LED_ON1_ON2:
4850		case ID_LED_ON1_OFF2:
4851			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4852			mac->ledctl_mode1 |= (ledctl_on << shift);
4853			break;
4854		case ID_LED_OFF1_DEF2:
4855		case ID_LED_OFF1_ON2:
4856		case ID_LED_OFF1_OFF2:
4857			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4858			mac->ledctl_mode1 |= (ledctl_off << shift);
4859			break;
4860		default:
4861			/* Do nothing */
4862			break;
4863		}
4864		switch (temp) {
4865		case ID_LED_DEF1_ON2:
4866		case ID_LED_ON1_ON2:
4867		case ID_LED_OFF1_ON2:
4868			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4869			mac->ledctl_mode2 |= (ledctl_on << shift);
4870			break;
4871		case ID_LED_DEF1_OFF2:
4872		case ID_LED_ON1_OFF2:
4873		case ID_LED_OFF1_OFF2:
4874			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4875			mac->ledctl_mode2 |= (ledctl_off << shift);
4876			break;
4877		default:
4878			/* Do nothing */
4879			break;
4880		}
4881	}
4882
4883	return E1000_SUCCESS;
4884}
4885
4886/**
4887 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4888 *  @hw: pointer to the HW structure
4889 *
4890 *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4891 *  register, so the the bus width is hard coded.
4892 **/
4893STATIC s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4894{
4895	struct e1000_bus_info *bus = &hw->bus;
4896	s32 ret_val;
4897
4898	DEBUGFUNC("e1000_get_bus_info_ich8lan");
4899
4900	ret_val = e1000_get_bus_info_pcie_generic(hw);
4901
4902	/* ICH devices are "PCI Express"-ish.  They have
4903	 * a configuration space, but do not contain
4904	 * PCI Express Capability registers, so bus width
4905	 * must be hardcoded.
4906	 */
4907	if (bus->width == e1000_bus_width_unknown)
4908		bus->width = e1000_bus_width_pcie_x1;
4909
4910	return ret_val;
4911}
4912
4913/**
4914 *  e1000_reset_hw_ich8lan - Reset the hardware
4915 *  @hw: pointer to the HW structure
4916 *
4917 *  Does a full reset of the hardware which includes a reset of the PHY and
4918 *  MAC.
4919 **/
4920STATIC s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4921{
4922	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4923	u16 kum_cfg;
4924	u32 ctrl, reg;
4925	s32 ret_val;
4926
4927	DEBUGFUNC("e1000_reset_hw_ich8lan");
4928
4929	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4930	 * on the last TLP read/write transaction when MAC is reset.
4931	 */
4932	ret_val = e1000_disable_pcie_master_generic(hw);
4933	if (ret_val)
4934		DEBUGOUT("PCI-E Master disable polling has failed.\n");
4935
4936	DEBUGOUT("Masking off all interrupts\n");
4937	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4938
4939	/* Disable the Transmit and Receive units.  Then delay to allow
4940	 * any pending transactions to complete before we hit the MAC
4941	 * with the global reset.
4942	 */
4943	E1000_WRITE_REG(hw, E1000_RCTL, 0);
4944	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4945	E1000_WRITE_FLUSH(hw);
4946
4947	msec_delay(10);
4948
4949	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4950	if (hw->mac.type == e1000_ich8lan) {
4951		/* Set Tx and Rx buffer allocation to 8k apiece. */
4952		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4953		/* Set Packet Buffer Size to 16k. */
4954		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4955	}
4956
4957	if (hw->mac.type == e1000_pchlan) {
4958		/* Save the NVM K1 bit setting*/
4959		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4960		if (ret_val)
4961			return ret_val;
4962
4963		if (kum_cfg & E1000_NVM_K1_ENABLE)
4964			dev_spec->nvm_k1_enabled = true;
4965		else
4966			dev_spec->nvm_k1_enabled = false;
4967	}
4968
4969	ctrl = E1000_READ_REG(hw, E1000_CTRL);
4970
4971	if (!hw->phy.ops.check_reset_block(hw)) {
4972		/* Full-chip reset requires MAC and PHY reset at the same
4973		 * time to make sure the interface between MAC and the
4974		 * external PHY is reset.
4975		 */
4976		ctrl |= E1000_CTRL_PHY_RST;
4977
4978		/* Gate automatic PHY configuration by hardware on
4979		 * non-managed 82579
4980		 */
4981		if ((hw->mac.type == e1000_pch2lan) &&
4982		    !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
4983			e1000_gate_hw_phy_config_ich8lan(hw, true);
4984	}
4985	ret_val = e1000_acquire_swflag_ich8lan(hw);
4986	DEBUGOUT("Issuing a global reset to ich8lan\n");
4987	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
4988	/* cannot issue a flush here because it hangs the hardware */
4989	msec_delay(20);
4990
4991	/* Set Phy Config Counter to 50msec */
4992	if (hw->mac.type == e1000_pch2lan) {
4993		reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
4994		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4995		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4996		E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
4997	}
4998
4999	if (!ret_val)
5000		E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
5001
5002	if (ctrl & E1000_CTRL_PHY_RST) {
5003		ret_val = hw->phy.ops.get_cfg_done(hw);
5004		if (ret_val)
5005			return ret_val;
5006
5007		ret_val = e1000_post_phy_reset_ich8lan(hw);
5008		if (ret_val)
5009			return ret_val;
5010	}
5011
5012	/* For PCH, this write will make sure that any noise
5013	 * will be detected as a CRC error and be dropped rather than show up
5014	 * as a bad packet to the DMA engine.
5015	 */
5016	if (hw->mac.type == e1000_pchlan)
5017		E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5018
5019	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5020	E1000_READ_REG(hw, E1000_ICR);
5021
5022	reg = E1000_READ_REG(hw, E1000_KABGTXD);
5023	reg |= E1000_KABGTXD_BGSQLBIAS;
5024	E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5025
5026	return E1000_SUCCESS;
5027}
5028
5029/**
5030 *  e1000_init_hw_ich8lan - Initialize the hardware
5031 *  @hw: pointer to the HW structure
5032 *
5033 *  Prepares the hardware for transmit and receive by doing the following:
5034 *   - initialize hardware bits
5035 *   - initialize LED identification
5036 *   - setup receive address registers
5037 *   - setup flow control
5038 *   - setup transmit descriptors
5039 *   - clear statistics
5040 **/
5041STATIC s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5042{
5043	struct e1000_mac_info *mac = &hw->mac;
5044	u32 ctrl_ext, txdctl, snoop;
5045	s32 ret_val;
5046	u16 i;
5047
5048	DEBUGFUNC("e1000_init_hw_ich8lan");
5049
5050	e1000_initialize_hw_bits_ich8lan(hw);
5051
5052	/* Initialize identification LED */
5053	ret_val = mac->ops.id_led_init(hw);
5054	/* An error is not fatal and we should not stop init due to this */
5055	if (ret_val)
5056		DEBUGOUT("Error initializing identification LED\n");
5057
5058	/* Setup the receive address. */
5059	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5060
5061	/* Zero out the Multicast HASH table */
5062	DEBUGOUT("Zeroing the MTA\n");
5063	for (i = 0; i < mac->mta_reg_count; i++)
5064		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5065
5066	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
5067	 * the ME.  Disable wakeup by clearing the host wakeup bit.
5068	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5069	 */
5070	if (hw->phy.type == e1000_phy_82578) {
5071		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5072		i &= ~BM_WUC_HOST_WU_BIT;
5073		hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5074		ret_val = e1000_phy_hw_reset_ich8lan(hw);
5075		if (ret_val)
5076			return ret_val;
5077	}
5078
5079	/* Setup link and flow control */
5080	ret_val = mac->ops.setup_link(hw);
5081
5082	/* Set the transmit descriptor write-back policy for both queues */
5083	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5084	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5085		  E1000_TXDCTL_FULL_TX_DESC_WB);
5086	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5087		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5088	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5089	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5090	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5091		  E1000_TXDCTL_FULL_TX_DESC_WB);
5092	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5093		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5094	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5095
5096	/* ICH8 has opposite polarity of no_snoop bits.
5097	 * By default, we should use snoop behavior.
5098	 */
5099	if (mac->type == e1000_ich8lan)
5100		snoop = PCIE_ICH8_SNOOP_ALL;
5101	else
5102		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5103	e1000_set_pcie_no_snoop_generic(hw, snoop);
5104
5105	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5106	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5107	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5108
5109	/* Clear all of the statistics registers (clear on read).  It is
5110	 * important that we do this after we have tried to establish link
5111	 * because the symbol error count will increment wildly if there
5112	 * is no link.
5113	 */
5114	e1000_clear_hw_cntrs_ich8lan(hw);
5115
5116	return ret_val;
5117}
5118
5119/**
5120 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5121 *  @hw: pointer to the HW structure
5122 *
5123 *  Sets/Clears required hardware bits necessary for correctly setting up the
5124 *  hardware for transmit and receive.
5125 **/
5126STATIC void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5127{
5128	u32 reg;
5129
5130	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5131
5132	/* Extended Device Control */
5133	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5134	reg |= (1 << 22);
5135	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
5136	if (hw->mac.type >= e1000_pchlan)
5137		reg |= E1000_CTRL_EXT_PHYPDEN;
5138	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5139
5140	/* Transmit Descriptor Control 0 */
5141	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5142	reg |= (1 << 22);
5143	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5144
5145	/* Transmit Descriptor Control 1 */
5146	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5147	reg |= (1 << 22);
5148	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5149
5150	/* Transmit Arbitration Control 0 */
5151	reg = E1000_READ_REG(hw, E1000_TARC(0));
5152	if (hw->mac.type == e1000_ich8lan)
5153		reg |= (1 << 28) | (1 << 29);
5154	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5155	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5156
5157	/* Transmit Arbitration Control 1 */
5158	reg = E1000_READ_REG(hw, E1000_TARC(1));
5159	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5160		reg &= ~(1 << 28);
5161	else
5162		reg |= (1 << 28);
5163	reg |= (1 << 24) | (1 << 26) | (1 << 30);
5164	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5165
5166	/* Device Status */
5167	if (hw->mac.type == e1000_ich8lan) {
5168		reg = E1000_READ_REG(hw, E1000_STATUS);
5169		reg &= ~(1 << 31);
5170		E1000_WRITE_REG(hw, E1000_STATUS, reg);
5171	}
5172
5173	/* work-around descriptor data corruption issue during nfs v2 udp
5174	 * traffic, just disable the nfs filtering capability
5175	 */
5176	reg = E1000_READ_REG(hw, E1000_RFCTL);
5177	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5178
5179	/* Disable IPv6 extension header parsing because some malformed
5180	 * IPv6 headers can hang the Rx.
5181	 */
5182	if (hw->mac.type == e1000_ich8lan)
5183		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5184	E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5185
5186	/* Enable ECC on Lynxpoint */
5187	if (hw->mac.type >= e1000_pch_lpt) {
5188		reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5189		reg |= E1000_PBECCSTS_ECC_ENABLE;
5190		E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5191
5192		reg = E1000_READ_REG(hw, E1000_CTRL);
5193		reg |= E1000_CTRL_MEHE;
5194		E1000_WRITE_REG(hw, E1000_CTRL, reg);
5195	}
5196
5197	return;
5198}
5199
5200/**
5201 *  e1000_setup_link_ich8lan - Setup flow control and link settings
5202 *  @hw: pointer to the HW structure
5203 *
5204 *  Determines which flow control settings to use, then configures flow
5205 *  control.  Calls the appropriate media-specific link configuration
5206 *  function.  Assuming the adapter has a valid link partner, a valid link
5207 *  should be established.  Assumes the hardware has previously been reset
5208 *  and the transmitter and receiver are not enabled.
5209 **/
5210STATIC s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5211{
5212	s32 ret_val;
5213
5214	DEBUGFUNC("e1000_setup_link_ich8lan");
5215
5216	if (hw->phy.ops.check_reset_block(hw))
5217		return E1000_SUCCESS;
5218
5219	/* ICH parts do not have a word in the NVM to determine
5220	 * the default flow control setting, so we explicitly
5221	 * set it to full.
5222	 */
5223	if (hw->fc.requested_mode == e1000_fc_default)
5224		hw->fc.requested_mode = e1000_fc_full;
5225
5226	/* Save off the requested flow control mode for use later.  Depending
5227	 * on the link partner's capabilities, we may or may not use this mode.
5228	 */
5229	hw->fc.current_mode = hw->fc.requested_mode;
5230
5231	DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5232		hw->fc.current_mode);
5233
5234	/* Continue to configure the copper link. */
5235	ret_val = hw->mac.ops.setup_physical_interface(hw);
5236	if (ret_val)
5237		return ret_val;
5238
5239	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5240	if ((hw->phy.type == e1000_phy_82578) ||
5241	    (hw->phy.type == e1000_phy_82579) ||
5242	    (hw->phy.type == e1000_phy_i217) ||
5243	    (hw->phy.type == e1000_phy_82577)) {
5244		E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5245
5246		ret_val = hw->phy.ops.write_reg(hw,
5247					     PHY_REG(BM_PORT_CTRL_PAGE, 27),
5248					     hw->fc.pause_time);
5249		if (ret_val)
5250			return ret_val;
5251	}
5252
5253	return e1000_set_fc_watermarks_generic(hw);
5254}
5255
5256/**
5257 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5258 *  @hw: pointer to the HW structure
5259 *
5260 *  Configures the kumeran interface to the PHY to wait the appropriate time
5261 *  when polling the PHY, then call the generic setup_copper_link to finish
5262 *  configuring the copper link.
5263 **/
5264STATIC s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5265{
5266	u32 ctrl;
5267	s32 ret_val;
5268	u16 reg_data;
5269
5270	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5271
5272	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5273	ctrl |= E1000_CTRL_SLU;
5274	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5275	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5276
5277	/* Set the mac to wait the maximum time between each iteration
5278	 * and increase the max iterations when polling the phy;
5279	 * this fixes erroneous timeouts at 10Mbps.
5280	 */
5281	ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5282					       0xFFFF);
5283	if (ret_val)
5284		return ret_val;
5285	ret_val = e1000_read_kmrn_reg_generic(hw,
5286					      E1000_KMRNCTRLSTA_INBAND_PARAM,
5287					      &reg_data);
5288	if (ret_val)
5289		return ret_val;
5290	reg_data |= 0x3F;
5291	ret_val = e1000_write_kmrn_reg_generic(hw,
5292					       E1000_KMRNCTRLSTA_INBAND_PARAM,
5293					       reg_data);
5294	if (ret_val)
5295		return ret_val;
5296
5297	switch (hw->phy.type) {
5298	case e1000_phy_igp_3:
5299		ret_val = e1000_copper_link_setup_igp(hw);
5300		if (ret_val)
5301			return ret_val;
5302		break;
5303	case e1000_phy_bm:
5304	case e1000_phy_82578:
5305		ret_val = e1000_copper_link_setup_m88(hw);
5306		if (ret_val)
5307			return ret_val;
5308		break;
5309	case e1000_phy_82577:
5310	case e1000_phy_82579:
5311		ret_val = e1000_copper_link_setup_82577(hw);
5312		if (ret_val)
5313			return ret_val;
5314		break;
5315	case e1000_phy_ife:
5316		ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5317					       &reg_data);
5318		if (ret_val)
5319			return ret_val;
5320
5321		reg_data &= ~IFE_PMC_AUTO_MDIX;
5322
5323		switch (hw->phy.mdix) {
5324		case 1:
5325			reg_data &= ~IFE_PMC_FORCE_MDIX;
5326			break;
5327		case 2:
5328			reg_data |= IFE_PMC_FORCE_MDIX;
5329			break;
5330		case 0:
5331		default:
5332			reg_data |= IFE_PMC_AUTO_MDIX;
5333			break;
5334		}
5335		ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5336						reg_data);
5337		if (ret_val)
5338			return ret_val;
5339		break;
5340	default:
5341		break;
5342	}
5343
5344	return e1000_setup_copper_link_generic(hw);
5345}
5346
5347/**
5348 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5349 *  @hw: pointer to the HW structure
5350 *
5351 *  Calls the PHY specific link setup function and then calls the
5352 *  generic setup_copper_link to finish configuring the link for
5353 *  Lynxpoint PCH devices
5354 **/
5355STATIC s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5356{
5357	u32 ctrl;
5358	s32 ret_val;
5359
5360	DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5361
5362	ctrl = E1000_READ_REG(hw, E1000_CTRL);
5363	ctrl |= E1000_CTRL_SLU;
5364	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5365	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5366
5367	ret_val = e1000_copper_link_setup_82577(hw);
5368	if (ret_val)
5369		return ret_val;
5370
5371	return e1000_setup_copper_link_generic(hw);
5372}
5373
5374/**
5375 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5376 *  @hw: pointer to the HW structure
5377 *  @speed: pointer to store current link speed
5378 *  @duplex: pointer to store the current link duplex
5379 *
5380 *  Calls the generic get_speed_and_duplex to retrieve the current link
5381 *  information and then calls the Kumeran lock loss workaround for links at
5382 *  gigabit speeds.
5383 **/
5384STATIC s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5385					  u16 *duplex)
5386{
5387	s32 ret_val;
5388
5389	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5390
5391	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5392	if (ret_val)
5393		return ret_val;
5394
5395	if ((hw->mac.type == e1000_ich8lan) &&
5396	    (hw->phy.type == e1000_phy_igp_3) &&
5397	    (*speed == SPEED_1000)) {
5398		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5399	}
5400
5401	return ret_val;
5402}
5403
5404/**
5405 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5406 *  @hw: pointer to the HW structure
5407 *
5408 *  Work-around for 82566 Kumeran PCS lock loss:
5409 *  On link status change (i.e. PCI reset, speed change) and link is up and
5410 *  speed is gigabit-
5411 *    0) if workaround is optionally disabled do nothing
5412 *    1) wait 1ms for Kumeran link to come up
5413 *    2) check Kumeran Diagnostic register PCS lock loss bit
5414 *    3) if not set the link is locked (all is good), otherwise...
5415 *    4) reset the PHY
5416 *    5) repeat up to 10 times
5417 *  Note: this is only called for IGP3 copper when speed is 1gb.
5418 **/
5419STATIC s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5420{
5421	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5422	u32 phy_ctrl;
5423	s32 ret_val;
5424	u16 i, data;
5425	bool link;
5426
5427	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5428
5429	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5430		return E1000_SUCCESS;
5431
5432	/* Make sure link is up before proceeding.  If not just return.
5433	 * Attempting this while link is negotiating fouled up link
5434	 * stability
5435	 */
5436	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5437	if (!link)
5438		return E1000_SUCCESS;
5439
5440	for (i = 0; i < 10; i++) {
5441		/* read once to clear */
5442		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5443		if (ret_val)
5444			return ret_val;
5445		/* and again to get new status */
5446		ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5447		if (ret_val)
5448			return ret_val;
5449
5450		/* check for PCS lock */
5451		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5452			return E1000_SUCCESS;
5453
5454		/* Issue PHY reset */
5455		hw->phy.ops.reset(hw);
5456		msec_delay_irq(5);
5457	}
5458	/* Disable GigE link negotiation */
5459	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5460	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5461		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5462	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5463
5464	/* Call gig speed drop workaround on Gig disable before accessing
5465	 * any PHY registers
5466	 */
5467	e1000_gig_downshift_workaround_ich8lan(hw);
5468
5469	/* unable to acquire PCS lock */
5470	return -E1000_ERR_PHY;
5471}
5472
5473/**
5474 *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5475 *  @hw: pointer to the HW structure
5476 *  @state: boolean value used to set the current Kumeran workaround state
5477 *
5478 *  If ICH8, set the current Kumeran workaround state (enabled - true
5479 *  /disabled - false).
5480 **/
5481void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5482						 bool state)
5483{
5484	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5485
5486	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5487
5488	if (hw->mac.type != e1000_ich8lan) {
5489		DEBUGOUT("Workaround applies to ICH8 only.\n");
5490		return;
5491	}
5492
5493	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5494
5495	return;
5496}
5497
5498/**
5499 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5500 *  @hw: pointer to the HW structure
5501 *
5502 *  Workaround for 82566 power-down on D3 entry:
5503 *    1) disable gigabit link
5504 *    2) write VR power-down enable
5505 *    3) read it back
5506 *  Continue if successful, else issue LCD reset and repeat
5507 **/
5508void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5509{
5510	u32 reg;
5511	u16 data;
5512	u8  retry = 0;
5513
5514	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5515
5516	if (hw->phy.type != e1000_phy_igp_3)
5517		return;
5518
5519	/* Try the workaround twice (if needed) */
5520	do {
5521		/* Disable link */
5522		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5523		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5524			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5525		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5526
5527		/* Call gig speed drop workaround on Gig disable before
5528		 * accessing any PHY registers
5529		 */
5530		if (hw->mac.type == e1000_ich8lan)
5531			e1000_gig_downshift_workaround_ich8lan(hw);
5532
5533		/* Write VR power-down enable */
5534		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5535		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5536		hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5537				      data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5538
5539		/* Read it back and test */
5540		hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5541		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5542		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5543			break;
5544
5545		/* Issue PHY reset and repeat at most one more time */
5546		reg = E1000_READ_REG(hw, E1000_CTRL);
5547		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5548		retry++;
5549	} while (retry);
5550}
5551
5552/**
5553 *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5554 *  @hw: pointer to the HW structure
5555 *
5556 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5557 *  LPLU, Gig disable, MDIC PHY reset):
5558 *    1) Set Kumeran Near-end loopback
5559 *    2) Clear Kumeran Near-end loopback
5560 *  Should only be called for ICH8[m] devices with any 1G Phy.
5561 **/
5562void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5563{
5564	s32 ret_val;
5565	u16 reg_data;
5566
5567	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5568
5569	if ((hw->mac.type != e1000_ich8lan) ||
5570	    (hw->phy.type == e1000_phy_ife))
5571		return;
5572
5573	ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5574					      &reg_data);
5575	if (ret_val)
5576		return;
5577	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5578	ret_val = e1000_write_kmrn_reg_generic(hw,
5579					       E1000_KMRNCTRLSTA_DIAG_OFFSET,
5580					       reg_data);
5581	if (ret_val)
5582		return;
5583	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5584	e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5585				     reg_data);
5586}
5587
5588/**
5589 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5590 *  @hw: pointer to the HW structure
5591 *
5592 *  During S0 to Sx transition, it is possible the link remains at gig
5593 *  instead of negotiating to a lower speed.  Before going to Sx, set
5594 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5595 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5596 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5597 *  needs to be written.
5598 *  Parts that support (and are linked to a partner which support) EEE in
5599 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5600 *  than 10Mbps w/o EEE.
5601 **/
5602void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5603{
5604	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5605	u32 phy_ctrl;
5606	s32 ret_val;
5607
5608	DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5609
5610	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5611	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5612
5613	if (hw->phy.type == e1000_phy_i217) {
5614		u16 phy_reg, device_id = hw->device_id;
5615
5616		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5617		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5618		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5619		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5620		    (hw->mac.type >= e1000_pch_spt)) {
5621			u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5622
5623			E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5624					fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5625		}
5626
5627		ret_val = hw->phy.ops.acquire(hw);
5628		if (ret_val)
5629			goto out;
5630
5631		if (!dev_spec->eee_disable) {
5632			u16 eee_advert;
5633
5634			ret_val =
5635			    e1000_read_emi_reg_locked(hw,
5636						      I217_EEE_ADVERTISEMENT,
5637						      &eee_advert);
5638			if (ret_val)
5639				goto release;
5640
5641			/* Disable LPLU if both link partners support 100BaseT
5642			 * EEE and 100Full is advertised on both ends of the
5643			 * link, and enable Auto Enable LPI since there will
5644			 * be no driver to enable LPI while in Sx.
5645			 */
5646			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5647			    (dev_spec->eee_lp_ability &
5648			     I82579_EEE_100_SUPPORTED) &&
5649			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5650				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5651					      E1000_PHY_CTRL_NOND0A_LPLU);
5652
5653				/* Set Auto Enable LPI after link up */
5654				hw->phy.ops.read_reg_locked(hw,
5655							    I217_LPI_GPIO_CTRL,
5656							    &phy_reg);
5657				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5658				hw->phy.ops.write_reg_locked(hw,
5659							     I217_LPI_GPIO_CTRL,
5660							     phy_reg);
5661			}
5662		}
5663
5664		/* For i217 Intel Rapid Start Technology support,
5665		 * when the system is going into Sx and no manageability engine
5666		 * is present, the driver must configure proxy to reset only on
5667		 * power good.  LPI (Low Power Idle) state must also reset only
5668		 * on power good, as well as the MTA (Multicast table array).
5669		 * The SMBus release must also be disabled on LCD reset.
5670		 */
5671		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5672		      E1000_ICH_FWSM_FW_VALID)) {
5673			/* Enable proxy to reset only on power good. */
5674			hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5675						    &phy_reg);
5676			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5677			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5678						     phy_reg);
5679
5680			/* Set bit enable LPI (EEE) to reset only on
5681			 * power good.
5682			*/
5683			hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5684			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5685			hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5686
5687			/* Disable the SMB release on LCD reset. */
5688			hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5689			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5690			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5691		}
5692
5693		/* Enable MTA to reset for Intel Rapid Start Technology
5694		 * Support
5695		 */
5696		hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5697		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5698		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5699
5700release:
5701		hw->phy.ops.release(hw);
5702	}
5703out:
5704	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5705
5706	if (hw->mac.type == e1000_ich8lan)
5707		e1000_gig_downshift_workaround_ich8lan(hw);
5708
5709	if (hw->mac.type >= e1000_pchlan) {
5710		e1000_oem_bits_config_ich8lan(hw, false);
5711
5712		/* Reset PHY to activate OEM bits on 82577/8 */
5713		if (hw->mac.type == e1000_pchlan)
5714			e1000_phy_hw_reset_generic(hw);
5715
5716		ret_val = hw->phy.ops.acquire(hw);
5717		if (ret_val)
5718			return;
5719		e1000_write_smbus_addr(hw);
5720		hw->phy.ops.release(hw);
5721	}
5722
5723	return;
5724}
5725
5726/**
5727 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5728 *  @hw: pointer to the HW structure
5729 *
5730 *  During Sx to S0 transitions on non-managed devices or managed devices
5731 *  on which PHY resets are not blocked, if the PHY registers cannot be
5732 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5733 *  the PHY.
5734 *  On i217, setup Intel Rapid Start Technology.
5735 **/
5736u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5737{
5738	s32 ret_val;
5739
5740	DEBUGFUNC("e1000_resume_workarounds_pchlan");
5741	if (hw->mac.type < e1000_pch2lan)
5742		return E1000_SUCCESS;
5743
5744	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5745	if (ret_val) {
5746		DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5747		return ret_val;
5748	}
5749
5750	/* For i217 Intel Rapid Start Technology support when the system
5751	 * is transitioning from Sx and no manageability engine is present
5752	 * configure SMBus to restore on reset, disable proxy, and enable
5753	 * the reset on MTA (Multicast table array).
5754	 */
5755	if (hw->phy.type == e1000_phy_i217) {
5756		u16 phy_reg;
5757
5758		ret_val = hw->phy.ops.acquire(hw);
5759		if (ret_val) {
5760			DEBUGOUT("Failed to setup iRST\n");
5761			return ret_val;
5762		}
5763
5764		/* Clear Auto Enable LPI after link up */
5765		hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5766		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5767		hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5768
5769		if (!(E1000_READ_REG(hw, E1000_FWSM) &
5770		    E1000_ICH_FWSM_FW_VALID)) {
5771			/* Restore clear on SMB if no manageability engine
5772			 * is present
5773			 */
5774			ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5775							      &phy_reg);
5776			if (ret_val)
5777				goto release;
5778			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5779			hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5780
5781			/* Disable Proxy */
5782			hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5783		}
5784		/* Enable reset on MTA */
5785		ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5786						      &phy_reg);
5787		if (ret_val)
5788			goto release;
5789		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5790		hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5791release:
5792		if (ret_val)
5793			DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5794		hw->phy.ops.release(hw);
5795		return ret_val;
5796	}
5797	return E1000_SUCCESS;
5798}
5799
5800/**
5801 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5802 *  @hw: pointer to the HW structure
5803 *
5804 *  Return the LED back to the default configuration.
5805 **/
5806STATIC s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5807{
5808	DEBUGFUNC("e1000_cleanup_led_ich8lan");
5809
5810	if (hw->phy.type == e1000_phy_ife)
5811		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5812					     0);
5813
5814	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5815	return E1000_SUCCESS;
5816}
5817
5818/**
5819 *  e1000_led_on_ich8lan - Turn LEDs on
5820 *  @hw: pointer to the HW structure
5821 *
5822 *  Turn on the LEDs.
5823 **/
5824STATIC s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5825{
5826	DEBUGFUNC("e1000_led_on_ich8lan");
5827
5828	if (hw->phy.type == e1000_phy_ife)
5829		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5830				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5831
5832	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5833	return E1000_SUCCESS;
5834}
5835
5836/**
5837 *  e1000_led_off_ich8lan - Turn LEDs off
5838 *  @hw: pointer to the HW structure
5839 *
5840 *  Turn off the LEDs.
5841 **/
5842STATIC s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5843{
5844	DEBUGFUNC("e1000_led_off_ich8lan");
5845
5846	if (hw->phy.type == e1000_phy_ife)
5847		return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5848			       (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5849
5850	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5851	return E1000_SUCCESS;
5852}
5853
5854/**
5855 *  e1000_setup_led_pchlan - Configures SW controllable LED
5856 *  @hw: pointer to the HW structure
5857 *
5858 *  This prepares the SW controllable LED for use.
5859 **/
5860STATIC s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5861{
5862	DEBUGFUNC("e1000_setup_led_pchlan");
5863
5864	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5865				     (u16)hw->mac.ledctl_mode1);
5866}
5867
5868/**
5869 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5870 *  @hw: pointer to the HW structure
5871 *
5872 *  Return the LED back to the default configuration.
5873 **/
5874STATIC s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5875{
5876	DEBUGFUNC("e1000_cleanup_led_pchlan");
5877
5878	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5879				     (u16)hw->mac.ledctl_default);
5880}
5881
5882/**
5883 *  e1000_led_on_pchlan - Turn LEDs on
5884 *  @hw: pointer to the HW structure
5885 *
5886 *  Turn on the LEDs.
5887 **/
5888STATIC s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5889{
5890	u16 data = (u16)hw->mac.ledctl_mode2;
5891	u32 i, led;
5892
5893	DEBUGFUNC("e1000_led_on_pchlan");
5894
5895	/* If no link, then turn LED on by setting the invert bit
5896	 * for each LED that's mode is "link_up" in ledctl_mode2.
5897	 */
5898	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5899		for (i = 0; i < 3; i++) {
5900			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5901			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5902			    E1000_LEDCTL_MODE_LINK_UP)
5903				continue;
5904			if (led & E1000_PHY_LED0_IVRT)
5905				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5906			else
5907				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5908		}
5909	}
5910
5911	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5912}
5913
5914/**
5915 *  e1000_led_off_pchlan - Turn LEDs off
5916 *  @hw: pointer to the HW structure
5917 *
5918 *  Turn off the LEDs.
5919 **/
5920STATIC s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5921{
5922	u16 data = (u16)hw->mac.ledctl_mode1;
5923	u32 i, led;
5924
5925	DEBUGFUNC("e1000_led_off_pchlan");
5926
5927	/* If no link, then turn LED off by clearing the invert bit
5928	 * for each LED that's mode is "link_up" in ledctl_mode1.
5929	 */
5930	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5931		for (i = 0; i < 3; i++) {
5932			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5933			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5934			    E1000_LEDCTL_MODE_LINK_UP)
5935				continue;
5936			if (led & E1000_PHY_LED0_IVRT)
5937				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5938			else
5939				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5940		}
5941	}
5942
5943	return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5944}
5945
5946/**
5947 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5948 *  @hw: pointer to the HW structure
5949 *
5950 *  Read appropriate register for the config done bit for completion status
5951 *  and configure the PHY through s/w for EEPROM-less parts.
5952 *
5953 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5954 *  config done bit, so only an error is logged and continues.  If we were
5955 *  to return with error, EEPROM-less silicon would not be able to be reset
5956 *  or change link.
5957 **/
5958STATIC s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5959{
5960	s32 ret_val = E1000_SUCCESS;
5961	u32 bank = 0;
5962	u32 status;
5963
5964	DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5965
5966	e1000_get_cfg_done_generic(hw);
5967
5968	/* Wait for indication from h/w that it has completed basic config */
5969	if (hw->mac.type >= e1000_ich10lan) {
5970		e1000_lan_init_done_ich8lan(hw);
5971	} else {
5972		ret_val = e1000_get_auto_rd_done_generic(hw);
5973		if (ret_val) {
5974			/* When auto config read does not complete, do not
5975			 * return with an error. This can happen in situations
5976			 * where there is no eeprom and prevents getting link.
5977			 */
5978			DEBUGOUT("Auto Read Done did not complete\n");
5979			ret_val = E1000_SUCCESS;
5980		}
5981	}
5982
5983	/* Clear PHY Reset Asserted bit */
5984	status = E1000_READ_REG(hw, E1000_STATUS);
5985	if (status & E1000_STATUS_PHYRA)
5986		E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
5987	else
5988		DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
5989
5990	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5991	if (hw->mac.type <= e1000_ich9lan) {
5992		if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
5993		    (hw->phy.type == e1000_phy_igp_3)) {
5994			e1000_phy_init_script_igp3(hw);
5995		}
5996	} else {
5997		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5998			/* Maybe we should do a basic PHY config */
5999			DEBUGOUT("EEPROM not present\n");
6000			ret_val = -E1000_ERR_CONFIG;
6001		}
6002	}
6003
6004	return ret_val;
6005}
6006
6007/**
6008 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6009 * @hw: pointer to the HW structure
6010 *
6011 * In the case of a PHY power down to save power, or to turn off link during a
6012 * driver unload, or wake on lan is not enabled, remove the link.
6013 **/
6014STATIC void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6015{
6016	/* If the management interface is not enabled, then power down */
6017	if (!(hw->mac.ops.check_mng_mode(hw) ||
6018	      hw->phy.ops.check_reset_block(hw)))
6019		e1000_power_down_phy_copper(hw);
6020
6021	return;
6022}
6023
6024/**
6025 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6026 *  @hw: pointer to the HW structure
6027 *
6028 *  Clears hardware counters specific to the silicon family and calls
6029 *  clear_hw_cntrs_generic to clear all general purpose counters.
6030 **/
6031STATIC void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6032{
6033	u16 phy_data;
6034	s32 ret_val;
6035
6036	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6037
6038	e1000_clear_hw_cntrs_base_generic(hw);
6039
6040	E1000_READ_REG(hw, E1000_ALGNERRC);
6041	E1000_READ_REG(hw, E1000_RXERRC);
6042	E1000_READ_REG(hw, E1000_TNCRS);
6043	E1000_READ_REG(hw, E1000_CEXTERR);
6044	E1000_READ_REG(hw, E1000_TSCTC);
6045	E1000_READ_REG(hw, E1000_TSCTFC);
6046
6047	E1000_READ_REG(hw, E1000_MGTPRC);
6048	E1000_READ_REG(hw, E1000_MGTPDC);
6049	E1000_READ_REG(hw, E1000_MGTPTC);
6050
6051	E1000_READ_REG(hw, E1000_IAC);
6052	E1000_READ_REG(hw, E1000_ICRXOC);
6053
6054	/* Clear PHY statistics registers */
6055	if ((hw->phy.type == e1000_phy_82578) ||
6056	    (hw->phy.type == e1000_phy_82579) ||
6057	    (hw->phy.type == e1000_phy_i217) ||
6058	    (hw->phy.type == e1000_phy_82577)) {
6059		ret_val = hw->phy.ops.acquire(hw);
6060		if (ret_val)
6061			return;
6062		ret_val = hw->phy.ops.set_page(hw,
6063					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
6064		if (ret_val)
6065			goto release;
6066		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6067		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6068		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6069		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6070		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6071		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6072		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6073		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6074		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6075		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6076		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6077		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6078		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6079		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6080release:
6081		hw->phy.ops.release(hw);
6082	}
6083}
6084
6085/**
6086 *  e1000_configure_k0s_lpt - Configure K0s power state
6087 *  @hw: pointer to the HW structure
6088 *  @entry_latency: Tx idle period for entering K0s - valid values are 0 to 3.
6089 *	0 corresponds to 128ns, each value over 0 doubles the duration.
6090 *  @min_time: Minimum Tx idle period allowed  - valid values are 0 to 4.
6091 *	0 corresponds to 128ns, each value over 0 doubles the duration.
6092 *
6093 *  Configure the K1 power state based on the provided parameter.
6094 *  Assumes semaphore already acquired.
6095 *
6096 *  Success returns 0, Failure returns:
6097 *	-E1000_ERR_PHY (-2) in case of access error
6098 *	-E1000_ERR_PARAM (-4) in case of parameters error
6099 **/
6100s32 e1000_configure_k0s_lpt(struct e1000_hw *hw, u8 entry_latency, u8 min_time)
6101{
6102	s32 ret_val;
6103	u16 kmrn_reg = 0;
6104
6105	DEBUGFUNC("e1000_configure_k0s_lpt");
6106
6107	if (entry_latency > 3 || min_time > 4)
6108		return -E1000_ERR_PARAM;
6109
6110	ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6111					     &kmrn_reg);
6112	if (ret_val)
6113		return ret_val;
6114
6115	/* for now don't touch the latency */
6116	kmrn_reg &= ~(E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_MASK);
6117	kmrn_reg |= ((min_time << E1000_KMRNCTRLSTA_K0S_CTRL_MIN_TIME_SHIFT));
6118
6119	ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K0S_CTRL,
6120					      kmrn_reg);
6121	if (ret_val)
6122		return ret_val;
6123
6124	return E1000_SUCCESS;
6125}
6126