/*
 * forcedeth: Ethernet driver for NVIDIA nForce media access controllers.
 *
 * Note: This driver is a cleanroom reimplementation based on reverse
 * engineered documentation written by Carl-Daniel Hailfinger
 * and Andrew de Quincey.
 *
 * NVIDIA, nForce and other NVIDIA marks are trademarks or registered
 * trademarks of NVIDIA Corporation in the United States and other
 * countries.
 *
 * Copyright (C) 2003,4,5 Manfred Spraul
 * Copyright (C) 2004 Andrew de Quincey (wol support)
 * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
 *		IRQ rate fixes, bigendian fixes, cleanups, verification)
 * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Known bugs:
 * We suspect that on some hardware no TX done interrupts are generated.
 * This means recovery from netif_stop_queue only happens if the hw timer
 * interrupt fires (100 times/second, configurable with NVREG_POLL_DEFAULT)
 * and the timer is active in the IRQMask, or if an rx packet arrives by chance.
 * If your hardware reliably generates tx done interrupts, then you can remove
 * DEV_NEED_TIMERIRQ from the driver_data flags.
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
#define FORCEDETH_VERSION "0.64"
#define DRV_NAME "forcedeth"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#if 0
#define dprintk printk
#else
#define dprintk(x...) do { } while (0)
#endif
#define TX_WORK_PER_LOOP 64
#define RX_WORK_PER_LOOP 64
/*
 * Hardware access:
 */

#define DEV_NEED_TIMERIRQ 0x0000001 /* set the timer irq flag in the irq mask */
#define DEV_NEED_LINKTIMER 0x0000002 /* poll link settings. Relies on the timer irq */
#define DEV_HAS_LARGEDESC 0x0000004 /* device supports jumbo frames and needs packet format 2 */
#define DEV_HAS_HIGH_DMA 0x0000008 /* device supports 64bit dma */
#define DEV_HAS_CHECKSUM 0x0000010 /* device supports tx and rx checksum offloads */
#define DEV_HAS_VLAN 0x0000020 /* device supports vlan tagging and stripping */
#define DEV_HAS_MSI 0x0000040 /* device supports MSI */
#define DEV_HAS_MSI_X 0x0000080 /* device supports MSI-X */
#define DEV_HAS_POWER_CNTRL 0x0000100 /* device supports power savings */
#define DEV_HAS_STATISTICS_V1 0x0000200 /* device supports hw statistics version 1 */
#define DEV_HAS_STATISTICS_V2 0x0000400 /* device supports hw statistics version 2 */
#define DEV_HAS_STATISTICS_V3 0x0000800 /* device supports hw statistics version 3 */
#define DEV_HAS_STATISTICS_V12 0x0000600 /* device supports hw statistics version 1 and 2 */
#define DEV_HAS_STATISTICS_V123 0x0000e00 /* device supports hw statistics version 1, 2, and 3 */
#define DEV_HAS_TEST_EXTENDED 0x0001000 /* device supports extended diagnostic test */
#define DEV_HAS_MGMT_UNIT 0x0002000 /* device supports management unit */
#define DEV_HAS_CORRECT_MACADDR 0x0004000 /* device supports correct mac address order */
#define DEV_HAS_COLLISION_FIX 0x0008000 /* device supports tx collision fix */
#define DEV_HAS_PAUSEFRAME_TX_V1 0x0010000 /* device supports tx pause frames version 1 */
#define DEV_HAS_PAUSEFRAME_TX_V2 0x0020000 /* device supports tx pause frames version 2 */
#define DEV_HAS_PAUSEFRAME_TX_V3 0x0040000 /* device supports tx pause frames version 3 */
#define DEV_NEED_TX_LIMIT 0x0080000 /* device needs to limit tx */
#define DEV_NEED_TX_LIMIT2 0x0180000 /* device needs to limit tx, except for some revs */
#define DEV_HAS_GEAR_MODE 0x0200000 /* device supports gear mode */
#define DEV_NEED_PHY_INIT_FIX 0x0400000 /* device needs specific phy workaround */
#define DEV_NEED_LOW_POWER_FIX 0x0800000 /* device needs special power up workaround */
#define DEV_NEED_MSI_FIX 0x1000000 /* device needs msi workaround */
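/*
 * The combined flags above are simply ORed unions of the single bits:
 * DEV_HAS_STATISTICS_V12 = 0x0000200 | 0x0000400 = 0x0000600,
 * DEV_HAS_STATISTICS_V123 = 0x0000200 | 0x0000400 | 0x0000800 = 0x0000e00,
 * DEV_NEED_TX_LIMIT2 = 0x0080000 | 0x0100000 = 0x0180000, so a single
 * "driver_data & DEV_HAS_STATISTICS_Vn"-style test matches any device
 * whose flag word carries the corresponding bit.
 */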
enum {
	NvRegIrqStatus = 0x000,
#define NVREG_IRQSTAT_MIIEVENT 0x040
#define NVREG_IRQSTAT_MASK 0x83ff
	NvRegIrqMask = 0x004,
#define NVREG_IRQ_RX_ERROR 0x0001
#define NVREG_IRQ_RX 0x0002
#define NVREG_IRQ_RX_NOBUF 0x0004
#define NVREG_IRQ_TX_ERR 0x0008
#define NVREG_IRQ_TX_OK 0x0010
#define NVREG_IRQ_TIMER 0x0020
#define NVREG_IRQ_LINK 0x0040
#define NVREG_IRQ_RX_FORCED 0x0080
#define NVREG_IRQ_TX_FORCED 0x0100
#define NVREG_IRQ_RECOVER_ERROR 0x8200
#define NVREG_IRQMASK_THROUGHPUT 0x00df
#define NVREG_IRQMASK_CPU 0x0060
#define NVREG_IRQ_TX_ALL (NVREG_IRQ_TX_ERR|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_FORCED)
#define NVREG_IRQ_RX_ALL (NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_RX_FORCED)
#define NVREG_IRQ_OTHER (NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_RECOVER_ERROR)

	NvRegUnknownSetupReg6 = 0x008,
#define NVREG_UNKSETUP6_VAL 3

/*
 * NVREG_POLL_DEFAULT is the interval length of the timer source on the nic
 * NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms
 */
	NvRegPollingInterval = 0x00c,
#define NVREG_POLL_DEFAULT_THROUGHPUT 65535 /* backup tx cleanup if loop max reached */
#define NVREG_POLL_DEFAULT_CPU 13
	NvRegMSIMap0 = 0x020,
	NvRegMSIMap1 = 0x024,
	NvRegMSIIrqMask = 0x030,
#define NVREG_MSI_VECTOR_0_ENABLED 0x01
	NvRegMisc1 = 0x080,
#define NVREG_MISC1_PAUSE_TX 0x01
#define NVREG_MISC1_HD 0x02
#define NVREG_MISC1_FORCE 0x3b0f3c

	NvRegMacReset = 0x34,
#define NVREG_MAC_RESET_ASSERT 0x0F3
	NvRegTransmitterControl = 0x084,
#define NVREG_XMITCTL_START 0x01
#define NVREG_XMITCTL_MGMT_ST 0x40000000
#define NVREG_XMITCTL_SYNC_MASK 0x000f0000
#define NVREG_XMITCTL_SYNC_NOT_READY 0x0
#define NVREG_XMITCTL_SYNC_PHY_INIT 0x00040000
#define NVREG_XMITCTL_MGMT_SEMA_MASK 0x00000f00
#define NVREG_XMITCTL_MGMT_SEMA_FREE 0x0
#define NVREG_XMITCTL_HOST_SEMA_MASK 0x0000f000
#define NVREG_XMITCTL_HOST_SEMA_ACQ 0x0000f000
#define NVREG_XMITCTL_HOST_LOADED 0x00004000
#define NVREG_XMITCTL_TX_PATH_EN 0x01000000
#define NVREG_XMITCTL_DATA_START 0x00100000
#define NVREG_XMITCTL_DATA_READY 0x00010000
#define NVREG_XMITCTL_DATA_ERROR 0x00020000
	NvRegTransmitterStatus = 0x088,
#define NVREG_XMITSTAT_BUSY 0x01

	NvRegPacketFilterFlags = 0x8c,
#define NVREG_PFF_PAUSE_RX 0x08
#define NVREG_PFF_ALWAYS 0x7F0000
#define NVREG_PFF_PROMISC 0x80
#define NVREG_PFF_MYADDR 0x20
#define NVREG_PFF_LOOPBACK 0x10

	NvRegOffloadConfig = 0x90,
#define NVREG_OFFLOAD_HOMEPHY 0x601
#define NVREG_OFFLOAD_NORMAL RX_NIC_BUFSIZE
	NvRegReceiverControl = 0x094,
#define NVREG_RCVCTL_START 0x01
#define NVREG_RCVCTL_RX_PATH_EN 0x01000000
	NvRegReceiverStatus = 0x98,
#define NVREG_RCVSTAT_BUSY 0x01

	NvRegSlotTime = 0x9c,
#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
#define NVREG_SLOTTIME_HALF 0x0000ff00
#define NVREG_SLOTTIME_DEFAULT 0x00007f00
#define NVREG_SLOTTIME_MASK 0x000000ff

	NvRegTxDeferral = 0xA0,
#define NVREG_TX_DEFERRAL_DEFAULT 0x15050f
#define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f
#define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f
#define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f
#define NVREG_TX_DEFERRAL_MII_STRETCH 0x152000
	NvRegRxDeferral = 0xA4,
#define NVREG_RX_DEFERRAL_DEFAULT 0x16
	NvRegMacAddrA = 0xA8,
	NvRegMacAddrB = 0xAC,
	NvRegMulticastAddrA = 0xB0,
#define NVREG_MCASTADDRA_FORCE 0x01
	NvRegMulticastAddrB = 0xB4,
	NvRegMulticastMaskA = 0xB8,
#define NVREG_MCASTMASKA_NONE 0xffffffff
	NvRegMulticastMaskB = 0xBC,
#define NVREG_MCASTMASKB_NONE 0xffff

	NvRegPhyInterface = 0xC0,
#define PHY_RGMII 0x10000000
	NvRegBackOffControl = 0xC4,
#define NVREG_BKOFFCTRL_DEFAULT 0x70000000
#define NVREG_BKOFFCTRL_SEED_MASK 0x000003ff
#define NVREG_BKOFFCTRL_SELECT 24
#define NVREG_BKOFFCTRL_GEAR 12

	NvRegTxRingPhysAddr = 0x100,
	NvRegRxRingPhysAddr = 0x104,
	NvRegRingSizes = 0x108,
#define NVREG_RINGSZ_TXSHIFT 0
#define NVREG_RINGSZ_RXSHIFT 16
	NvRegTransmitPoll = 0x10c,
#define NVREG_TRANSMITPOLL_MAC_ADDR_REV 0x00008000
	NvRegLinkSpeed = 0x110,
#define NVREG_LINKSPEED_FORCE 0x10000
#define NVREG_LINKSPEED_10 1000
#define NVREG_LINKSPEED_100 100
#define NVREG_LINKSPEED_1000 50
#define NVREG_LINKSPEED_MASK (0xFFF)
	NvRegUnknownSetupReg5 = 0x130,
#define NVREG_UNKSETUP5_BIT31 (1<<31)
	NvRegTxWatermark = 0x13c,
#define NVREG_TX_WM_DESC1_DEFAULT 0x0200010
#define NVREG_TX_WM_DESC2_3_DEFAULT 0x1e08000
#define NVREG_TX_WM_DESC2_3_1000 0xfe08000
	NvRegTxRxControl = 0x144,
#define NVREG_TXRXCTL_KICK 0x0001
#define NVREG_TXRXCTL_BIT1 0x0002
#define NVREG_TXRXCTL_BIT2 0x0004
#define NVREG_TXRXCTL_IDLE 0x0008
#define NVREG_TXRXCTL_RESET 0x0010
#define NVREG_TXRXCTL_RXCHECK 0x0400
#define NVREG_TXRXCTL_DESC_1 0
#define NVREG_TXRXCTL_DESC_2 0x002100
#define NVREG_TXRXCTL_DESC_3 0xc02200
#define NVREG_TXRXCTL_VLANSTRIP 0x00040
#define NVREG_TXRXCTL_VLANINS 0x00080
	NvRegTxRingPhysAddrHigh = 0x148,
	NvRegRxRingPhysAddrHigh = 0x14C,
	NvRegTxPauseFrame = 0x170,
#define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080
#define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010
#define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0
#define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880
	NvRegTxPauseFrameLimit = 0x174,
#define NVREG_TX_PAUSEFRAMELIMIT_ENABLE 0x00010000
	NvRegMIIStatus = 0x180,
#define NVREG_MIISTAT_ERROR 0x0001
#define NVREG_MIISTAT_LINKCHANGE 0x0008
#define NVREG_MIISTAT_MASK_RW 0x0007
#define NVREG_MIISTAT_MASK_ALL 0x000f
	NvRegMIIMask = 0x184,
#define NVREG_MII_LINKCHANGE 0x0008

	NvRegAdapterControl = 0x188,
#define NVREG_ADAPTCTL_START 0x02
#define NVREG_ADAPTCTL_LINKUP 0x04
#define NVREG_ADAPTCTL_PHYVALID 0x40000
#define NVREG_ADAPTCTL_RUNNING 0x100000
#define NVREG_ADAPTCTL_PHYSHIFT 24
	NvRegMIISpeed = 0x18c,
#define NVREG_MIISPEED_BIT8 (1<<8)
#define NVREG_MIIDELAY 5
	NvRegMIIControl = 0x190,
#define NVREG_MIICTL_INUSE 0x08000
#define NVREG_MIICTL_WRITE 0x00400
#define NVREG_MIICTL_ADDRSHIFT 5
	NvRegMIIData = 0x194,
	NvRegTxUnicast = 0x1a0,
	NvRegTxMulticast = 0x1a4,
	NvRegTxBroadcast = 0x1a8,
	NvRegWakeUpFlags = 0x200,
#define NVREG_WAKEUPFLAGS_VAL 0x7770
#define NVREG_WAKEUPFLAGS_BUSYSHIFT 24
#define NVREG_WAKEUPFLAGS_ENABLESHIFT 16
#define NVREG_WAKEUPFLAGS_D3SHIFT 12
#define NVREG_WAKEUPFLAGS_D2SHIFT 8
#define NVREG_WAKEUPFLAGS_D1SHIFT 4
#define NVREG_WAKEUPFLAGS_D0SHIFT 0
#define NVREG_WAKEUPFLAGS_ACCEPT_MAGPAT 0x01
#define NVREG_WAKEUPFLAGS_ACCEPT_WAKEUPPAT 0x02
#define NVREG_WAKEUPFLAGS_ACCEPT_LINKCHANGE 0x04
#define NVREG_WAKEUPFLAGS_ENABLE 0x1111

	NvRegMgmtUnitGetVersion = 0x204,
#define NVREG_MGMTUNITGETVERSION 0x01
	NvRegMgmtUnitVersion = 0x208,
#define NVREG_MGMTUNITVERSION 0x08
	NvRegPowerCap = 0x268,
#define NVREG_POWERCAP_D3SUPP (1<<30)
#define NVREG_POWERCAP_D2SUPP (1<<26)
#define NVREG_POWERCAP_D1SUPP (1<<25)
	NvRegPowerState = 0x26c,
#define NVREG_POWERSTATE_POWEREDUP 0x8000
#define NVREG_POWERSTATE_VALID 0x0100
#define NVREG_POWERSTATE_MASK 0x0003
#define NVREG_POWERSTATE_D0 0x0000
#define NVREG_POWERSTATE_D1 0x0001
#define NVREG_POWERSTATE_D2 0x0002
#define NVREG_POWERSTATE_D3 0x0003
	NvRegMgmtUnitControl = 0x278,
#define NVREG_MGMTUNITCONTROL_INUSE 0x20000
	NvRegTxCnt = 0x280,
	NvRegTxZeroReXmt = 0x284,
	NvRegTxOneReXmt = 0x288,
	NvRegTxManyReXmt = 0x28c,
	NvRegTxLateCol = 0x290,
	NvRegTxUnderflow = 0x294,
	NvRegTxLossCarrier = 0x298,
	NvRegTxExcessDef = 0x29c,
	NvRegTxRetryErr = 0x2a0,
	NvRegRxFrameErr = 0x2a4,
	NvRegRxExtraByte = 0x2a8,
	NvRegRxLateCol = 0x2ac,
	NvRegRxRunt = 0x2b0,
	NvRegRxFrameTooLong = 0x2b4,
	NvRegRxOverflow = 0x2b8,
	NvRegRxFCSErr = 0x2bc,
	NvRegRxFrameAlignErr = 0x2c0,
	NvRegRxLenErr = 0x2c4,
	NvRegRxUnicast = 0x2c8,
	NvRegRxMulticast = 0x2cc,
	NvRegRxBroadcast = 0x2d0,
	NvRegTxDef = 0x2d4,
	NvRegTxFrame = 0x2d8,
	NvRegRxCnt = 0x2dc,
	NvRegTxPause = 0x2e0,
	NvRegRxPause = 0x2e4,
	NvRegRxDropFrame = 0x2e8,
	NvRegVlanControl = 0x300,
#define NVREG_VLANCONTROL_ENABLE 0x2000
	NvRegMSIXMap0 = 0x3e0,
	NvRegMSIXMap1 = 0x3e4,
	NvRegMSIXIrqStatus = 0x3f0,

	NvRegPowerState2 = 0x600,
#define NVREG_POWERSTATE2_POWERUP_MASK 0x0F15
#define NVREG_POWERSTATE2_POWERUP_REV_A3 0x0001
#define NVREG_POWERSTATE2_PHY_RESET 0x0004
#define NVREG_POWERSTATE2_GATE_CLOCKS 0x0F00
};
/* Big endian: should work, but is untested */
struct ring_desc {
	__le32 buf;
	__le32 flaglen;
};

struct ring_desc_ex {
	__le32 bufhigh;
	__le32 buflow;
	__le32 txvlan;
	__le32 flaglen;
};

union ring_type {
	struct ring_desc *orig;
	struct ring_desc_ex *ex;
};

#define FLAG_MASK_V1 0xffff0000
#define FLAG_MASK_V2 0xffffc000
#define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
#define LEN_MASK_V2 (0xffffffff ^ FLAG_MASK_V2)
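/*
 * Spelled out: LEN_MASK_V1 == 0x0000ffff (length in the low 16 bits) and
 * LEN_MASK_V2 == 0x00003fff (low 14 bits), i.e. the complements of the
 * flag masks above.
 */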
#define NV_TX_LASTPACKET (1<<16)
#define NV_TX_RETRYERROR (1<<19)
#define NV_TX_RETRYCOUNT_MASK (0xF<<20)
#define NV_TX_FORCED_INTERRUPT (1<<24)
#define NV_TX_DEFERRED (1<<26)
#define NV_TX_CARRIERLOST (1<<27)
#define NV_TX_LATECOLLISION (1<<28)
#define NV_TX_UNDERFLOW (1<<29)
#define NV_TX_ERROR (1<<30)
#define NV_TX_VALID (1<<31)

#define NV_TX2_LASTPACKET (1<<29)
#define NV_TX2_RETRYERROR (1<<18)
#define NV_TX2_RETRYCOUNT_MASK (0xF<<19)
#define NV_TX2_FORCED_INTERRUPT (1<<30)
#define NV_TX2_DEFERRED (1<<25)
#define NV_TX2_CARRIERLOST (1<<26)
#define NV_TX2_LATECOLLISION (1<<27)
#define NV_TX2_UNDERFLOW (1<<28)
/* error and valid are the same for both */
#define NV_TX2_ERROR (1<<30)
#define NV_TX2_VALID (1<<31)
#define NV_TX2_TSO (1<<28)
#define NV_TX2_TSO_SHIFT 14
#define NV_TX2_TSO_MAX_SHIFT 14
#define NV_TX2_TSO_MAX_SIZE (1<<NV_TX2_TSO_MAX_SHIFT)
#define NV_TX2_CHECKSUM_L3 (1<<27)
#define NV_TX2_CHECKSUM_L4 (1<<26)

#define NV_TX3_VLAN_TAG_PRESENT (1<<18)

#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
#define NV_RX_SUBSTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
#define NV_RX_ERROR4 (1<<26)
#define NV_RX_CRCERR (1<<27)
#define NV_RX_OVERFLOW (1<<28)
#define NV_RX_FRAMINGERR (1<<29)
#define NV_RX_ERROR (1<<30)
#define NV_RX_AVAIL (1<<31)
#define NV_RX_ERROR_MASK (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3|NV_RX_ERROR4|NV_RX_CRCERR|NV_RX_OVERFLOW|NV_RX_FRAMINGERR)

#define NV_RX2_CHECKSUMMASK (0x1C000000)
#define NV_RX2_CHECKSUM_IP (0x10000000)
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
#define NV_RX2_SUBSTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
#define NV_RX2_ERROR4 (1<<21)
#define NV_RX2_CRCERR (1<<22)
#define NV_RX2_OVERFLOW (1<<23)
#define NV_RX2_FRAMINGERR (1<<24)
/* error and avail are the same for both */
#define NV_RX2_ERROR (1<<30)
#define NV_RX2_AVAIL (1<<31)
#define NV_RX2_ERROR_MASK (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3|NV_RX2_ERROR4|NV_RX2_CRCERR|NV_RX2_OVERFLOW|NV_RX2_FRAMINGERR)

#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
#define NV_PCI_REGSZ_MAX 0x604
/* various timeout delays: all in usec */
#define NV_TXRX_RESET_DELAY 4
#define NV_TXSTOP_DELAY1 10
#define NV_TXSTOP_DELAY1MAX 500000
#define NV_TXSTOP_DELAY2 100
#define NV_RXSTOP_DELAY1 10
#define NV_RXSTOP_DELAY1MAX 500000
#define NV_RXSTOP_DELAY2 100
#define NV_SETUP5_DELAY 5
#define NV_SETUP5_DELAYMAX 50000
#define NV_POWERUP_DELAY 5
#define NV_POWERUP_DELAYMAX 5000
#define NV_MIIBUSY_DELAY 50
#define NV_MIIPHY_DELAY 10
#define NV_MIIPHY_DELAYMAX 10000
#define NV_MAC_RESET_DELAY 64

#define NV_WAKEUPPATTERNS 5
#define NV_WAKEUPMASKENTRIES 4
/* General driver defaults */
#define NV_WATCHDOG_TIMEO (5*HZ)

#define RX_RING_DEFAULT 512
#define TX_RING_DEFAULT 256
#define RX_RING_MIN 128
#define TX_RING_MIN 64
#define RING_MAX_DESC_VER_1 1024
#define RING_MAX_DESC_VER_2_3 16384

/* rx/tx mac addr + type + vlan + align + slack */
#define NV_RX_HEADERS (64)
/* even more slack. */
#define NV_RX_ALLOC_PAD (64)

/* maximum mtu size */
#define NV_PKTLIMIT_1 ETH_DATA_LEN /* hard limit not known */
#define NV_PKTLIMIT_2 9100 /* Actual limit according to NVidia: 9202 */

#define OOM_REFILL (1+HZ/20)
#define POLL_WAIT (1+HZ/100)
#define LINK_TIMEOUT (3*HZ)
#define STATS_INTERVAL (10*HZ)
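/*
 * In wall-clock terms these evaluate, independent of the configured HZ,
 * to roughly 50 ms (OOM_REFILL), 10 ms (POLL_WAIT), 3 s (LINK_TIMEOUT)
 * and 10 s (STATS_INTERVAL).
 */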
/*
 * The nic supports three different descriptor types:
 * - DESC_VER_1: Original
 * - DESC_VER_2: support for jumbo frames.
 * - DESC_VER_3: 64-bit format.
 */
#define DESC_VER_1 1
#define DESC_VER_2 2
#define DESC_VER_3 3
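/* DESC_VER_1 and DESC_VER_2 rings use struct ring_desc ("orig" in
 * union ring_type); DESC_VER_3 uses struct ring_desc_ex and is the only
 * layout for which nv_optimized() below returns true. */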
/* PHY defines */
#define PHY_OUI_MARVELL 0x5043
#define PHY_OUI_CICADA 0x03f1
#define PHY_OUI_VITESSE 0x01c1
#define PHY_OUI_REALTEK 0x0732
#define PHY_OUI_REALTEK2 0x0020
#define PHYID1_OUI_MASK 0x03ff
#define PHYID1_OUI_SHFT 6
#define PHYID2_OUI_MASK 0xfc00
#define PHYID2_OUI_SHFT 10
#define PHYID2_MODEL_MASK 0x03f0
#define PHY_MODEL_REALTEK_8211 0x0110
#define PHY_REV_MASK 0x0001
#define PHY_REV_REALTEK_8211B 0x0000
#define PHY_REV_REALTEK_8211C 0x0001
#define PHY_MODEL_REALTEK_8201 0x0200
#define PHY_MODEL_MARVELL_E3016 0x0220
#define PHY_MARVELL_E3016_INITMASK 0x0300
#define PHY_CICADA_INIT1 0x0f000
#define PHY_CICADA_INIT2 0x0e00
#define PHY_CICADA_INIT3 0x01000
#define PHY_CICADA_INIT4 0x0200
#define PHY_CICADA_INIT5 0x0004
#define PHY_CICADA_INIT6 0x02000
#define PHY_VITESSE_INIT_REG1 0x1f
#define PHY_VITESSE_INIT_REG2 0x10
#define PHY_VITESSE_INIT_REG3 0x11
#define PHY_VITESSE_INIT_REG4 0x12
#define PHY_VITESSE_INIT_MSK1 0xc
#define PHY_VITESSE_INIT_MSK2 0x0180
#define PHY_VITESSE_INIT1 0x52b5
#define PHY_VITESSE_INIT2 0xaf8a
#define PHY_VITESSE_INIT3 0x8
#define PHY_VITESSE_INIT4 0x8f8a
#define PHY_VITESSE_INIT5 0xaf86
#define PHY_VITESSE_INIT6 0x8f86
#define PHY_VITESSE_INIT7 0xaf82
#define PHY_VITESSE_INIT8 0x0100
#define PHY_VITESSE_INIT9 0x8f82
#define PHY_VITESSE_INIT10 0x0
#define PHY_REALTEK_INIT_REG1 0x1f
#define PHY_REALTEK_INIT_REG2 0x19
#define PHY_REALTEK_INIT_REG3 0x13
#define PHY_REALTEK_INIT_REG4 0x14
#define PHY_REALTEK_INIT_REG5 0x18
#define PHY_REALTEK_INIT_REG6 0x11
#define PHY_REALTEK_INIT_REG7 0x01
#define PHY_REALTEK_INIT1 0x0000
#define PHY_REALTEK_INIT2 0x8e00
#define PHY_REALTEK_INIT3 0x0001
#define PHY_REALTEK_INIT4 0xad17
#define PHY_REALTEK_INIT5 0xfb54
#define PHY_REALTEK_INIT6 0xf5c7
#define PHY_REALTEK_INIT7 0x1000
#define PHY_REALTEK_INIT8 0x0003
#define PHY_REALTEK_INIT9 0x0008
#define PHY_REALTEK_INIT10 0x0005
#define PHY_REALTEK_INIT11 0x0200
#define PHY_REALTEK_INIT_MSK1 0x0003

#define PHY_GIGABIT 0x0100

#define PHY_TIMEOUT 0x1
#define PHY_ERROR 0x2

#define PHY_100 0x1
#define PHY_1000 0x2
#define PHY_HALF 0x100
#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
#define NV_PAUSEFRAME_RX_ENABLE 0x0004
#define NV_PAUSEFRAME_TX_ENABLE 0x0008
#define NV_PAUSEFRAME_RX_REQ 0x0010
#define NV_PAUSEFRAME_TX_REQ 0x0020
#define NV_PAUSEFRAME_AUTONEG 0x0040

/* MSI/MSI-X defines */
#define NV_MSI_X_MAX_VECTORS 8
#define NV_MSI_X_VECTORS_MASK 0x000f
#define NV_MSI_CAPABLE 0x0010
#define NV_MSI_X_CAPABLE 0x0020
#define NV_MSI_ENABLED 0x0040
#define NV_MSI_X_ENABLED 0x0080

#define NV_MSI_X_VECTOR_ALL 0x0
#define NV_MSI_X_VECTOR_RX 0x0
#define NV_MSI_X_VECTOR_TX 0x1
#define NV_MSI_X_VECTOR_OTHER 0x2

#define NV_MSI_PRIV_OFFSET 0x68
#define NV_MSI_PRIV_VALUE 0xffffffff

#define NV_RESTART_TX 0x1
#define NV_RESTART_RX 0x2

#define NV_TX_LIMIT_COUNT 16

#define NV_DYNAMIC_THRESHOLD 4
#define NV_DYNAMIC_MAX_QUIET_COUNT 2048
/* statistics */
struct nv_ethtool_str {
	char name[ETH_GSTRING_LEN];
};

static const struct nv_ethtool_str nv_estats_str[] = {
	{ "tx_bytes" },
	{ "tx_zero_rexmt" },
	{ "tx_one_rexmt" },
	{ "tx_many_rexmt" },
	{ "tx_late_collision" },
	{ "tx_fifo_errors" },
	{ "tx_carrier_errors" },
	{ "tx_excess_deferral" },
	{ "tx_retry_error" },
	{ "rx_frame_error" },
	{ "rx_extra_byte" },
	{ "rx_late_collision" },
	{ "rx_runt" },
	{ "rx_frame_too_long" },
	{ "rx_over_errors" },
	{ "rx_crc_errors" },
	{ "rx_frame_align_error" },
	{ "rx_length_error" },
	{ "rx_unicast" },
	{ "rx_multicast" },
	{ "rx_broadcast" },
	{ "rx_packets" },
	{ "rx_errors_total" },
	{ "tx_errors_total" },

	/* version 2 stats */
	{ "tx_deferral" },
	{ "tx_packets" },
	{ "rx_bytes" },
	{ "tx_pause" },
	{ "rx_pause" },
	{ "rx_drop_frame" },

	/* version 3 stats */
	{ "tx_unicast" },
	{ "tx_multicast" },
	{ "tx_broadcast" }
};
struct nv_ethtool_stats {
	u64 tx_bytes;
	u64 tx_zero_rexmt;
	u64 tx_one_rexmt;
	u64 tx_many_rexmt;
	u64 tx_late_collision;
	u64 tx_fifo_errors;
	u64 tx_carrier_errors;
	u64 tx_excess_deferral;
	u64 tx_retry_error;
	u64 rx_frame_error;
	u64 rx_extra_byte;
	u64 rx_late_collision;
	u64 rx_runt;
	u64 rx_frame_too_long;
	u64 rx_over_errors;
	u64 rx_crc_errors;
	u64 rx_frame_align_error;
	u64 rx_length_error;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_packets;
	u64 rx_errors_total;
	u64 tx_errors_total;

	/* version 2 stats */
	u64 tx_deferral;
	u64 tx_packets;
	u64 rx_bytes;
	u64 tx_pause;
	u64 rx_pause;
	u64 rx_drop_frame;

	/* version 3 stats */
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
};
#define NV_DEV_STATISTICS_V3_COUNT (sizeof(struct nv_ethtool_stats)/sizeof(u64))
#define NV_DEV_STATISTICS_V2_COUNT (NV_DEV_STATISTICS_V3_COUNT - 3)
#define NV_DEV_STATISTICS_V1_COUNT (NV_DEV_STATISTICS_V2_COUNT - 6)
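/*
 * These counts line up with nv_get_hw_stats() below: the version 3 layout
 * adds three u64 counters (tx_unicast, tx_multicast, tx_broadcast) on top
 * of version 2, which in turn adds six (tx_deferral, tx_packets, rx_bytes,
 * tx_pause, rx_pause, rx_drop_frame) on top of version 1.
 */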
/* diagnostics */
#define NV_TEST_COUNT_BASE 3
#define NV_TEST_COUNT_EXTENDED 4

static const struct nv_ethtool_str nv_etests_str[] = {
	{ "link (online/offline)" },
	{ "register (offline) " },
	{ "interrupt (offline) " },
	{ "loopback (offline) " }
};
struct register_test {
	__u32 reg;
	__u32 mask;
};

static const struct register_test nv_registers_test[] = {
	{ NvRegUnknownSetupReg6, 0x01 },
	{ NvRegMisc1, 0x03c },
	{ NvRegOffloadConfig, 0x03ff },
	{ NvRegMulticastAddrA, 0xffffffff },
	{ NvRegTxWatermark, 0x0ff },
	{ NvRegWakeUpFlags, 0x07777 },
	{ 0, 0 }
};
struct nv_skb_map {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned int dma_len:31;
	unsigned int dma_single:1;
	struct ring_desc_ex *first_tx_desc;
	struct nv_skb_map *next_tx_ctx;
};
/*
 * SMP locking:
 * All hardware access under netdev_priv(dev)->lock, except the performance
 * critical parts:
 * - rx is (pseudo-) lockless: it relies on the single-threading provided
 *	by the arch code for interrupts.
 * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
 *	needs netdev_priv(dev)->lock :-(
 * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
 */
/* in dev: base, irq */
struct fe_priv {
	spinlock_t lock;

	struct net_device *dev;
	struct napi_struct napi;

	/* General data:
	 * Locking: spin_lock(&np->lock); */
	struct nv_ethtool_stats estats;
	unsigned int phy_oui;
	unsigned int phy_model;
	unsigned int phy_rev;

	/* General data: RO fields */
	dma_addr_t ring_addr;
	struct pci_dev *pci_dev;

	/* rx specific fields.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	union ring_type get_rx, put_rx, first_rx, last_rx;
	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
	struct nv_skb_map *rx_skb;

	union ring_type rx_ring;
	unsigned int rx_buf_sz;
	unsigned int pkt_limit;
	struct timer_list oom_kick;
	struct timer_list nic_poll;
	struct timer_list stats_poll;

	/* media detection workaround.
	 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
	 */
	int need_linktimer;
	unsigned long link_timeout;

	/*
	 * tx specific fields.
	 */
	union ring_type get_tx, put_tx, first_tx, last_tx;
	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
	struct nv_skb_map *tx_skb;

	union ring_type tx_ring;
	u32 tx_pkts_in_progress;
	struct nv_skb_map *tx_change_owner;
	struct nv_skb_map *tx_end_flip;

	struct vlan_group *vlangrp;

	/* msi/msi-x fields */
	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];

	/* power saved state */
	u32 saved_config_space[NV_PCI_REGSZ_MAX/4];

	/* for different msi-x irq type */
	char name_rx[IFNAMSIZ + 3]; /* -rx */
	char name_tx[IFNAMSIZ + 3]; /* -tx */
	char name_other[IFNAMSIZ + 6]; /* -other */
};
/*
 * Maximum number of loops until we assume that a bit in the irq mask
 * is stuck. Overridable with module param.
 */
static int max_interrupt_work = 4;
/*
 * Optimization can be either throughput mode or cpu mode
 *
 * Throughput Mode: Every tx and rx packet will generate an interrupt.
 * CPU Mode: Interrupts are controlled by a timer.
 */
enum {
	NV_OPTIMIZATION_MODE_THROUGHPUT,
	NV_OPTIMIZATION_MODE_CPU,
	NV_OPTIMIZATION_MODE_DYNAMIC
};
static int optimization_mode = NV_OPTIMIZATION_MODE_DYNAMIC;
/*
 * Poll interval for timer irq
 *
 * This interval determines how frequently an interrupt is generated.
 * This value is determined by [(time_in_micro_secs * 100) / (2^10)]
 * Min = 0, and Max = 65535
 */
static int poll_interval = -1;
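/*
 * Worked example of the formula above: for a 1 ms interval,
 * (1000 us * 100) / 2^10 = 100000 / 1024 ~= 97.7, which matches the
 * "NVREG_POLL_DEFAULT=97 would result in an interval length of 1 ms"
 * note next to NvRegPollingInterval.
 */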
/*
 * MSI interrupts
 */
enum {
	NV_MSI_INT_DISABLED,
	NV_MSI_INT_ENABLED
};
static int msi = NV_MSI_INT_ENABLED;

/*
 * MSIX interrupts
 */
enum {
	NV_MSIX_INT_DISABLED,
	NV_MSIX_INT_ENABLED
};
static int msix = NV_MSIX_INT_ENABLED;

/*
 * DMA 64bit
 */
enum {
	NV_DMA_64BIT_DISABLED,
	NV_DMA_64BIT_ENABLED
};
static int dma_64bit = NV_DMA_64BIT_ENABLED;

/*
 * Crossover Detection
 * Realtek 8201 phy + some OEM boards do not work properly.
 */
enum {
	NV_CROSSOVER_DETECTION_DISABLED,
	NV_CROSSOVER_DETECTION_ENABLED
};
static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;

/*
 * Power down phy when interface is down (persists through reboot;
 * older Linux and other OSes may not power it up again)
 */
static int phy_power_down;
static inline struct fe_priv *get_nvpriv(struct net_device *dev)
{
	return netdev_priv(dev);
}

static inline u8 __iomem *get_hwbase(struct net_device *dev)
{
	return ((struct fe_priv *)netdev_priv(dev))->base;
}

static inline void pci_push(u8 __iomem *base)
{
	/* force out pending posted writes */
	readl(base);
}

static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen)
		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
}

static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
{
	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}

static bool nv_optimized(struct fe_priv *np)
{
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
		return false;
	return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
		     int delay, int delaymax, const char *msg)
{
	u8 __iomem *base = get_hwbase(dev);

	pci_push(base);
	do {
		udelay(delay);
		delaymax -= delay;
		if (delaymax < 0) {
			if (msg)
				printk(msg);
			return 1;
		}
	} while ((readl(base + offset) & mask) != target);
	return 0;
}
#define NV_SETUP_RX_RING 0x01
#define NV_SETUP_TX_RING 0x02

static inline u32 dma_low(dma_addr_t addr)
{
	return addr;
}

static inline u32 dma_high(dma_addr_t addr)
{
	return addr>>31>>1; /* 0 if 32bit, shift down by 32 if 64bit */
}
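/*
 * Why ">>31>>1" instead of a plain ">>32": when dma_addr_t is only a
 * 32-bit type, a single shift by 32 would be undefined behaviour in C,
 * while the split shift is well defined and yields 0, as the comment
 * above notes.
 */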
static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (!nv_optimized(np)) {
		if (rxtx_flags & NV_SETUP_RX_RING)
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
		if (rxtx_flags & NV_SETUP_TX_RING)
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
	} else {
		if (rxtx_flags & NV_SETUP_RX_RING) {
			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
			writel(dma_high(np->ring_addr), base + NvRegRxRingPhysAddrHigh);
		}
		if (rxtx_flags & NV_SETUP_TX_RING) {
			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
			writel(dma_high(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddrHigh);
		}
	}
}
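/*
 * Layout implied above: rx and tx descriptors live in one contiguous DMA
 * allocation, rx ring first, so the tx base is ring_addr plus
 * rx_ring_size * sizeof(descriptor); free_rings() below releases the
 * combined block with a single pci_free_consistent() call.
 */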
static void free_rings(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!nv_optimized(np)) {
		if (np->rx_ring.orig)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.orig, np->ring_addr);
	} else {
		if (np->rx_ring.ex)
			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
					    np->rx_ring.ex, np->ring_addr);
	}
	kfree(np->rx_skb);
	kfree(np->tx_skb);
}
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
		return 0;
	else
		return 1;
}
static void nv_txrx_gate(struct net_device *dev, bool gate)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 powerstate;

	if (!np->mac_in_use &&
	    (np->driver_data & DEV_HAS_POWER_CNTRL)) {
		powerstate = readl(base + NvRegPowerState2);
		if (gate)
			powerstate |= NVREG_POWERSTATE2_GATE_CLOCKS;
		else
			powerstate &= ~NVREG_POWERSTATE2_GATE_CLOCKS;
		writel(powerstate, base + NvRegPowerState2);
	}
}
static void nv_enable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			enable_irq(np->pci_dev->irq);
	} else {
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
static void nv_disable_irq(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!using_multi_irqs(dev)) {
		if (np->msi_flags & NV_MSI_X_ENABLED)
			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
		else
			disable_irq(np->pci_dev->irq);
	} else {
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
	}
}
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
	u8 __iomem *base = get_hwbase(dev);

	writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);

	if (np->msi_flags & NV_MSI_X_ENABLED) {
		writel(mask, base + NvRegIrqMask);
	} else {
		if (np->msi_flags & NV_MSI_ENABLED)
			writel(0, base + NvRegMSIIrqMask);
		writel(0, base + NvRegIrqMask);
	}
}
static void nv_napi_enable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_enable(&np->napi);
}

static void nv_napi_disable(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	napi_disable(&np->napi);
}
#define MII_READ (-1)
/* mii_rw: read/write a register on the PHY.
 *
 * Caller must guarantee serialization
 */
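/*
 * Usage sketch, taken from the call patterns later in this file: a read
 * returns the register value (or -1 on timeout), a write returns 0 on
 * success:
 *
 *	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 *	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg))
 *		... the write failed ...
 */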
static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
{
	u8 __iomem *base = get_hwbase(dev);
	u32 reg;
	int retval;

	writel(NVREG_MIISTAT_MASK_RW, base + NvRegMIIStatus);

	reg = readl(base + NvRegMIIControl);
	if (reg & NVREG_MIICTL_INUSE) {
		writel(NVREG_MIICTL_INUSE, base + NvRegMIIControl);
		udelay(NV_MIIBUSY_DELAY);
	}

	reg = (addr << NVREG_MIICTL_ADDRSHIFT) | miireg;
	if (value != MII_READ) {
		writel(value, base + NvRegMIIData);
		reg |= NVREG_MIICTL_WRITE;
	}
	writel(reg, base + NvRegMIIControl);

	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else if (value != MII_READ) {
		/* it was a write operation - fewer failures are detectable */
		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
			dev->name, value, miireg, addr);
		retval = 0;
	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
			dev->name, miireg, addr);
		retval = -1;
	} else {
		retval = readl(base + NvRegMIIData);
		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
			dev->name, miireg, addr, retval);
	}

	return retval;
}
static int phy_reset(struct net_device *dev, u32 bmcr_setup)
{
	struct fe_priv *np = netdev_priv(dev);
	u32 miicontrol;
	unsigned int tries = 0;

	miicontrol = BMCR_RESET | bmcr_setup;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
		return -1;

	/* wait for 500ms */
	msleep(500);

	/* must wait till reset is deasserted */
	while (miicontrol & BMCR_RESET) {
		msleep(10);
		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
		/* FIXME: 100 tries seem excessive */
		if (tries++ > 100)
			return -1;
	}
	return 0;
}
static int phy_init(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000, reg;

	/* phy errata for E3016 phy */
	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		reg &= ~PHY_MARVELL_E3016_INITMASK;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211C) {
			u32 powerstate = readl(base + NvRegPowerState2);

			/* need to perform hw phy reset */
			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
			writel(powerstate, base + NvRegPowerState2);
			msleep(25);

			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
			reg |= PHY_REALTEK_INIT9;
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
			if (!(reg & PHY_REALTEK_INIT11)) {
				reg |= PHY_REALTEK_INIT11;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* set advertise register */
	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
		return PHY_ERROR;
	}

	/* get phy interface type */
	phyinterface = readl(base + NvRegPhyInterface);

	/* see if gigabit phy */
	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
	if (mii_status & PHY_GIGABIT) {
		np->gigabit = PHY_GIGABIT;
		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
		mii_control_1000 &= ~ADVERTISE_1000HALF;
		if (phyinterface & PHY_RGMII)
			mii_control_1000 |= ADVERTISE_1000FULL;
		else
			mii_control_1000 &= ~ADVERTISE_1000FULL;

		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else
		np->gigabit = 0;

	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= BMCR_ANENABLE;

	if (np->phy_oui == PHY_OUI_REALTEK &&
	    np->phy_model == PHY_MODEL_REALTEK_8211 &&
	    np->phy_rev == PHY_REV_REALTEK_8211C) {
		/* start autoneg since we already performed hw reset above */
		mii_control |= BMCR_ANRESTART;
		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	} else {
		/* reset the phy
		 * (certain phys need bmcr to be setup with reset)
		 */
		if (phy_reset(dev, mii_control)) {
			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}

	/* phy vendor specific configuration */
	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII)) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
		phy_reserved |= PHY_CICADA_INIT5;
		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_CICADA) {
		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
		phy_reserved |= PHY_CICADA_INIT6;
		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_VITESSE) {
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
		phy_reserved |= PHY_VITESSE_INIT3;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
		phy_reserved |= PHY_VITESSE_INIT8;
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
			return PHY_ERROR;
		}
	}
	if (np->phy_oui == PHY_OUI_REALTEK) {
		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
		    np->phy_rev == PHY_REV_REALTEK_8211B) {
			/* reset could have cleared these out, set them back */
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
				return PHY_ERROR;
			}
		}
		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
				phy_reserved |= PHY_REALTEK_INIT7;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
				phy_reserved |= PHY_REALTEK_INIT3;
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
					return PHY_ERROR;
				}
			}
		}
	}

	/* some phys clear out pause advertisement on reset, set it back */
	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);

	/* restart auto negotiation, power down phy */
	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
	if (phy_power_down)
		mii_control |= BMCR_PDOWN;
	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
		return PHY_ERROR;

	return 0;
}
static void nv_start_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
	/* Already running? Stop it. */
	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
		rx_ctrl &= ~NVREG_RCVCTL_START;
		writel(rx_ctrl, base + NvRegReceiverControl);
		pci_push(base);
	}
	writel(np->linkspeed, base + NvRegLinkSpeed);
	pci_push(base);
	rx_ctrl |= NVREG_RCVCTL_START;
	if (np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
		dev->name, np->duplex, np->linkspeed);
	pci_push(base);
}
static void nv_stop_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 rx_ctrl = readl(base + NvRegReceiverControl);

	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
	if (!np->mac_in_use)
		rx_ctrl &= ~NVREG_RCVCTL_START;
	else
		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
	writel(rx_ctrl, base + NvRegReceiverControl);
	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");

	udelay(NV_RXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(0, base + NvRegLinkSpeed);
}
static void nv_start_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
	tx_ctrl |= NVREG_XMITCTL_START;
	if (np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	pci_push(base);
}
static void nv_stop_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 tx_ctrl = readl(base + NvRegTransmitterControl);

	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
	if (!np->mac_in_use)
		tx_ctrl &= ~NVREG_XMITCTL_START;
	else
		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
	writel(tx_ctrl, base + NvRegTransmitterControl);
	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");

	udelay(NV_TXSTOP_DELAY2);
	if (!np->mac_in_use)
		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
		       base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
	nv_start_rx(dev);
	nv_start_tx(dev);
}

static void nv_stop_rxtx(struct net_device *dev)
{
	nv_stop_rx(dev);
	nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
	udelay(NV_TXRX_RESET_DELAY);
	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_mac_reset(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);
	u32 temp1, temp2, temp3;

	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);

	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);

	/* save registers since they will be cleared on reset */
	temp1 = readl(base + NvRegMacAddrA);
	temp2 = readl(base + NvRegMacAddrB);
	temp3 = readl(base + NvRegTransmitPoll);

	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);
	writel(0, base + NvRegMacReset);
	pci_push(base);
	udelay(NV_MAC_RESET_DELAY);

	/* restore saved registers */
	writel(temp1, base + NvRegMacAddrA);
	writel(temp2, base + NvRegMacAddrB);
	writel(temp3, base + NvRegTransmitPoll);

	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
	pci_push(base);
}
static void nv_get_hw_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	u8 __iomem *base = get_hwbase(dev);

	np->estats.tx_bytes += readl(base + NvRegTxCnt);
	np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
	np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
	np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
	np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
	np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
	np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
	np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
	np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
	np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
	np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
	np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
	np->estats.rx_runt += readl(base + NvRegRxRunt);
	np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
	np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
	np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
	np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
	np->estats.rx_length_error += readl(base + NvRegRxLenErr);
	np->estats.rx_unicast += readl(base + NvRegRxUnicast);
	np->estats.rx_multicast += readl(base + NvRegRxMulticast);
	np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
	np->estats.rx_packets =
		np->estats.rx_unicast +
		np->estats.rx_multicast +
		np->estats.rx_broadcast;
	np->estats.rx_errors_total =
		np->estats.rx_crc_errors +
		np->estats.rx_over_errors +
		np->estats.rx_frame_error +
		(np->estats.rx_frame_align_error - np->estats.rx_extra_byte) +
		np->estats.rx_late_collision +
		np->estats.rx_runt +
		np->estats.rx_frame_too_long;
	np->estats.tx_errors_total =
		np->estats.tx_late_collision +
		np->estats.tx_fifo_errors +
		np->estats.tx_carrier_errors +
		np->estats.tx_excess_deferral +
		np->estats.tx_retry_error;

	if (np->driver_data & DEV_HAS_STATISTICS_V2) {
		np->estats.tx_deferral += readl(base + NvRegTxDef);
		np->estats.tx_packets += readl(base + NvRegTxFrame);
		np->estats.rx_bytes += readl(base + NvRegRxCnt);
		np->estats.tx_pause += readl(base + NvRegTxPause);
		np->estats.rx_pause += readl(base + NvRegRxPause);
		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
	}

	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
		np->estats.tx_unicast += readl(base + NvRegTxUnicast);
		np->estats.tx_multicast += readl(base + NvRegTxMulticast);
		np->estats.tx_broadcast += readl(base + NvRegTxBroadcast);
	}
}
/*
 * nv_get_stats: dev->get_stats function
 * Get latest stats value from the nic.
 * Called with read_lock(&dev_base_lock) held for read -
 * only synchronized against unregister_netdevice.
 */
static struct net_device_stats *nv_get_stats(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	/* If the nic supports hw counters then retrieve latest values */
	if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
		nv_get_hw_stats(dev);

		/* copy to net_device stats */
		dev->stats.tx_bytes = np->estats.tx_bytes;
		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
		dev->stats.rx_over_errors = np->estats.rx_over_errors;
		dev->stats.rx_errors = np->estats.rx_errors_total;
		dev->stats.tx_errors = np->estats.tx_errors_total;
	}

	return &dev->stats;
}
/*
 * nv_alloc_rx: fill rx ring entries.
 * Return 1 if the allocations for the skbs failed and the
 * rx engine is without available descriptors.
 */
static int nv_alloc_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc *less_rx;

	less_rx = np->get_rx.orig;
	if (less_rx-- == np->first_rx.orig)
		less_rx = np->last_rx.orig;

	while (np->put_rx.orig != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.orig->buf = cpu_to_le32(np->put_rx_ctx->dma);
			wmb();
			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
				np->put_rx.orig = np->first_rx.orig;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
static int nv_alloc_rx_optimized(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	struct ring_desc_ex *less_rx;

	less_rx = np->get_rx.ex;
	if (less_rx-- == np->first_rx.ex)
		less_rx = np->last_rx.ex;

	while (np->put_rx.ex != less_rx) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
		if (skb) {
			np->put_rx_ctx->skb = skb;
			np->put_rx_ctx->dma = pci_map_single(np->pci_dev,
							     skb->data,
							     skb_tailroom(skb),
							     PCI_DMA_FROMDEVICE);
			np->put_rx_ctx->dma_len = skb_tailroom(skb);
			np->put_rx.ex->bufhigh = cpu_to_le32(dma_high(np->put_rx_ctx->dma));
			np->put_rx.ex->buflow = cpu_to_le32(dma_low(np->put_rx_ctx->dma));
			wmb();
			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
				np->put_rx.ex = np->first_rx.ex;
			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
				np->put_rx_ctx = np->first_rx_ctx;
		} else {
			return 1;
		}
	}
	return 0;
}
/* Called after 50 ms if the rx buffers are exhausted, to attempt a refill */
static void nv_do_rx_refill(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct fe_priv *np = netdev_priv(dev);

	/* Just reschedule NAPI rx processing */
	napi_schedule(&np->napi);
}
static void nv_init_rx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;

	if (!nv_optimized(np))
		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
	else
		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];

	for (i = 0; i < np->rx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->rx_ring.orig[i].flaglen = 0;
			np->rx_ring.orig[i].buf = 0;
		} else {
			np->rx_ring.ex[i].flaglen = 0;
			np->rx_ring.ex[i].txvlan = 0;
			np->rx_ring.ex[i].bufhigh = 0;
			np->rx_ring.ex[i].buflow = 0;
		}
		np->rx_skb[i].skb = NULL;
		np->rx_skb[i].dma = 0;
	}
}
static void nv_init_tx(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);
	int i;

	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;

	if (!nv_optimized(np))
		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
	else
		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
	np->tx_pkts_in_progress = 0;
	np->tx_change_owner = NULL;
	np->tx_end_flip = NULL;
	np->tx_stop = 0;

	for (i = 0; i < np->tx_ring_size; i++) {
		if (!nv_optimized(np)) {
			np->tx_ring.orig[i].flaglen = 0;
			np->tx_ring.orig[i].buf = 0;
		} else {
			np->tx_ring.ex[i].flaglen = 0;
			np->tx_ring.ex[i].txvlan = 0;
			np->tx_ring.ex[i].bufhigh = 0;
			np->tx_ring.ex[i].buflow = 0;
		}
		np->tx_skb[i].skb = NULL;
		np->tx_skb[i].dma = 0;
		np->tx_skb[i].dma_len = 0;
		np->tx_skb[i].dma_single = 0;
		np->tx_skb[i].first_tx_desc = NULL;
		np->tx_skb[i].next_tx_ctx = NULL;
	}
}
static int nv_init_ring(struct net_device *dev)
{
	struct fe_priv *np = netdev_priv(dev);

	nv_init_tx(dev);
	nv_init_rx(dev);

	if (!nv_optimized(np))
		return nv_alloc_rx(dev);
	else
		return nv_alloc_rx_optimized(dev);
}
1889 static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1892 if (tx_skb->dma_single)
1893 pci_unmap_single(np->pci_dev, tx_skb->dma,
1897 pci_unmap_page(np->pci_dev, tx_skb->dma,
1904 static int nv_release_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
1906 nv_unmap_txskb(np, tx_skb);
1908 dev_kfree_skb_any(tx_skb->skb);
1915 static void nv_drain_tx(struct net_device *dev)
1917 struct fe_priv *np = netdev_priv(dev);
1920 for (i = 0; i < np->tx_ring_size; i++) {
1921 if (!nv_optimized(np)) {
1922 np->tx_ring.orig[i].flaglen = 0;
1923 np->tx_ring.orig[i].buf = 0;
1925 np->tx_ring.ex[i].flaglen = 0;
1926 np->tx_ring.ex[i].txvlan = 0;
1927 np->tx_ring.ex[i].bufhigh = 0;
1928 np->tx_ring.ex[i].buflow = 0;
1930 if (nv_release_txskb(np, &np->tx_skb[i]))
1931 dev->stats.tx_dropped++;
1932 np->tx_skb[i].dma = 0;
1933 np->tx_skb[i].dma_len = 0;
1934 np->tx_skb[i].dma_single = 0;
1935 np->tx_skb[i].first_tx_desc = NULL;
1936 np->tx_skb[i].next_tx_ctx = NULL;
1938 np->tx_pkts_in_progress = 0;
1939 np->tx_change_owner = NULL;
1940 np->tx_end_flip = NULL;
1943 static void nv_drain_rx(struct net_device *dev)
1945 struct fe_priv *np = netdev_priv(dev);
1948 for (i = 0; i < np->rx_ring_size; i++) {
1949 if (!nv_optimized(np)) {
1950 np->rx_ring.orig[i].flaglen = 0;
1951 np->rx_ring.orig[i].buf = 0;
1953 np->rx_ring.ex[i].flaglen = 0;
1954 np->rx_ring.ex[i].txvlan = 0;
1955 np->rx_ring.ex[i].bufhigh = 0;
1956 np->rx_ring.ex[i].buflow = 0;
1959 if (np->rx_skb[i].skb) {
1960 pci_unmap_single(np->pci_dev, np->rx_skb[i].dma,
1961 (skb_end_pointer(np->rx_skb[i].skb) -
1962 np->rx_skb[i].skb->data),
1963 PCI_DMA_FROMDEVICE);
1964 dev_kfree_skb(np->rx_skb[i].skb);
1965 np->rx_skb[i].skb = NULL;
1970 static void nv_drain_rxtx(struct net_device *dev)
1976 static inline u32 nv_get_empty_tx_slots(struct fe_priv *np)
1978 return (u32)(np->tx_ring_size - ((np->tx_ring_size + (np->put_tx_ctx - np->get_tx_ctx)) % np->tx_ring_size));
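/* Worked example (illustrative, not part of the driver): with
 * tx_ring_size = 8 and put_tx_ctx five slots ahead of get_tx_ctx,
 * this yields 8 - ((8 + 5) % 8) = 3 empty slots; when the two
 * pointers are equal the ring is idle and all 8 slots are free. */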
1981 static void nv_legacybackoff_reseed(struct net_device *dev)
1983 u8 __iomem *base = get_hwbase(dev);
1988 reg = readl(base + NvRegSlotTime) & ~NVREG_SLOTTIME_MASK;
1989 get_random_bytes(&low, sizeof(low));
1990 reg |= low & NVREG_SLOTTIME_MASK;
1992 /* Need to stop tx before the change takes effect.
1993 * Caller already holds np->lock.
1995 tx_status = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START;
1999 writel(reg, base + NvRegSlotTime);
2005 /* Gear Backoff Seeds */
2006 #define BACKOFF_SEEDSET_ROWS 8
2007 #define BACKOFF_SEEDSET_LFSRS 15
2009 /* Known Good seed sets */
2010 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2011 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2012 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2013 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2014 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2015 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2016 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2017 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2018 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2020 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2022 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2023 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2024 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2025 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2026 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2027 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2028 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2030 static void nv_gear_backoff_reseed(struct net_device *dev)
2032 u8 __iomem *base = get_hwbase(dev);
2033 u32 miniseed1, miniseed2, miniseed2_reversed, miniseed3, miniseed3_reversed;
2034 u32 temp, seedset, combinedSeed;
2037 /* Setup seed for free running LFSR */
2038 /* We are going to gather 3 random values
2039 and swizzle bits around to increase randomness */
2040 get_random_bytes(&miniseed1, sizeof(miniseed1));
2041 miniseed1 &= 0x0fff;
2045 get_random_bytes(&miniseed2, sizeof(miniseed2));
2046 miniseed2 &= 0x0fff;
2049 miniseed2_reversed =
2050 ((miniseed2 & 0xF00) >> 8) |
2051 (miniseed2 & 0x0F0) |
2052 ((miniseed2 & 0x00F) << 8);
2054 get_random_bytes(&miniseed3, sizeof(miniseed3));
2055 miniseed3 &= 0x0fff;
2058 miniseed3_reversed =
2059 ((miniseed3 & 0xF00) >> 8) |
2060 (miniseed3 & 0x0F0) |
2061 ((miniseed3 & 0x00F) << 8);
2063 combinedSeed = ((miniseed1 ^ miniseed2_reversed) << 12) |
2064 (miniseed2 ^ miniseed3_reversed);
2066 /* Seeds must not be zero */
2067 if ((combinedSeed & NVREG_BKOFFCTRL_SEED_MASK) == 0)
2068 combinedSeed |= 0x08;
2069 if ((combinedSeed & (NVREG_BKOFFCTRL_SEED_MASK << NVREG_BKOFFCTRL_GEAR)) == 0)
2070 combinedSeed |= 0x8000;
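/* Rationale for the fixups above: each half of combinedSeed feeds a
 * free-running LFSR, and an all-zero seed would lock that register at
 * zero forever. The nibble reversal swaps the high and low nibbles of
 * a 12-bit value (e.g. 0xABC becomes 0xCBA), presumably to decorrelate
 * the two halves of the combined seed. */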
2072 /* No need to disable tx here */
2073 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2074 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2075 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2076 writel(temp, base + NvRegBackOffControl);
2078 /* Setup seeds for all gear LFSRs. */
2079 get_random_bytes(&seedset, sizeof(seedset));
2080 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2081 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2082 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2083 temp |= main_seedset[seedset][i-1] & 0x3ff;
2084 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
2085 writel(temp, base + NvRegBackOffControl);
2090 * nv_start_xmit: dev->hard_start_xmit function
2091 * Called with netif_tx_lock held.
2093 static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2095 struct fe_priv *np = netdev_priv(dev);
2097 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
2098 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2102 u32 size = skb_headlen(skb);
2103 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
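/* This is a ceiling division: each descriptor carries at most
 * NV_TX2_TSO_MAX_SIZE bytes, so entries = ceil(size / NV_TX2_TSO_MAX_SIZE),
 * written with shift and mask because the limit is a power of two. */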
2105 struct ring_desc *put_tx;
2106 struct ring_desc *start_tx;
2107 struct ring_desc *prev_tx;
2108 struct nv_skb_map *prev_tx_ctx;
2109 unsigned long flags;
2111 /* add fragments to entries count */
2112 for (i = 0; i < fragments; i++) {
2113 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2114 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2117 spin_lock_irqsave(&np->lock, flags);
2118 empty_slots = nv_get_empty_tx_slots(np);
2119 if (unlikely(empty_slots <= entries)) {
2120 netif_stop_queue(dev);
2122 spin_unlock_irqrestore(&np->lock, flags);
2123 return NETDEV_TX_BUSY;
2125 spin_unlock_irqrestore(&np->lock, flags);
2127 start_tx = put_tx = np->put_tx.orig;
2129 /* setup the header buffer */
2132 prev_tx_ctx = np->put_tx_ctx;
2133 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2134 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2136 np->put_tx_ctx->dma_len = bcnt;
2137 np->put_tx_ctx->dma_single = 1;
2138 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2139 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2141 tx_flags = np->tx_flags;
2144 if (unlikely(put_tx++ == np->last_tx.orig))
2145 put_tx = np->first_tx.orig;
2146 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2147 np->put_tx_ctx = np->first_tx_ctx;
2150 /* setup the fragments */
2151 for (i = 0; i < fragments; i++) {
2152 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2153 u32 size = frag->size;
2158 prev_tx_ctx = np->put_tx_ctx;
2159 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2160 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2162 np->put_tx_ctx->dma_len = bcnt;
2163 np->put_tx_ctx->dma_single = 0;
2164 put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
2165 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2169 if (unlikely(put_tx++ == np->last_tx.orig))
2170 put_tx = np->first_tx.orig;
2171 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2172 np->put_tx_ctx = np->first_tx_ctx;
2176 /* set last fragment flag */
2177 prev_tx->flaglen |= cpu_to_le32(tx_flags_extra);
2179 /* save skb in this slot's context area */
2180 prev_tx_ctx->skb = skb;
2182 if (skb_is_gso(skb))
2183 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2185 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2186 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2188 spin_lock_irqsave(&np->lock, flags);
2191 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2192 np->put_tx.orig = put_tx;
2194 spin_unlock_irqrestore(&np->lock, flags);
2196 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2197 dev->name, entries, tx_flags_extra);
2200 for (j = 0; j < 64; j++) {
2202 dprintk("\n%03x:", j);
2203 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2208 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2209 return NETDEV_TX_OK;
2212 static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2213 struct net_device *dev)
2215 struct fe_priv *np = netdev_priv(dev);
2218 unsigned int fragments = skb_shinfo(skb)->nr_frags;
2222 u32 size = skb_headlen(skb);
2223 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2225 struct ring_desc_ex *put_tx;
2226 struct ring_desc_ex *start_tx;
2227 struct ring_desc_ex *prev_tx;
2228 struct nv_skb_map *prev_tx_ctx;
2229 struct nv_skb_map *start_tx_ctx;
2230 unsigned long flags;
2232 /* add fragments to entries count */
2233 for (i = 0; i < fragments; i++) {
2234 entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) +
2235 ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2238 spin_lock_irqsave(&np->lock, flags);
2239 empty_slots = nv_get_empty_tx_slots(np);
2240 if (unlikely(empty_slots <= entries)) {
2241 netif_stop_queue(dev);
2243 spin_unlock_irqrestore(&np->lock, flags);
2244 return NETDEV_TX_BUSY;
2246 spin_unlock_irqrestore(&np->lock, flags);
2248 start_tx = put_tx = np->put_tx.ex;
2249 start_tx_ctx = np->put_tx_ctx;
2251 /* setup the header buffer */
2254 prev_tx_ctx = np->put_tx_ctx;
2255 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2256 np->put_tx_ctx->dma = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
2258 np->put_tx_ctx->dma_len = bcnt;
2259 np->put_tx_ctx->dma_single = 1;
2260 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2261 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2262 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2264 tx_flags = NV_TX2_VALID;
2267 if (unlikely(put_tx++ == np->last_tx.ex))
2268 put_tx = np->first_tx.ex;
2269 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2270 np->put_tx_ctx = np->first_tx_ctx;
2273 /* setup the fragments */
2274 for (i = 0; i < fragments; i++) {
2275 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2276 u32 size = frag->size;
2281 prev_tx_ctx = np->put_tx_ctx;
2282 bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
2283 np->put_tx_ctx->dma = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
2285 np->put_tx_ctx->dma_len = bcnt;
2286 np->put_tx_ctx->dma_single = 0;
2287 put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
2288 put_tx->buflow = cpu_to_le32(dma_low(np->put_tx_ctx->dma));
2289 put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
2293 if (unlikely(put_tx++ == np->last_tx.ex))
2294 put_tx = np->first_tx.ex;
2295 if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
2296 np->put_tx_ctx = np->first_tx_ctx;
2300 /* set last fragment flag */
2301 prev_tx->flaglen |= cpu_to_le32(NV_TX2_LASTPACKET);
2303 /* save skb in this slot's context area */
2304 prev_tx_ctx->skb = skb;
2306 if (skb_is_gso(skb))
2307 tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT);
2309 tx_flags_extra = skb->ip_summed == CHECKSUM_PARTIAL ?
2310 NV_TX2_CHECKSUM_L3 | NV_TX2_CHECKSUM_L4 : 0;
2313 if (vlan_tx_tag_present(skb))
2314 start_tx->txvlan = cpu_to_le32(NV_TX3_VLAN_TAG_PRESENT |
2315 vlan_tx_tag_get(skb));
2317 start_tx->txvlan = 0;
2319 spin_lock_irqsave(&np->lock, flags);
2322 /* Limit the number of outstanding tx. Set up all fragments, but
2323 * do not set the VALID bit on the first descriptor. Save a pointer
2324 * to that descriptor and also to the next skb_map element.
2327 if (np->tx_pkts_in_progress == NV_TX_LIMIT_COUNT) {
2328 if (!np->tx_change_owner)
2329 np->tx_change_owner = start_tx_ctx;
2331 /* remove VALID bit */
2332 tx_flags &= ~NV_TX2_VALID;
2333 start_tx_ctx->first_tx_desc = start_tx;
2334 start_tx_ctx->next_tx_ctx = np->put_tx_ctx;
2335 np->tx_end_flip = np->put_tx_ctx;
2337 np->tx_pkts_in_progress++;
2342 start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
2343 np->put_tx.ex = put_tx;
2345 spin_unlock_irqrestore(&np->lock, flags);
2347 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2348 dev->name, entries, tx_flags_extra);
2351 for (j = 0; j < 64; j++) {
2353 dprintk("\n%03x:", j);
2354 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2359 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2360 return NETDEV_TX_OK;
2363 static inline void nv_tx_flip_ownership(struct net_device *dev)
2365 struct fe_priv *np = netdev_priv(dev);
2367 np->tx_pkts_in_progress--;
2368 if (np->tx_change_owner) {
2369 np->tx_change_owner->first_tx_desc->flaglen |=
2370 cpu_to_le32(NV_TX2_VALID);
2371 np->tx_pkts_in_progress++;
2373 np->tx_change_owner = np->tx_change_owner->next_tx_ctx;
2374 if (np->tx_change_owner == np->tx_end_flip)
2375 np->tx_change_owner = NULL;
2377 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
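/* Summary of the limiting scheme: nv_start_xmit_optimized() queues
 * packets beyond NV_TX_LIMIT_COUNT with the first descriptor left
 * invalid; as each in-flight packet completes, this function hands one
 * deferred packet to the hardware by setting its VALID bit and kicking
 * the transmitter. */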
2382 * nv_tx_done: check for completed packets, release the skbs.
2384 * Caller must own np->lock.
2386 static int nv_tx_done(struct net_device *dev, int limit)
2388 struct fe_priv *np = netdev_priv(dev);
2391 struct ring_desc *orig_get_tx = np->get_tx.orig;
2393 while ((np->get_tx.orig != np->put_tx.orig) &&
2394 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2395 (tx_work < limit)) {
2397 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2400 nv_unmap_txskb(np, np->get_tx_ctx);
2402 if (np->desc_ver == DESC_VER_1) {
2403 if (flags & NV_TX_LASTPACKET) {
2404 if (flags & NV_TX_ERROR) {
2405 if (flags & NV_TX_UNDERFLOW)
2406 dev->stats.tx_fifo_errors++;
2407 if (flags & NV_TX_CARRIERLOST)
2408 dev->stats.tx_carrier_errors++;
2409 if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
2410 nv_legacybackoff_reseed(dev);
2411 dev->stats.tx_errors++;
2413 dev->stats.tx_packets++;
2414 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2416 dev_kfree_skb_any(np->get_tx_ctx->skb);
2417 np->get_tx_ctx->skb = NULL;
2421 if (flags & NV_TX2_LASTPACKET) {
2422 if (flags & NV_TX2_ERROR) {
2423 if (flags & NV_TX2_UNDERFLOW)
2424 dev->stats.tx_fifo_errors++;
2425 if (flags & NV_TX2_CARRIERLOST)
2426 dev->stats.tx_carrier_errors++;
2427 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
2428 nv_legacybackoff_reseed(dev);
2429 dev->stats.tx_errors++;
2431 dev->stats.tx_packets++;
2432 dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
2434 dev_kfree_skb_any(np->get_tx_ctx->skb);
2435 np->get_tx_ctx->skb = NULL;
2439 if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
2440 np->get_tx.orig = np->first_tx.orig;
2441 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2442 np->get_tx_ctx = np->first_tx_ctx;
2444 if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
2446 netif_wake_queue(dev);
2451 static int nv_tx_done_optimized(struct net_device *dev, int limit)
2453 struct fe_priv *np = netdev_priv(dev);
2456 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2458 while ((np->get_tx.ex != np->put_tx.ex) &&
2459 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2460 (tx_work < limit)) {
2462 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2465 nv_unmap_txskb(np, np->get_tx_ctx);
2467 if (flags & NV_TX2_LASTPACKET) {
2468 if (!(flags & NV_TX2_ERROR))
2469 dev->stats.tx_packets++;
2471 if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
2472 if (np->driver_data & DEV_HAS_GEAR_MODE)
2473 nv_gear_backoff_reseed(dev);
2475 nv_legacybackoff_reseed(dev);
2479 dev_kfree_skb_any(np->get_tx_ctx->skb);
2480 np->get_tx_ctx->skb = NULL;
2484 nv_tx_flip_ownership(dev);
2486 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2487 np->get_tx.ex = np->first_tx.ex;
2488 if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
2489 np->get_tx_ctx = np->first_tx_ctx;
2491 if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
2493 netif_wake_queue(dev);
2499 * nv_tx_timeout: dev->tx_timeout function
2500 * Called with netif_tx_lock held.
2502 static void nv_tx_timeout(struct net_device *dev)
2504 struct fe_priv *np = netdev_priv(dev);
2505 u8 __iomem *base = get_hwbase(dev);
2507 union ring_type put_tx;
2510 if (np->msi_flags & NV_MSI_X_ENABLED)
2511 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2513 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2515 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
2520 printk(KERN_INFO "%s: Ring at %lx\n",
2521 dev->name, (unsigned long)np->ring_addr);
2522 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
2523 for (i = 0; i <= np->register_size; i += 32) {
2524 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2526 readl(base + i + 0), readl(base + i + 4),
2527 readl(base + i + 8), readl(base + i + 12),
2528 readl(base + i + 16), readl(base + i + 20),
2529 readl(base + i + 24), readl(base + i + 28));
2531 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2532 for (i = 0; i < np->tx_ring_size; i += 4) {
2533 if (!nv_optimized(np)) {
2534 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2536 le32_to_cpu(np->tx_ring.orig[i].buf),
2537 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2538 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2539 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2540 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2541 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2542 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2543 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2545 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2547 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2548 le32_to_cpu(np->tx_ring.ex[i].buflow),
2549 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2550 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2551 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2552 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2553 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2554 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2555 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2556 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2557 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2558 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2563 spin_lock_irq(&np->lock);
2565 /* 1) stop tx engine */
2568 /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
2569 saved_tx_limit = np->tx_limit;
2570 np->tx_limit = 0; /* prevent giving HW any limited pkts */
2571 np->tx_stop = 0; /* prevent waking tx queue */
2572 if (!nv_optimized(np))
2573 nv_tx_done(dev, np->tx_ring_size);
2575 nv_tx_done_optimized(dev, np->tx_ring_size);
2577 /* save current HW position */
2578 if (np->tx_change_owner)
2579 put_tx.ex = np->tx_change_owner->first_tx_desc;
2581 put_tx = np->put_tx;
2583 /* 3) clear all tx state */
2587 /* 4) restore state to current HW position */
2588 np->get_tx = np->put_tx = put_tx;
2589 np->tx_limit = saved_tx_limit;
2591 /* 5) restart tx engine */
2593 netif_wake_queue(dev);
2594 spin_unlock_irq(&np->lock);
2598 * Called when the nic notices a mismatch between the actual data len on the
2599 * wire and the len indicated in the 802 header
2601 static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2603 int hdrlen; /* length of the 802 header */
2604 int protolen; /* length as stored in the proto field */
2606 /* 1) calculate len according to header */
2607 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2608 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2611 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2614 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2615 dev->name, datalen, protolen, hdrlen);
2616 if (protolen > ETH_DATA_LEN)
2617 return datalen; /* Value in proto field not a len, no checks possible */
2620 /* consistency checks: */
2621 if (datalen > ETH_ZLEN) {
2622 if (datalen >= protolen) {
2623 /* more data on wire than in 802 header, trim off
2626 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2627 dev->name, protolen);
2630 /* less data on wire than mentioned in header.
2631 * Discard the packet.
2633 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2638 /* short packet. Accept only if 802 values are also short */
2639 if (protolen > ETH_ZLEN) {
2640 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2644 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2645 dev->name, datalen);
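/* Decision summary: a proto field above ETH_DATA_LEN holds a type, not
 * a length, so no check is possible and datalen is accepted; a frame
 * carrying more data than protolen claims is trimmed to protolen, one
 * carrying less is discarded, and runt frames are accepted only if
 * protolen is also within ETH_ZLEN. */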
2650 static int nv_rx_process(struct net_device *dev, int limit)
2652 struct fe_priv *np = netdev_priv(dev);
2655 struct sk_buff *skb;
2658 while ((np->get_rx.orig != np->put_rx.orig) &&
2659 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2660 (rx_work < limit)) {
2662 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2666 * the packet is for us - immediately tear down the pci mapping.
2667 * TODO: check if a prefetch of the first cacheline improves
2670 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2671 np->get_rx_ctx->dma_len,
2672 PCI_DMA_FROMDEVICE);
2673 skb = np->get_rx_ctx->skb;
2674 np->get_rx_ctx->skb = NULL;
2678 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2679 for (j = 0; j < 64; j++) {
2681 dprintk("\n%03x:", j);
2682 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2686 /* look at what we actually got: */
2687 if (np->desc_ver == DESC_VER_1) {
2688 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
2689 len = flags & LEN_MASK_V1;
2690 if (unlikely(flags & NV_RX_ERROR)) {
2691 if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
2692 len = nv_getlen(dev, skb->data, len);
2694 dev->stats.rx_errors++;
2699 /* framing errors are soft errors */
2700 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2701 if (flags & NV_RX_SUBSTRACT1)
2704 /* the rest are hard errors */
2706 if (flags & NV_RX_MISSEDFRAME)
2707 dev->stats.rx_missed_errors++;
2708 if (flags & NV_RX_CRCERR)
2709 dev->stats.rx_crc_errors++;
2710 if (flags & NV_RX_OVERFLOW)
2711 dev->stats.rx_over_errors++;
2712 dev->stats.rx_errors++;
2722 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2723 len = flags & LEN_MASK_V2;
2724 if (unlikely(flags & NV_RX2_ERROR)) {
2725 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2726 len = nv_getlen(dev, skb->data, len);
2728 dev->stats.rx_errors++;
2733 /* framing errors are soft errors */
2734 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2735 if (flags & NV_RX2_SUBSTRACT1)
2738 /* the rest are hard errors */
2740 if (flags & NV_RX2_CRCERR)
2741 dev->stats.rx_crc_errors++;
2742 if (flags & NV_RX2_OVERFLOW)
2743 dev->stats.rx_over_errors++;
2744 dev->stats.rx_errors++;
2749 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2750 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2751 skb->ip_summed = CHECKSUM_UNNECESSARY;
2757 /* got a valid packet - forward it to the network core */
2759 skb->protocol = eth_type_trans(skb, dev);
2760 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2761 dev->name, len, skb->protocol);
2762 napi_gro_receive(&np->napi, skb);
2763 dev->stats.rx_packets++;
2764 dev->stats.rx_bytes += len;
2766 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
2767 np->get_rx.orig = np->first_rx.orig;
2768 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2769 np->get_rx_ctx = np->first_rx_ctx;
2777 static int nv_rx_process_optimized(struct net_device *dev, int limit)
2779 struct fe_priv *np = netdev_priv(dev);
2783 struct sk_buff *skb;
2786 while ((np->get_rx.ex != np->put_rx.ex) &&
2787 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2788 (rx_work < limit)) {
2790 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2794 * the packet is for us - immediately tear down the pci mapping.
2795 * TODO: check if a prefetch of the first cacheline improves
2798 pci_unmap_single(np->pci_dev, np->get_rx_ctx->dma,
2799 np->get_rx_ctx->dma_len,
2800 PCI_DMA_FROMDEVICE);
2801 skb = np->get_rx_ctx->skb;
2802 np->get_rx_ctx->skb = NULL;
2806 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).", flags);
2807 for (j = 0; j < 64; j++) {
2809 dprintk("\n%03x:", j);
2810 dprintk(" %02x", ((unsigned char *)skb->data)[j]);
2814 /* look at what we actually got: */
2815 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2816 len = flags & LEN_MASK_V2;
2817 if (unlikely(flags & NV_RX2_ERROR)) {
2818 if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
2819 len = nv_getlen(dev, skb->data, len);
2825 /* framing errors are soft errors */
2826 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2827 if (flags & NV_RX2_SUBSTRACT1)
2830 /* the rest are hard errors */
2837 if (((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_TCP) || /*ip and tcp */
2838 ((flags & NV_RX2_CHECKSUMMASK) == NV_RX2_CHECKSUM_IP_UDP)) /*ip and udp */
2839 skb->ip_summed = CHECKSUM_UNNECESSARY;
2841 /* got a valid packet - forward it to the network core */
2843 skb->protocol = eth_type_trans(skb, dev);
2844 prefetch(skb->data);
2846 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2847 dev->name, len, skb->protocol);
2849 if (likely(!np->vlangrp)) {
2850 napi_gro_receive(&np->napi, skb);
2852 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2853 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2854 vlan_gro_receive(&np->napi, np->vlangrp,
2855 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
2857 napi_gro_receive(&np->napi, skb);
2861 dev->stats.rx_packets++;
2862 dev->stats.rx_bytes += len;
2867 if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
2868 np->get_rx.ex = np->first_rx.ex;
2869 if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
2870 np->get_rx_ctx = np->first_rx_ctx;
2878 static void set_bufsize(struct net_device *dev)
2880 struct fe_priv *np = netdev_priv(dev);
2882 if (dev->mtu <= ETH_DATA_LEN)
2883 np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
2885 np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
2889 * nv_change_mtu: dev->change_mtu function
2890 * Called with dev_base_lock held for read.
2892 static int nv_change_mtu(struct net_device *dev, int new_mtu)
2894 struct fe_priv *np = netdev_priv(dev);
2897 if (new_mtu < 64 || new_mtu > np->pkt_limit)
2903 /* return early if the buffer sizes will not change */
2904 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
2906 if (old_mtu == new_mtu)
2909 /* synchronized against open : rtnl_lock() held by caller */
2910 if (netif_running(dev)) {
2911 u8 __iomem *base = get_hwbase(dev);
2913 * It seems that the nic preloads valid ring entries into an
2914 * internal buffer. The procedure for flushing everything is
2915 * guessed; there is probably a simpler approach.
2916 * Changing the MTU is a rare event, so it shouldn't matter.
2918 nv_disable_irq(dev);
2919 nv_napi_disable(dev);
2920 netif_tx_lock_bh(dev);
2921 netif_addr_lock(dev);
2922 spin_lock(&np->lock);
2926 /* drain rx queue */
2928 /* reinit driver view of the rx queue */
2930 if (nv_init_ring(dev)) {
2931 if (!np->in_shutdown)
2932 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
2934 /* reinit nic view of the rx queue */
2935 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2936 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2937 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2938 base + NvRegRingSizes);
2940 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2943 /* restart rx engine */
2945 spin_unlock(&np->lock);
2946 netif_addr_unlock(dev);
2947 netif_tx_unlock_bh(dev);
2948 nv_napi_enable(dev);
2954 static void nv_copy_mac_to_hw(struct net_device *dev)
2956 u8 __iomem *base = get_hwbase(dev);
2959 mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
2960 (dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
2961 mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
2963 writel(mac[0], base + NvRegMacAddrA);
2964 writel(mac[1], base + NvRegMacAddrB);
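/* Illustrative example (not from the driver): for MAC 00:11:22:33:44:55
 * the packing above yields NvRegMacAddrA = 0x33221100 and
 * NvRegMacAddrB = 0x00005544, i.e. little-endian byte order within
 * each register. */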
2968 * nv_set_mac_address: dev->set_mac_address function
2969 * Called with rtnl_lock() held.
2971 static int nv_set_mac_address(struct net_device *dev, void *addr)
2973 struct fe_priv *np = netdev_priv(dev);
2974 struct sockaddr *macaddr = (struct sockaddr *)addr;
2976 if (!is_valid_ether_addr(macaddr->sa_data))
2977 return -EADDRNOTAVAIL;
2979 /* synchronized against open : rtnl_lock() held by caller */
2980 memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
2982 if (netif_running(dev)) {
2983 netif_tx_lock_bh(dev);
2984 netif_addr_lock(dev);
2985 spin_lock_irq(&np->lock);
2987 /* stop rx engine */
2990 /* set mac address */
2991 nv_copy_mac_to_hw(dev);
2993 /* restart rx engine */
2995 spin_unlock_irq(&np->lock);
2996 netif_addr_unlock(dev);
2997 netif_tx_unlock_bh(dev);
2999 nv_copy_mac_to_hw(dev);
3005 * nv_set_multicast: dev->set_multicast function
3006 * Called with netif_tx_lock held.
3008 static void nv_set_multicast(struct net_device *dev)
3010 struct fe_priv *np = netdev_priv(dev);
3011 u8 __iomem *base = get_hwbase(dev);
3014 u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
3016 memset(addr, 0, sizeof(addr));
3017 memset(mask, 0, sizeof(mask));
3019 if (dev->flags & IFF_PROMISC) {
3020 pff |= NVREG_PFF_PROMISC;
3022 pff |= NVREG_PFF_MYADDR;
3024 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
3028 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0xffffffff;
3029 if (dev->flags & IFF_ALLMULTI) {
3030 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3032 struct netdev_hw_addr *ha;
3034 netdev_for_each_mc_addr(ha, dev) {
3035 unsigned char *addr = ha->addr;
3038 a = le32_to_cpu(*(__le32 *) addr);
3039 b = le16_to_cpu(*(__le16 *) (&addr[4]));
3046 addr[0] = alwaysOn[0];
3047 addr[1] = alwaysOn[1];
3048 mask[0] = alwaysOn[0] | alwaysOff[0];
3049 mask[1] = alwaysOn[1] | alwaysOff[1];
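/* The loop above narrows alwaysOn to the bits set in every multicast
 * address and alwaysOff to the bits clear in every one; mask thus
 * covers the positions where all addresses agree, giving the tightest
 * hardware filter pattern that still matches the whole list. */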
3051 mask[0] = NVREG_MCASTMASKA_NONE;
3052 mask[1] = NVREG_MCASTMASKB_NONE;
3055 addr[0] |= NVREG_MCASTADDRA_FORCE;
3056 pff |= NVREG_PFF_ALWAYS;
3057 spin_lock_irq(&np->lock);
3059 writel(addr[0], base + NvRegMulticastAddrA);
3060 writel(addr[1], base + NvRegMulticastAddrB);
3061 writel(mask[0], base + NvRegMulticastMaskA);
3062 writel(mask[1], base + NvRegMulticastMaskB);
3063 writel(pff, base + NvRegPacketFilterFlags);
3064 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3067 spin_unlock_irq(&np->lock);
3070 static void nv_update_pause(struct net_device *dev, u32 pause_flags)
3072 struct fe_priv *np = netdev_priv(dev);
3073 u8 __iomem *base = get_hwbase(dev);
3075 np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
3077 if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
3078 u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
3079 if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
3080 writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
3081 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3083 writel(pff, base + NvRegPacketFilterFlags);
3086 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
3087 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
3088 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
3089 u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1;
3090 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2)
3091 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2;
3092 if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) {
3093 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3;
3094 /* limit the number of tx pause frames to a default of 8 */
3095 writel(readl(base + NvRegTxPauseFrameLimit)|NVREG_TX_PAUSEFRAMELIMIT_ENABLE, base + NvRegTxPauseFrameLimit);
3097 writel(pause_enable, base + NvRegTxPauseFrame);
3098 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
3099 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3101 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
3102 writel(regmisc, base + NvRegMisc1);
3108 * nv_update_linkspeed: Setup the MAC according to the link partner
3109 * @dev: Network device to be configured
3111 * The function queries the PHY and checks if there is a link partner.
3112 * If yes, then it sets up the MAC accordingly. Otherwise, the MAC is
3113 * set to 10 MBit HD.
3115 * The function returns 0 if there is no link partner and 1 if there is
3116 * a good link partner.
3118 static int nv_update_linkspeed(struct net_device *dev)
3120 struct fe_priv *np = netdev_priv(dev);
3121 u8 __iomem *base = get_hwbase(dev);
3124 int adv_lpa, adv_pause, lpa_pause;
3125 int newls = np->linkspeed;
3126 int newdup = np->duplex;
3129 u32 control_1000, status_1000, phyreg, pause_flags, txreg;
3133 /* BMSR_LSTATUS is latched, read it twice:
3134 * we want the current value.
3136 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3137 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3139 if (!(mii_status & BMSR_LSTATUS)) {
3140 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3142 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3148 if (np->autoneg == 0) {
3149 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3150 dev->name, np->fixed_mode);
3151 if (np->fixed_mode & LPA_100FULL) {
3152 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3154 } else if (np->fixed_mode & LPA_100HALF) {
3155 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3157 } else if (np->fixed_mode & LPA_10FULL) {
3158 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3161 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3167 /* check auto negotiation is complete */
3168 if (!(mii_status & BMSR_ANEGCOMPLETE)) {
3169 /* still in autonegotiation - configure nic for 10 MBit HD and wait. */
3170 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3173 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3177 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3178 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3179 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3180 dev->name, adv, lpa);
3183 if (np->gigabit == PHY_GIGABIT) {
3184 control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
3185 status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
3187 if ((control_1000 & ADVERTISE_1000FULL) &&
3188 (status_1000 & LPA_1000FULL)) {
3189 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3191 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3197 /* FIXME: handle parallel detection properly */
3198 adv_lpa = lpa & adv;
3199 if (adv_lpa & LPA_100FULL) {
3200 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3202 } else if (adv_lpa & LPA_100HALF) {
3203 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3205 } else if (adv_lpa & LPA_10FULL) {
3206 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3208 } else if (adv_lpa & LPA_10HALF) {
3209 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3212 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3213 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3218 if (np->duplex == newdup && np->linkspeed == newls)
3221 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3222 dev->name, np->linkspeed, np->duplex, newls, newdup);
3224 np->duplex = newdup;
3225 np->linkspeed = newls;
3227 /* The transmitter and receiver must be restarted for safe update */
3228 if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_START) {
3229 txrxFlags |= NV_RESTART_TX;
3232 if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
3233 txrxFlags |= NV_RESTART_RX;
3237 if (np->gigabit == PHY_GIGABIT) {
3238 phyreg = readl(base + NvRegSlotTime);
3239 phyreg &= ~(0x3FF00);
3240 if (((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10) ||
3241 ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100))
3242 phyreg |= NVREG_SLOTTIME_10_100_FULL;
3243 else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
3244 phyreg |= NVREG_SLOTTIME_1000_FULL;
3245 writel(phyreg, base + NvRegSlotTime);
3248 phyreg = readl(base + NvRegPhyInterface);
3249 phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
3250 if (np->duplex == 0)
3252 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
3254 else if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3256 writel(phyreg, base + NvRegPhyInterface);
3258 phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */
3259 if (phyreg & PHY_RGMII) {
3260 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) {
3261 txreg = NVREG_TX_DEFERRAL_RGMII_1000;
3263 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) {
3264 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10)
3265 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10;
3267 txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100;
3269 txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
3273 if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX))
3274 txreg = NVREG_TX_DEFERRAL_MII_STRETCH;
3276 txreg = NVREG_TX_DEFERRAL_DEFAULT;
3278 writel(txreg, base + NvRegTxDeferral);
3280 if (np->desc_ver == DESC_VER_1) {
3281 txreg = NVREG_TX_WM_DESC1_DEFAULT;
3283 if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000)
3284 txreg = NVREG_TX_WM_DESC2_3_1000;
3286 txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
3288 writel(txreg, base + NvRegTxWatermark);
3290 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3293 writel(np->linkspeed, base + NvRegLinkSpeed);
3297 /* setup pause frame */
3298 if (np->duplex != 0) {
3299 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3300 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3301 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3303 switch (adv_pause) {
3304 case ADVERTISE_PAUSE_CAP:
3305 if (lpa_pause & LPA_PAUSE_CAP) {
3306 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3307 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3308 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3311 case ADVERTISE_PAUSE_ASYM:
3312 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3313 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3315 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3316 if (lpa_pause & LPA_PAUSE_CAP) {
3317 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3318 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3319 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3321 if (lpa_pause == LPA_PAUSE_ASYM)
3322 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3326 pause_flags = np->pause_flags;
3329 nv_update_pause(dev, pause_flags);
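/* The switch above implements the usual flow-control resolution
 * (cf. IEEE 802.3 Annex 28B): symmetric pause on both sides enables rx
 * pause (plus tx pause if requested), while asymmetric advertisements
 * enable pause in one direction only, depending on which side asked. */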
3331 if (txrxFlags & NV_RESTART_TX)
3333 if (txrxFlags & NV_RESTART_RX)
3339 static void nv_linkchange(struct net_device *dev)
3341 if (nv_update_linkspeed(dev)) {
3342 if (!netif_carrier_ok(dev)) {
3343 netif_carrier_on(dev);
3344 printk(KERN_INFO "%s: link up.\n", dev->name);
3345 nv_txrx_gate(dev, false);
3349 if (netif_carrier_ok(dev)) {
3350 netif_carrier_off(dev);
3351 printk(KERN_INFO "%s: link down.\n", dev->name);
3352 nv_txrx_gate(dev, true);
3358 static void nv_link_irq(struct net_device *dev)
3360 u8 __iomem *base = get_hwbase(dev);
3363 miistat = readl(base + NvRegMIIStatus);
3364 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3365 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3367 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3369 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3372 static void nv_msi_workaround(struct fe_priv *np)
3375 /* Need to toggle the msi irq mask within the ethernet device,
3376 * otherwise, future interrupts will not be detected.
3378 if (np->msi_flags & NV_MSI_ENABLED) {
3379 u8 __iomem *base = np->base;
3381 writel(0, base + NvRegMSIIrqMask);
3382 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3386 static inline int nv_change_interrupt_mode(struct net_device *dev, int total_work)
3388 struct fe_priv *np = netdev_priv(dev);
3390 if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC) {
3391 if (total_work > NV_DYNAMIC_THRESHOLD) {
3392 /* transition to poll based interrupts */
3393 np->quiet_count = 0;
3394 if (np->irqmask != NVREG_IRQMASK_CPU) {
3395 np->irqmask = NVREG_IRQMASK_CPU;
3399 if (np->quiet_count < NV_DYNAMIC_MAX_QUIET_COUNT) {
3402 /* reached a period of low activity, switch
3403 to per-packet tx/rx interrupts */
3404 if (np->irqmask != NVREG_IRQMASK_THROUGHPUT) {
3405 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
3414 static irqreturn_t nv_nic_irq(int foo, void *data)
3416 struct net_device *dev = (struct net_device *) data;
3417 struct fe_priv *np = netdev_priv(dev);
3418 u8 __iomem *base = get_hwbase(dev);
3420 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3422 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3423 np->events = readl(base + NvRegIrqStatus);
3424 writel(np->events, base + NvRegIrqStatus);
3426 np->events = readl(base + NvRegMSIXIrqStatus);
3427 writel(np->events, base + NvRegMSIXIrqStatus);
3429 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3430 if (!(np->events & np->irqmask))
3433 nv_msi_workaround(np);
3435 if (napi_schedule_prep(&np->napi)) {
3437 * Disable further irq's (msix not enabled with napi)
3439 writel(0, base + NvRegIrqMask);
3440 __napi_schedule(&np->napi);
3443 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3449 * All _optimized functions are used to help increase performance
3450 * (reduce CPU and increase throughput). They use descriptor version 3,
3451 * compiler directives, and fewer memory accesses.
3453 static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3455 struct net_device *dev = (struct net_device *) data;
3456 struct fe_priv *np = netdev_priv(dev);
3457 u8 __iomem *base = get_hwbase(dev);
3459 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3461 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3462 np->events = readl(base + NvRegIrqStatus);
3463 writel(np->events, base + NvRegIrqStatus);
3465 np->events = readl(base + NvRegMSIXIrqStatus);
3466 writel(np->events, base + NvRegMSIXIrqStatus);
3468 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3469 if (!(np->events & np->irqmask))
3472 nv_msi_workaround(np);
3474 if (napi_schedule_prep(&np->napi)) {
3476 * Disable further irq's (msix not enabled with napi)
3478 writel(0, base + NvRegIrqMask);
3479 __napi_schedule(&np->napi);
3481 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3486 static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3488 struct net_device *dev = (struct net_device *) data;
3489 struct fe_priv *np = netdev_priv(dev);
3490 u8 __iomem *base = get_hwbase(dev);
3493 unsigned long flags;
3495 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3498 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3499 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3500 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3501 if (!(events & np->irqmask))
3504 spin_lock_irqsave(&np->lock, flags);
3505 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3506 spin_unlock_irqrestore(&np->lock, flags);
3508 if (unlikely(i > max_interrupt_work)) {
3509 spin_lock_irqsave(&np->lock, flags);
3510 /* disable interrupts on the nic */
3511 writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
3514 if (!np->in_shutdown) {
3515 np->nic_poll_irq |= NVREG_IRQ_TX_ALL;
3516 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3518 spin_unlock_irqrestore(&np->lock, flags);
3519 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
3524 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3526 return IRQ_RETVAL(i);
3529 static int nv_napi_poll(struct napi_struct *napi, int budget)
3531 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
3532 struct net_device *dev = np->dev;
3533 u8 __iomem *base = get_hwbase(dev);
3534 unsigned long flags;
3536 int rx_count, tx_work = 0, rx_work = 0;
3539 if (!nv_optimized(np)) {
3540 spin_lock_irqsave(&np->lock, flags);
3541 tx_work += nv_tx_done(dev, np->tx_ring_size);
3542 spin_unlock_irqrestore(&np->lock, flags);
3544 rx_count = nv_rx_process(dev, budget - rx_work);
3545 retcode = nv_alloc_rx(dev);
3547 spin_lock_irqsave(&np->lock, flags);
3548 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3549 spin_unlock_irqrestore(&np->lock, flags);
3551 rx_count = nv_rx_process_optimized(dev,
3553 retcode = nv_alloc_rx_optimized(dev);
3555 } while (retcode == 0 &&
3556 rx_count > 0 && (rx_work += rx_count) < budget);
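/* The do/while alternates rx processing and ring refill until the NAPI
 * budget is spent, the ring runs dry, or an allocation fails; a failed
 * refill (retcode != 0) falls through to arming the oom_kick timer. */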
3559 spin_lock_irqsave(&np->lock, flags);
3560 if (!np->in_shutdown)
3561 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3562 spin_unlock_irqrestore(&np->lock, flags);
3565 nv_change_interrupt_mode(dev, tx_work + rx_work);
3567 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3568 spin_lock_irqsave(&np->lock, flags);
3570 spin_unlock_irqrestore(&np->lock, flags);
3572 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3573 spin_lock_irqsave(&np->lock, flags);
3575 spin_unlock_irqrestore(&np->lock, flags);
3576 np->link_timeout = jiffies + LINK_TIMEOUT;
3578 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3579 spin_lock_irqsave(&np->lock, flags);
3580 if (!np->in_shutdown) {
3581 np->nic_poll_irq = np->irqmask;
3582 np->recover_error = 1;
3583 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3585 spin_unlock_irqrestore(&np->lock, flags);
3586 napi_complete(napi);
3590 if (rx_work < budget) {
3591 /* re-enable interrupts
3592 (msix not enabled in napi) */
3593 napi_complete(napi);
3595 writel(np->irqmask, base + NvRegIrqMask);
3600 static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3602 struct net_device *dev = (struct net_device *) data;
3603 struct fe_priv *np = netdev_priv(dev);
3604 u8 __iomem *base = get_hwbase(dev);
3607 unsigned long flags;
3609 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3612 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3613 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3614 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3615 if (!(events & np->irqmask))
3618 if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
3619 if (unlikely(nv_alloc_rx_optimized(dev))) {
3620 spin_lock_irqsave(&np->lock, flags);
3621 if (!np->in_shutdown)
3622 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3623 spin_unlock_irqrestore(&np->lock, flags);
3627 if (unlikely(i > max_interrupt_work)) {
3628 spin_lock_irqsave(&np->lock, flags);
3629 /* disable interrupts on the nic */
3630 writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
3633 if (!np->in_shutdown) {
3634 np->nic_poll_irq |= NVREG_IRQ_RX_ALL;
3635 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3637 spin_unlock_irqrestore(&np->lock, flags);
3638 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
3642 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3644 return IRQ_RETVAL(i);
3647 static irqreturn_t nv_nic_irq_other(int foo, void *data)
3649 struct net_device *dev = (struct net_device *) data;
3650 struct fe_priv *np = netdev_priv(dev);
3651 u8 __iomem *base = get_hwbase(dev);
3654 unsigned long flags;
3656 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3659 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3660 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3661 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3662 if (!(events & np->irqmask))
3665 /* check tx in case we reached max loop limit in tx isr */
3666 spin_lock_irqsave(&np->lock, flags);
3667 nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3668 spin_unlock_irqrestore(&np->lock, flags);
3670 if (events & NVREG_IRQ_LINK) {
3671 spin_lock_irqsave(&np->lock, flags);
3673 spin_unlock_irqrestore(&np->lock, flags);
3675 if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
3676 spin_lock_irqsave(&np->lock, flags);
3678 spin_unlock_irqrestore(&np->lock, flags);
3679 np->link_timeout = jiffies + LINK_TIMEOUT;
3681 if (events & NVREG_IRQ_RECOVER_ERROR) {
3682 spin_lock_irq(&np->lock);
3683 /* disable interrupts on the nic */
3684 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3687 if (!np->in_shutdown) {
3688 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3689 np->recover_error = 1;
3690 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3692 spin_unlock_irq(&np->lock);
3695 if (unlikely(i > max_interrupt_work)) {
3696 spin_lock_irqsave(&np->lock, flags);
3697 /* disable interrupts on the nic */
3698 writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
3701 if (!np->in_shutdown) {
3702 np->nic_poll_irq |= NVREG_IRQ_OTHER;
3703 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3705 spin_unlock_irqrestore(&np->lock, flags);
3706 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
3711 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3713 return IRQ_RETVAL(i);
3716 static irqreturn_t nv_nic_irq_test(int foo, void *data)
3718 struct net_device *dev = (struct net_device *) data;
3719 struct fe_priv *np = netdev_priv(dev);
3720 u8 __iomem *base = get_hwbase(dev);
3723 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3725 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3726 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3727 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
3729 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
3730 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3733 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3734 if (!(events & NVREG_IRQ_TIMER))
3735 return IRQ_RETVAL(0);
3737 nv_msi_workaround(np);
3739 spin_lock(&np->lock);
3741 spin_unlock(&np->lock);
3743 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3745 return IRQ_RETVAL(1);
3748 static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3750 u8 __iomem *base = get_hwbase(dev);
3754 /* Each interrupt bit can be mapped to a MSIX vector (4 bits).
3755 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
3756 * the remaining 8 interrupts.
3758 for (i = 0; i < 8; i++) {
3759 if ((irqmask >> i) & 0x1)
3760 msixmap |= vector << (i << 2);
3762 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3765 for (i = 0; i < 8; i++) {
3766 if ((irqmask >> (i + 8)) & 0x1)
3767 msixmap |= vector << (i << 2);
3769 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
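/* Illustrative example (not from the driver): mapping vector 2 onto an
 * irqmask with bits 0 and 3 set builds msixmap = (2 << 0) | (2 << 12)
 * = 0x2002 for MSIXMap0, i.e. one 4-bit nibble per interrupt bit. */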
3772 static int nv_request_irq(struct net_device *dev, int intr_test)
3774 struct fe_priv *np = get_nvpriv(dev);
3775 u8 __iomem *base = get_hwbase(dev);
3778 irqreturn_t (*handler)(int foo, void *data);
3781 handler = nv_nic_irq_test;
3783 if (nv_optimized(np))
3784 handler = nv_nic_irq_optimized;
3786 handler = nv_nic_irq;
3789 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3790 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3791 np->msi_x_entry[i].entry = i;
3792 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
3793 np->msi_flags |= NV_MSI_X_ENABLED;
3794 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3795 /* Request irq for rx handling */
3796 sprintf(np->name_rx, "%s-rx", dev->name);
3797 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3798 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3799 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
3800 pci_disable_msix(np->pci_dev);
3801 np->msi_flags &= ~NV_MSI_X_ENABLED;
3804 /* Request irq for tx handling */
3805 sprintf(np->name_tx, "%s-tx", dev->name);
3806 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3807 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3808 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
3809 pci_disable_msix(np->pci_dev);
3810 np->msi_flags &= ~NV_MSI_X_ENABLED;
3813 /* Request irq for link and timer handling */
3814 sprintf(np->name_other, "%s-other", dev->name);
3815 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3816 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3817 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
3818 pci_disable_msix(np->pci_dev);
3819 np->msi_flags &= ~NV_MSI_X_ENABLED;
3822 /* map interrupts to their respective vector */
3823 writel(0, base + NvRegMSIXMap0);
3824 writel(0, base + NvRegMSIXMap1);
3825 set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
3826 set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
3827 set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
3829 /* Request irq for all interrupts */
3830 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3831 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3832 pci_disable_msix(np->pci_dev);
3833 np->msi_flags &= ~NV_MSI_X_ENABLED;
3837 /* map interrupts to vector 0 */
3838 writel(0, base + NvRegMSIXMap0);
3839 writel(0, base + NvRegMSIXMap1);
3843 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3844 if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
3845 np->msi_flags |= NV_MSI_ENABLED;
3846 dev->irq = np->pci_dev->irq;
3847 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3848 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
3849 pci_disable_msi(np->pci_dev);
3850 np->msi_flags &= ~NV_MSI_ENABLED;
3851 dev->irq = np->pci_dev->irq;
3855 /* map interrupts to vector 0 */
3856 writel(0, base + NvRegMSIMap0);
3857 writel(0, base + NvRegMSIMap1);
3858 /* enable msi vector 0 */
3859 writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
3863 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
3870 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
3872 free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
3877 static void nv_free_irq(struct net_device *dev)
3879 struct fe_priv *np = get_nvpriv(dev);
3882 if (np->msi_flags & NV_MSI_X_ENABLED) {
3883 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3884 free_irq(np->msi_x_entry[i].vector, dev);
3885 pci_disable_msix(np->pci_dev);
3886 np->msi_flags &= ~NV_MSI_X_ENABLED;
3888 free_irq(np->pci_dev->irq, dev);
3889 if (np->msi_flags & NV_MSI_ENABLED) {
3890 pci_disable_msi(np->pci_dev);
3891 np->msi_flags &= ~NV_MSI_ENABLED;
3896 static void nv_do_nic_poll(unsigned long data)
3898 struct net_device *dev = (struct net_device *) data;
3899 struct fe_priv *np = netdev_priv(dev);
3900 u8 __iomem *base = get_hwbase(dev);
3904 * First disable the irq(s) and then
3905 * reenable interrupts on the nic; we have to do this before calling
3906 * nv_nic_irq because that may decide to do otherwise
3909 if (!using_multi_irqs(dev)) {
3910 if (np->msi_flags & NV_MSI_X_ENABLED)
3911 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3913 disable_irq_lockdep(np->pci_dev->irq);
3916 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3917 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3918 mask |= NVREG_IRQ_RX_ALL;
3920 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3921 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3922 mask |= NVREG_IRQ_TX_ALL;
3924 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3925 disable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
3926 mask |= NVREG_IRQ_OTHER;
3929 /* disable_irq() contains synchronize_irq, thus no irq handler can run now */
3931 if (np->recover_error) {
3932 np->recover_error = 0;
3933 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
3934 if (netif_running(dev)) {
3935 netif_tx_lock_bh(dev);
3936 netif_addr_lock(dev);
3937 spin_lock(&np->lock);
3940 if (np->driver_data & DEV_HAS_POWER_CNTRL)
3943 /* drain rx queue */
3945 /* reinit driver view of the rx queue */
3947 if (nv_init_ring(dev)) {
3948 if (!np->in_shutdown)
3949 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3951 /* reinit nic view of the rx queue */
3952 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3953 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3954 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3955 base + NvRegRingSizes);
3957 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
3959 /* clear interrupts */
3960 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3961 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
3963 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
3965 /* restart rx engine */
3967 spin_unlock(&np->lock);
3968 netif_addr_unlock(dev);
3969 netif_tx_unlock_bh(dev);
3973 writel(mask, base + NvRegIrqMask);
3976 if (!using_multi_irqs(dev)) {
3977 np->nic_poll_irq = 0;
3978 if (nv_optimized(np))
3979 nv_nic_irq_optimized(0, dev);
3982 if (np->msi_flags & NV_MSI_X_ENABLED)
3983 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
3985 enable_irq_lockdep(np->pci_dev->irq);
3987 if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
3988 np->nic_poll_irq &= ~NVREG_IRQ_RX_ALL;
3989 nv_nic_irq_rx(0, dev);
3990 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
3992 if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
3993 np->nic_poll_irq &= ~NVREG_IRQ_TX_ALL;
3994 nv_nic_irq_tx(0, dev);
3995 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
3997 if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
3998 np->nic_poll_irq &= ~NVREG_IRQ_OTHER;
3999 nv_nic_irq_other(0, dev);
4000 enable_irq_lockdep(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
4006 #ifdef CONFIG_NET_POLL_CONTROLLER
4007 static void nv_poll_controller(struct net_device *dev)
4009 nv_do_nic_poll((unsigned long) dev);
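/*
 * nv_do_stats_poll - np->stats_poll timer callback: refresh the hardware
 * statistics counters and re-arm the timer every STATS_INTERVAL while the
 * device is not shutting down.
 */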
4013 static void nv_do_stats_poll(unsigned long data)
4015 struct net_device *dev = (struct net_device *) data;
4016 struct fe_priv *np = netdev_priv(dev);
4018 nv_get_hw_stats(dev);
4020 if (!np->in_shutdown)
4021 mod_timer(&np->stats_poll,
4022 round_jiffies(jiffies + STATS_INTERVAL));
4025 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4027 struct fe_priv *np = netdev_priv(dev);
4028 strcpy(info->driver, DRV_NAME);
4029 strcpy(info->version, FORCEDETH_VERSION);
4030 strcpy(info->bus_info, pci_name(np->pci_dev));
4033 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4035 struct fe_priv *np = netdev_priv(dev);
4036 wolinfo->supported = WAKE_MAGIC;
4038 spin_lock_irq(&np->lock);
4040 wolinfo->wolopts = WAKE_MAGIC;
4041 spin_unlock_irq(&np->lock);
4044 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4046 struct fe_priv *np = netdev_priv(dev);
4047 u8 __iomem *base = get_hwbase(dev);
4050 if (wolinfo->wolopts == 0) {
4052 } else if (wolinfo->wolopts & WAKE_MAGIC) {
4054 flags = NVREG_WAKEUPFLAGS_ENABLE;
4056 if (netif_running(dev)) {
4057 spin_lock_irq(&np->lock);
4058 writel(flags, base + NvRegWakeUpFlags);
4059 spin_unlock_irq(&np->lock);
4064 static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4066 struct fe_priv *np = netdev_priv(dev);
4069 spin_lock_irq(&np->lock);
4070 ecmd->port = PORT_MII;
4071 if (!netif_running(dev)) {
4072 /* We do not track link speed / duplex setting if the
4073 * interface is disabled. Force a link check */
4074 if (nv_update_linkspeed(dev)) {
4075 if (!netif_carrier_ok(dev))
4076 netif_carrier_on(dev);
4078 if (netif_carrier_ok(dev))
4079 netif_carrier_off(dev);
4083 if (netif_carrier_ok(dev)) {
4084 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4085 case NVREG_LINKSPEED_10:
4086 ecmd->speed = SPEED_10;
4088 case NVREG_LINKSPEED_100:
4089 ecmd->speed = SPEED_100;
4091 case NVREG_LINKSPEED_1000:
4092 ecmd->speed = SPEED_1000;
4095 ecmd->duplex = DUPLEX_HALF;
4097 ecmd->duplex = DUPLEX_FULL;
4103 ecmd->autoneg = np->autoneg;
4105 ecmd->advertising = ADVERTISED_MII;
4107 ecmd->advertising |= ADVERTISED_Autoneg;
4108 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4109 if (adv & ADVERTISE_10HALF)
4110 ecmd->advertising |= ADVERTISED_10baseT_Half;
4111 if (adv & ADVERTISE_10FULL)
4112 ecmd->advertising |= ADVERTISED_10baseT_Full;
4113 if (adv & ADVERTISE_100HALF)
4114 ecmd->advertising |= ADVERTISED_100baseT_Half;
4115 if (adv & ADVERTISE_100FULL)
4116 ecmd->advertising |= ADVERTISED_100baseT_Full;
4117 if (np->gigabit == PHY_GIGABIT) {
4118 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4119 if (adv & ADVERTISE_1000FULL)
4120 ecmd->advertising |= ADVERTISED_1000baseT_Full;
4123 ecmd->supported = (SUPPORTED_Autoneg |
4124 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
4125 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
4127 if (np->gigabit == PHY_GIGABIT)
4128 ecmd->supported |= SUPPORTED_1000baseT_Full;
4130 ecmd->phy_address = np->phyaddr;
4131 ecmd->transceiver = XCVR_EXTERNAL;
4133 /* ignore maxtxpkt, maxrxpkt for now */
4134 spin_unlock_irq(&np->lock);
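/*
 * nv_set_settings - ethtool hook for changing the link configuration.
 * The request is validated first (MII port and external transceiver only;
 * either a plausible autoneg advertising mask or a forced 10/100 mode),
 * then the engines are quiesced and the PHY advertising and BMCR
 * registers are reprogrammed to match.
 */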
4138 static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4140 struct fe_priv *np = netdev_priv(dev);
4142 if (ecmd->port != PORT_MII)
4144 if (ecmd->transceiver != XCVR_EXTERNAL)
4146 if (ecmd->phy_address != np->phyaddr) {
4147 /* TODO: support switching between multiple phys. Should be
4148 * trivial, but not enabled due to lack of test hardware. */
4151 if (ecmd->autoneg == AUTONEG_ENABLE) {
4154 mask = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
4155 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
4156 if (np->gigabit == PHY_GIGABIT)
4157 mask |= ADVERTISED_1000baseT_Full;
4159 if ((ecmd->advertising & mask) == 0)
4162 } else if (ecmd->autoneg == AUTONEG_DISABLE) {
4163 /* Note: with autonegotiation disabled, forcing speed 1000 is
4164 * intentionally forbidden - no one should need that. */
4166 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
4168 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
4174 netif_carrier_off(dev);
4175 if (netif_running(dev)) {
4176 unsigned long flags;
4178 nv_disable_irq(dev);
4179 netif_tx_lock_bh(dev);
4180 netif_addr_lock(dev);
4181 /* with plain spinlock lockdep complains */
4182 spin_lock_irqsave(&np->lock, flags);
4185 * this can take some time, and interrupts are disabled
4186 * due to spin_lock_irqsave, but let's hope no daemon
4187 * is going to change the settings very often...
4189 * NV_RXSTOP_DELAY1MAX + NV_TXSTOP_DELAY1MAX
4190 * + some minor delays, which add up to approximately one second
4193 spin_unlock_irqrestore(&np->lock, flags);
4194 netif_addr_unlock(dev);
4195 netif_tx_unlock_bh(dev);
4198 if (ecmd->autoneg == AUTONEG_ENABLE) {
4203 /* advertise only what has been requested */
4204 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4205 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4206 if (ecmd->advertising & ADVERTISED_10baseT_Half)
4207 adv |= ADVERTISE_10HALF;
4208 if (ecmd->advertising & ADVERTISED_10baseT_Full)
4209 adv |= ADVERTISE_10FULL;
4210 if (ecmd->advertising & ADVERTISED_100baseT_Half)
4211 adv |= ADVERTISE_100HALF;
4212 if (ecmd->advertising & ADVERTISED_100baseT_Full)
4213 adv |= ADVERTISE_100FULL;
4214 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4215 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4216 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4217 adv |= ADVERTISE_PAUSE_ASYM;
4218 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4220 if (np->gigabit == PHY_GIGABIT) {
4221 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4222 adv &= ~ADVERTISE_1000FULL;
4223 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
4224 adv |= ADVERTISE_1000FULL;
4225 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4228 if (netif_running(dev))
4229 printk(KERN_INFO "%s: link down.\n", dev->name);
4230 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4231 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4232 bmcr |= BMCR_ANENABLE;
4233 /* reset the phy in order for settings to stick,
4234 * and cause autoneg to start */
4235 if (phy_reset(dev, bmcr)) {
4236 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4240 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4241 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4248 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4249 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4250 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
4251 adv |= ADVERTISE_10HALF;
4252 if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
4253 adv |= ADVERTISE_10FULL;
4254 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
4255 adv |= ADVERTISE_100HALF;
4256 if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
4257 adv |= ADVERTISE_100FULL;
4258 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4259 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
4260 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4261 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4263 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
4264 adv |= ADVERTISE_PAUSE_ASYM;
4265 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4267 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4268 np->fixed_mode = adv;
4270 if (np->gigabit == PHY_GIGABIT) {
4271 adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
4272 adv &= ~ADVERTISE_1000FULL;
4273 mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
4276 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4277 bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
4278 if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
4279 bmcr |= BMCR_FULLDPLX;
4280 if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
4281 bmcr |= BMCR_SPEED100;
4282 if (np->phy_oui == PHY_OUI_MARVELL) {
4283 /* reset the phy in order for forced mode settings to stick */
4284 if (phy_reset(dev, bmcr)) {
4285 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4289 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4290 if (netif_running(dev)) {
4291 /* Wait a bit and then reconfigure the nic. */
4298 if (netif_running(dev)) {
4306 #define FORCEDETH_REGS_VER 1
4308 static int nv_get_regs_len(struct net_device *dev)
4310 struct fe_priv *np = netdev_priv(dev);
4311 return np->register_size;
4314 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
4316 struct fe_priv *np = netdev_priv(dev);
4317 u8 __iomem *base = get_hwbase(dev);
4321 regs->version = FORCEDETH_REGS_VER;
4322 spin_lock_irq(&np->lock);
4323 for (i = 0; i < np->register_size/sizeof(u32); i++)
4324 rbuf[i] = readl(base + i*sizeof(u32));
4325 spin_unlock_irq(&np->lock);
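/*
 * nv_nway_reset - restart autonegotiation (ethtool -r). Marvell E3016
 * phys need a full phy reset for this to take effect; all others just
 * get BMCR_ANENABLE | BMCR_ANRESTART written into BMCR.
 */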
4328 static int nv_nway_reset(struct net_device *dev)
4330 struct fe_priv *np = netdev_priv(dev);
4336 netif_carrier_off(dev);
4337 if (netif_running(dev)) {
4338 nv_disable_irq(dev);
4339 netif_tx_lock_bh(dev);
4340 netif_addr_lock(dev);
4341 spin_lock(&np->lock);
4344 spin_unlock(&np->lock);
4345 netif_addr_unlock(dev);
4346 netif_tx_unlock_bh(dev);
4347 printk(KERN_INFO "%s: link down.\n", dev->name);
4350 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4351 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4352 bmcr |= BMCR_ANENABLE;
4353 /* reset the phy in order for settings to stick */
4354 if (phy_reset(dev, bmcr)) {
4355 printk(KERN_INFO "%s: phy reset failed\n", dev->name);
4359 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4360 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4363 if (netif_running(dev)) {
4375 static int nv_set_tso(struct net_device *dev, u32 value)
4377 struct fe_priv *np = netdev_priv(dev);
4379 if ((np->driver_data & DEV_HAS_CHECKSUM))
4380 return ethtool_op_set_tso(dev, value);
4385 static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4387 struct fe_priv *np = netdev_priv(dev);
4389 ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4390 ring->rx_mini_max_pending = 0;
4391 ring->rx_jumbo_max_pending = 0;
4392 ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
4394 ring->rx_pending = np->rx_ring_size;
4395 ring->rx_mini_pending = 0;
4396 ring->rx_jumbo_pending = 0;
4397 ring->tx_pending = np->tx_ring_size;
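/*
 * nv_set_ringparam - resize the descriptor rings (ethtool -G). The new
 * rings and skb maps are allocated before anything is torn down, so a
 * failed allocation leaves the old, working rings in place; only after
 * the allocations succeed are the engines stopped and the device pointed
 * at the new rings.
 */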
4400 static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
4402 struct fe_priv *np = netdev_priv(dev);
4403 u8 __iomem *base = get_hwbase(dev);
4404 u8 *rxtx_ring, *rx_skbuff, *tx_skbuff;
4405 dma_addr_t ring_addr;
4407 if (ring->rx_pending < RX_RING_MIN ||
4408 ring->tx_pending < TX_RING_MIN ||
4409 ring->rx_mini_pending != 0 ||
4410 ring->rx_jumbo_pending != 0 ||
4411 (np->desc_ver == DESC_VER_1 &&
4412 (ring->rx_pending > RING_MAX_DESC_VER_1 ||
4413 ring->tx_pending > RING_MAX_DESC_VER_1)) ||
4414 (np->desc_ver != DESC_VER_1 &&
4415 (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
4416 ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
4420 /* allocate new rings */
4421 if (!nv_optimized(np)) {
4422 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4423 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4426 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4427 sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4430 rx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->rx_pending, GFP_KERNEL);
4431 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4432 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4433 /* fall back to old rings */
4434 if (!nv_optimized(np)) {
4436 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4437 rxtx_ring, ring_addr);
4440 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4441 rxtx_ring, ring_addr);
4449 if (netif_running(dev)) {
4450 nv_disable_irq(dev);
4451 nv_napi_disable(dev);
4452 netif_tx_lock_bh(dev);
4453 netif_addr_lock(dev);
4454 spin_lock(&np->lock);
4464 /* set new values */
4465 np->rx_ring_size = ring->rx_pending;
4466 np->tx_ring_size = ring->tx_pending;
4468 if (!nv_optimized(np)) {
4469 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4470 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4472 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4473 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4475 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4476 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4477 np->ring_addr = ring_addr;
4479 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
4480 memset(np->tx_skb, 0, sizeof(struct nv_skb_map) * np->tx_ring_size);
4482 if (netif_running(dev)) {
4483 /* reinit driver view of the queues */
4485 if (nv_init_ring(dev)) {
4486 if (!np->in_shutdown)
4487 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4490 /* reinit nic view of the queues */
4491 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4492 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4493 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4494 base + NvRegRingSizes);
4496 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4499 /* restart engines */
4501 spin_unlock(&np->lock);
4502 netif_addr_unlock(dev);
4503 netif_tx_unlock_bh(dev);
4504 nv_napi_enable(dev);
4512 static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4514 struct fe_priv *np = netdev_priv(dev);
4516 pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
4517 pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
4518 pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
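/*
 * nv_set_pauseparam - configure flow control (ethtool -A). Rx pause is
 * advertised as ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, tx-only pause
 * as ADVERTISE_PAUSE_ASYM alone; with autoneg enabled the link is
 * renegotiated, otherwise the requested pause modes are applied directly.
 */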
4521 static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
4523 struct fe_priv *np = netdev_priv(dev);
4526 if ((!np->autoneg && np->duplex == 0) ||
4527 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4528 printk(KERN_INFO "%s: cannot set pause parameters when the forced link is in half duplex.\n",
4532 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4533 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
4537 netif_carrier_off(dev);
4538 if (netif_running(dev)) {
4539 nv_disable_irq(dev);
4540 netif_tx_lock_bh(dev);
4541 netif_addr_lock(dev);
4542 spin_lock(&np->lock);
4545 spin_unlock(&np->lock);
4546 netif_addr_unlock(dev);
4547 netif_tx_unlock_bh(dev);
4550 np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
4551 if (pause->rx_pause)
4552 np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
4553 if (pause->tx_pause)
4554 np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
4556 if (np->autoneg && pause->autoneg) {
4557 np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
4559 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
4560 adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
4561 if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
4562 adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4563 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
4564 adv |= ADVERTISE_PAUSE_ASYM;
4565 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4567 if (netif_running(dev))
4568 printk(KERN_INFO "%s: link down.\n", dev->name);
4569 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4570 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4571 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
4573 np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
4574 if (pause->rx_pause)
4575 np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
4576 if (pause->tx_pause)
4577 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
4579 if (!netif_running(dev))
4580 nv_update_linkspeed(dev);
4582 nv_update_pause(dev, np->pause_flags);
4585 if (netif_running(dev)) {
4592 static u32 nv_get_rx_csum(struct net_device *dev)
4594 struct fe_priv *np = netdev_priv(dev);
4595 return np->rx_csum != 0;
4598 static int nv_set_rx_csum(struct net_device *dev, u32 data)
4600 struct fe_priv *np = netdev_priv(dev);
4601 u8 __iomem *base = get_hwbase(dev);
4604 if (np->driver_data & DEV_HAS_CHECKSUM) {
4607 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
4610 /* vlan is dependent on rx checksum offload */
4611 if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
4612 np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
4614 if (netif_running(dev)) {
4615 spin_lock_irq(&np->lock);
4616 writel(np->txrxctl_bits, base + NvRegTxRxControl);
4617 spin_unlock_irq(&np->lock);
4626 static int nv_set_tx_csum(struct net_device *dev, u32 data)
4628 struct fe_priv *np = netdev_priv(dev);
4630 if (np->driver_data & DEV_HAS_CHECKSUM)
4631 return ethtool_op_set_tx_csum(dev, data);
4636 static int nv_set_sg(struct net_device *dev, u32 data)
4638 struct fe_priv *np = netdev_priv(dev);
4640 if (np->driver_data & DEV_HAS_CHECKSUM)
4641 return ethtool_op_set_sg(dev, data);
4646 static int nv_get_sset_count(struct net_device *dev, int sset)
4648 struct fe_priv *np = netdev_priv(dev);
4652 if (np->driver_data & DEV_HAS_TEST_EXTENDED)
4653 return NV_TEST_COUNT_EXTENDED;
4655 return NV_TEST_COUNT_BASE;
4657 if (np->driver_data & DEV_HAS_STATISTICS_V3)
4658 return NV_DEV_STATISTICS_V3_COUNT;
4659 else if (np->driver_data & DEV_HAS_STATISTICS_V2)
4660 return NV_DEV_STATISTICS_V2_COUNT;
4661 else if (np->driver_data & DEV_HAS_STATISTICS_V1)
4662 return NV_DEV_STATISTICS_V1_COUNT;
4670 static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
4672 struct fe_priv *np = netdev_priv(dev);
4675 nv_do_stats_poll((unsigned long)dev);
4677 memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
4680 static int nv_link_test(struct net_device *dev)
4682 struct fe_priv *np = netdev_priv(dev);
4685 mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4686 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
4688 /* check phy link status */
4689 if (!(mii_status & BMSR_LSTATUS))
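/*
 * nv_register_test - walk the nv_registers_test table (terminated by a
 * zero register offset): xor each register with its mask to toggle the
 * testable bits, verify the toggled value reads back, then restore the
 * original value.
 */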
4695 static int nv_register_test(struct net_device *dev)
4697 u8 __iomem *base = get_hwbase(dev);
4699 u32 orig_read, new_read;
4702 orig_read = readl(base + nv_registers_test[i].reg);
4704 /* xor with mask to toggle bits */
4705 orig_read ^= nv_registers_test[i].mask;
4707 writel(orig_read, base + nv_registers_test[i].reg);
4709 new_read = readl(base + nv_registers_test[i].reg);
4711 if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
4714 /* restore original value */
4715 orig_read ^= nv_registers_test[i].mask;
4716 writel(orig_read, base + nv_registers_test[i].reg);
4718 } while (nv_registers_test[++i].reg != 0);
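/*
 * nv_interrupt_test - temporarily switch to a single interrupt vector
 * with only the timer irq enabled; the ISR sets np->intr_test, which is
 * sampled here to prove that interrupt delivery works. The original irq
 * configuration is restored before returning.
 */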
4723 static int nv_interrupt_test(struct net_device *dev)
4725 struct fe_priv *np = netdev_priv(dev);
4726 u8 __iomem *base = get_hwbase(dev);
4729 u32 save_msi_flags, save_poll_interval = 0;
4731 if (netif_running(dev)) {
4732 /* free current irq */
4734 save_poll_interval = readl(base+NvRegPollingInterval);
4737 /* flag to test interrupt handler */
4740 /* setup test irq */
4741 save_msi_flags = np->msi_flags;
4742 np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
4743 np->msi_flags |= 0x001; /* setup 1 vector */
4744 if (nv_request_irq(dev, 1))
4747 /* setup timer interrupt */
4748 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
4749 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4751 nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4753 /* wait for at least one interrupt */
4756 spin_lock_irq(&np->lock);
4758 /* flag should be set within ISR */
4759 testcnt = np->intr_test;
4763 nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
4764 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4765 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4767 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4769 spin_unlock_irq(&np->lock);
4773 np->msi_flags = save_msi_flags;
4775 if (netif_running(dev)) {
4776 writel(save_poll_interval, base + NvRegPollingInterval);
4777 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
4778 /* restore original irq */
4779 if (nv_request_irq(dev, 0))
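/*
 * nv_loopback_test - place the MAC in loopback via NvRegMisc1 and the
 * packet filter, transmit one ETH_DATA_LEN packet filled with an
 * incrementing byte pattern, and verify that it comes back with the same
 * length and payload.
 */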
4786 static int nv_loopback_test(struct net_device *dev)
4788 struct fe_priv *np = netdev_priv(dev);
4789 u8 __iomem *base = get_hwbase(dev);
4790 struct sk_buff *tx_skb, *rx_skb;
4791 dma_addr_t test_dma_addr;
4792 u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
4794 int len, i, pkt_len;
4796 u32 filter_flags = 0;
4797 u32 misc1_flags = 0;
4800 if (netif_running(dev)) {
4801 nv_disable_irq(dev);
4802 filter_flags = readl(base + NvRegPacketFilterFlags);
4803 misc1_flags = readl(base + NvRegMisc1);
4808 /* reinit driver view of the rx queue */
4812 /* setup hardware for loopback */
4813 writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
4814 writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
4816 /* reinit nic view of the rx queue */
4817 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4818 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4819 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4820 base + NvRegRingSizes);
4823 /* restart rx engine */
4826 /* setup packet for tx */
4827 pkt_len = ETH_DATA_LEN;
4828 tx_skb = dev_alloc_skb(pkt_len);
4830 printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
4831 " of %s\n", dev->name);
4835 test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
4836 skb_tailroom(tx_skb),
4837 PCI_DMA_FROMDEVICE);
4838 pkt_data = skb_put(tx_skb, pkt_len);
4839 for (i = 0; i < pkt_len; i++)
4840 pkt_data[i] = (u8)(i & 0xff);
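/* hand the packet to the hardware; original-format descriptors carry a
 * single 32-bit buffer address, extended descriptors split it into
 * high/low halves */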
4842 if (!nv_optimized(np)) {
4843 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4844 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4846 np->tx_ring.ex[0].bufhigh = cpu_to_le32(dma_high(test_dma_addr));
4847 np->tx_ring.ex[0].buflow = cpu_to_le32(dma_low(test_dma_addr));
4848 np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4850 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4851 pci_push(get_hwbase(dev));
4855 /* check for rx of the packet */
4856 if (!nv_optimized(np)) {
4857 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4858 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4861 flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
4862 len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
4865 if (flags & NV_RX_AVAIL) {
4867 } else if (np->desc_ver == DESC_VER_1) {
4868 if (flags & NV_RX_ERROR)
4871 if (flags & NV_RX2_ERROR)
4876 if (len != pkt_len) {
4878 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4879 dev->name, len, pkt_len);
4881 rx_skb = np->rx_skb[0].skb;
4882 for (i = 0; i < pkt_len; i++) {
4883 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4885 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4892 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4895 pci_unmap_single(np->pci_dev, test_dma_addr,
4896 (skb_end_pointer(tx_skb) - tx_skb->data),
4898 dev_kfree_skb_any(tx_skb);
4903 /* drain rx queue */
4906 if (netif_running(dev)) {
4907 writel(misc1_flags, base + NvRegMisc1);
4908 writel(filter_flags, base + NvRegPacketFilterFlags);
4915 static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
4917 struct fe_priv *np = netdev_priv(dev);
4918 u8 __iomem *base = get_hwbase(dev);
4920 memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
4922 if (!nv_link_test(dev)) {
4923 test->flags |= ETH_TEST_FL_FAILED;
4927 if (test->flags & ETH_TEST_FL_OFFLINE) {
4928 if (netif_running(dev)) {
4929 netif_stop_queue(dev);
4930 nv_napi_disable(dev);
4931 netif_tx_lock_bh(dev);
4932 netif_addr_lock(dev);
4933 spin_lock_irq(&np->lock);
4934 nv_disable_hw_interrupts(dev, np->irqmask);
4935 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4936 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4938 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4942 /* drain rx queue */
4944 spin_unlock_irq(&np->lock);
4945 netif_addr_unlock(dev);
4946 netif_tx_unlock_bh(dev);
4949 if (!nv_register_test(dev)) {
4950 test->flags |= ETH_TEST_FL_FAILED;
4954 result = nv_interrupt_test(dev);
4956 test->flags |= ETH_TEST_FL_FAILED;
4964 if (!nv_loopback_test(dev)) {
4965 test->flags |= ETH_TEST_FL_FAILED;
4969 if (netif_running(dev)) {
4970 /* reinit driver view of the rx queue */
4972 if (nv_init_ring(dev)) {
4973 if (!np->in_shutdown)
4974 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
4976 /* reinit nic view of the rx queue */
4977 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4978 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4979 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4980 base + NvRegRingSizes);
4982 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4984 /* restart rx engine */
4986 netif_start_queue(dev);
4987 nv_napi_enable(dev);
4988 nv_enable_hw_interrupts(dev, np->irqmask);
4993 static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
4995 switch (stringset) {
4997 memcpy(buffer, &nv_estats_str, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(struct nv_ethtool_str));
5000 memcpy(buffer, &nv_etests_str, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(struct nv_ethtool_str));
5005 static const struct ethtool_ops ops = {
5006 .get_drvinfo = nv_get_drvinfo,
5007 .get_link = ethtool_op_get_link,
5008 .get_wol = nv_get_wol,
5009 .set_wol = nv_set_wol,
5010 .get_settings = nv_get_settings,
5011 .set_settings = nv_set_settings,
5012 .get_regs_len = nv_get_regs_len,
5013 .get_regs = nv_get_regs,
5014 .nway_reset = nv_nway_reset,
5015 .set_tso = nv_set_tso,
5016 .get_ringparam = nv_get_ringparam,
5017 .set_ringparam = nv_set_ringparam,
5018 .get_pauseparam = nv_get_pauseparam,
5019 .set_pauseparam = nv_set_pauseparam,
5020 .get_rx_csum = nv_get_rx_csum,
5021 .set_rx_csum = nv_set_rx_csum,
5022 .set_tx_csum = nv_set_tx_csum,
5023 .set_sg = nv_set_sg,
5024 .get_strings = nv_get_strings,
5025 .get_ethtool_stats = nv_get_ethtool_stats,
5026 .get_sset_count = nv_get_sset_count,
5027 .self_test = nv_self_test,
5030 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
5032 struct fe_priv *np = get_nvpriv(dev);
5034 spin_lock_irq(&np->lock);
5036 /* save vlan group */
5040 /* enable vlan on MAC */
5041 np->txrxctl_bits |= NVREG_TXRXCTL_VLANSTRIP | NVREG_TXRXCTL_VLANINS;
5043 /* disable vlan on MAC */
5044 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANSTRIP;
5045 np->txrxctl_bits &= ~NVREG_TXRXCTL_VLANINS;
5048 writel(np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
5050 spin_unlock_irq(&np->lock);
5053 /* The mgmt unit and driver use a semaphore to access the phy during init */
5054 static int nv_mgmt_acquire_sema(struct net_device *dev)
5056 struct fe_priv *np = netdev_priv(dev);
5057 u8 __iomem *base = get_hwbase(dev);
5059 u32 tx_ctrl, mgmt_sema;
5061 for (i = 0; i < 10; i++) {
5062 mgmt_sema = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_SEMA_MASK;
5063 if (mgmt_sema == NVREG_XMITCTL_MGMT_SEMA_FREE)
5068 if (mgmt_sema != NVREG_XMITCTL_MGMT_SEMA_FREE)
5071 for (i = 0; i < 2; i++) {
5072 tx_ctrl = readl(base + NvRegTransmitterControl);
5073 tx_ctrl |= NVREG_XMITCTL_HOST_SEMA_ACQ;
5074 writel(tx_ctrl, base + NvRegTransmitterControl);
5076 /* verify that semaphore was acquired */
5077 tx_ctrl = readl(base + NvRegTransmitterControl);
5078 if (((tx_ctrl & NVREG_XMITCTL_HOST_SEMA_MASK) == NVREG_XMITCTL_HOST_SEMA_ACQ) &&
5079 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5089 static void nv_mgmt_release_sema(struct net_device *dev)
5091 struct fe_priv *np = netdev_priv(dev);
5092 u8 __iomem *base = get_hwbase(dev);
5095 if (np->driver_data & DEV_HAS_MGMT_UNIT) {
5096 if (np->mgmt_sema) {
5097 tx_ctrl = readl(base + NvRegTransmitterControl);
5098 tx_ctrl &= ~NVREG_XMITCTL_HOST_SEMA_ACQ;
5099 writel(tx_ctrl, base + NvRegTransmitterControl);
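/*
 * nv_mgmt_get_version - handshake with the management unit: toggle the
 * DATA_START bit and poll for up to five seconds until the DATA_READY
 * bit changes state, then read the firmware version from
 * NvRegMgmtUnitVersion.
 */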
5105 static int nv_mgmt_get_version(struct net_device *dev)
5107 struct fe_priv *np = netdev_priv(dev);
5108 u8 __iomem *base = get_hwbase(dev);
5109 u32 data_ready = readl(base + NvRegTransmitterControl);
5110 u32 data_ready2 = 0;
5111 unsigned long start;
5114 writel(NVREG_MGMTUNITGETVERSION, base + NvRegMgmtUnitGetVersion);
5115 writel(data_ready ^ NVREG_XMITCTL_DATA_START, base + NvRegTransmitterControl);
5117 while (time_before(jiffies, start + 5*HZ)) {
5118 data_ready2 = readl(base + NvRegTransmitterControl);
5119 if ((data_ready & NVREG_XMITCTL_DATA_READY) != (data_ready2 & NVREG_XMITCTL_DATA_READY)) {
5123 schedule_timeout_uninterruptible(1);
5126 if (!ready || (data_ready2 & NVREG_XMITCTL_DATA_ERROR))
5129 np->mgmt_version = readl(base + NvRegMgmtUnitVersion) & NVREG_MGMTUNITVERSION;
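/*
 * nv_open - bring the interface up: wake the phy, erase any previous
 * configuration, initialize the descriptor rings, program the ring,
 * offload and polling registers, request the irq(s), and finally enable
 * interrupts and do one manual link speed update.
 */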
5134 static int nv_open(struct net_device *dev)
5136 struct fe_priv *np = netdev_priv(dev);
5137 u8 __iomem *base = get_hwbase(dev);
5142 dprintk(KERN_DEBUG "nv_open: begin\n");
5145 mii_rw(dev, np->phyaddr, MII_BMCR,
5146 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
5148 nv_txrx_gate(dev, false);
5149 /* erase previous misconfiguration */
5150 if (np->driver_data & DEV_HAS_POWER_CNTRL)
5152 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5153 writel(0, base + NvRegMulticastAddrB);
5154 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5155 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5156 writel(0, base + NvRegPacketFilterFlags);
5158 writel(0, base + NvRegTransmitterControl);
5159 writel(0, base + NvRegReceiverControl);
5161 writel(0, base + NvRegAdapterControl);
5163 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
5164 writel(NVREG_TX_PAUSEFRAME_DISABLE, base + NvRegTxPauseFrame);
5166 /* initialize descriptor rings */
5168 oom = nv_init_ring(dev);
5170 writel(0, base + NvRegLinkSpeed);
5171 writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5173 writel(0, base + NvRegUnknownSetupReg6);
5175 np->in_shutdown = 0;
5178 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5179 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5180 base + NvRegRingSizes);
5182 writel(np->linkspeed, base + NvRegLinkSpeed);
5183 if (np->desc_ver == DESC_VER_1)
5184 writel(NVREG_TX_WM_DESC1_DEFAULT, base + NvRegTxWatermark);
5186 writel(NVREG_TX_WM_DESC2_3_DEFAULT, base + NvRegTxWatermark);
5187 writel(np->txrxctl_bits, base + NvRegTxRxControl);
5188 writel(np->vlanctl_bits, base + NvRegVlanControl);
5190 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5191 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5192 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
5193 KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
5195 writel(0, base + NvRegMIIMask);
5196 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5197 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5199 writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
5200 writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
5201 writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
5202 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5204 writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
5206 get_random_bytes(&low, sizeof(low));
5207 low &= NVREG_SLOTTIME_MASK;
5208 if (np->desc_ver == DESC_VER_1) {
5209 writel(low|NVREG_SLOTTIME_DEFAULT, base + NvRegSlotTime);
5211 if (!(np->driver_data & DEV_HAS_GEAR_MODE)) {
5212 /* setup legacy backoff */
5213 writel(NVREG_SLOTTIME_LEGBF_ENABLED|NVREG_SLOTTIME_10_100_FULL|low, base + NvRegSlotTime);
5215 writel(NVREG_SLOTTIME_10_100_FULL, base + NvRegSlotTime);
5216 nv_gear_backoff_reseed(dev);
5219 writel(NVREG_TX_DEFERRAL_DEFAULT, base + NvRegTxDeferral);
5220 writel(NVREG_RX_DEFERRAL_DEFAULT, base + NvRegRxDeferral);
5221 if (poll_interval == -1) {
5222 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT)
5223 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5225 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5227 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5228 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5229 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
5230 base + NvRegAdapterControl);
5231 writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
5232 writel(NVREG_MII_LINKCHANGE, base + NvRegMIIMask);
5234 writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
5236 i = readl(base + NvRegPowerState);
5237 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5238 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5242 writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
5244 nv_disable_hw_interrupts(dev, np->irqmask);
5246 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5247 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5250 if (nv_request_irq(dev, 0))
5253 /* ask for interrupts */
5254 nv_enable_hw_interrupts(dev, np->irqmask);
5256 spin_lock_irq(&np->lock);
5257 writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
5258 writel(0, base + NvRegMulticastAddrB);
5259 writel(NVREG_MCASTMASKA_NONE, base + NvRegMulticastMaskA);
5260 writel(NVREG_MCASTMASKB_NONE, base + NvRegMulticastMaskB);
5261 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5262 /* One manual link speed update: Interrupts are enabled, future link
5263 * speed changes cause interrupts and are handled by nv_link_irq().
5267 miistat = readl(base + NvRegMIIStatus);
5268 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5269 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5271 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5274 ret = nv_update_linkspeed(dev);
5276 netif_start_queue(dev);
5277 nv_napi_enable(dev);
5280 netif_carrier_on(dev);
5282 printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
5283 netif_carrier_off(dev);
5286 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
5288 /* start statistics timer */
5289 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5290 mod_timer(&np->stats_poll,
5291 round_jiffies(jiffies + STATS_INTERVAL));
5293 spin_unlock_irq(&np->lock);
5301 static int nv_close(struct net_device *dev)
5303 struct fe_priv *np = netdev_priv(dev);
5306 spin_lock_irq(&np->lock);
5307 np->in_shutdown = 1;
5308 spin_unlock_irq(&np->lock);
5309 nv_napi_disable(dev);
5310 synchronize_irq(np->pci_dev->irq);
5312 del_timer_sync(&np->oom_kick);
5313 del_timer_sync(&np->nic_poll);
5314 del_timer_sync(&np->stats_poll);
5316 netif_stop_queue(dev);
5317 spin_lock_irq(&np->lock);
5321 /* disable interrupts on the nic or we will lock up */
5322 base = get_hwbase(dev);
5323 nv_disable_hw_interrupts(dev, np->irqmask);
5325 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5327 spin_unlock_irq(&np->lock);
5333 if (np->wolenabled || !phy_power_down) {
5334 nv_txrx_gate(dev, false);
5335 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
5338 /* power down phy */
5339 mii_rw(dev, np->phyaddr, MII_BMCR,
5340 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ)|BMCR_PDOWN);
5341 nv_txrx_gate(dev, true);
5344 /* FIXME: power down nic */
5349 static const struct net_device_ops nv_netdev_ops = {
5350 .ndo_open = nv_open,
5351 .ndo_stop = nv_close,
5352 .ndo_get_stats = nv_get_stats,
5353 .ndo_start_xmit = nv_start_xmit,
5354 .ndo_tx_timeout = nv_tx_timeout,
5355 .ndo_change_mtu = nv_change_mtu,
5356 .ndo_validate_addr = eth_validate_addr,
5357 .ndo_set_mac_address = nv_set_mac_address,
5358 .ndo_set_multicast_list = nv_set_multicast,
5359 .ndo_vlan_rx_register = nv_vlan_rx_register,
5360 #ifdef CONFIG_NET_POLL_CONTROLLER
5361 .ndo_poll_controller = nv_poll_controller,
5365 static const struct net_device_ops nv_netdev_ops_optimized = {
5366 .ndo_open = nv_open,
5367 .ndo_stop = nv_close,
5368 .ndo_get_stats = nv_get_stats,
5369 .ndo_start_xmit = nv_start_xmit_optimized,
5370 .ndo_tx_timeout = nv_tx_timeout,
5371 .ndo_change_mtu = nv_change_mtu,
5372 .ndo_validate_addr = eth_validate_addr,
5373 .ndo_set_mac_address = nv_set_mac_address,
5374 .ndo_set_multicast_list = nv_set_multicast,
5375 .ndo_vlan_rx_register = nv_vlan_rx_register,
5376 #ifdef CONFIG_NET_POLL_CONTROLLER
5377 .ndo_poll_controller = nv_poll_controller,
5381 static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5383 struct net_device *dev;
5388 u32 powerstate, txreg;
5389 u32 phystate_orig = 0, phystate;
5390 int phyinitialized = 0;
5391 static int printed_version;
5393 if (!printed_version++)
5394 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
5395 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
5397 dev = alloc_etherdev(sizeof(struct fe_priv));
5402 np = netdev_priv(dev);
5404 np->pci_dev = pci_dev;
5405 spin_lock_init(&np->lock);
5406 SET_NETDEV_DEV(dev, &pci_dev->dev);
5408 init_timer(&np->oom_kick);
5409 np->oom_kick.data = (unsigned long) dev;
5410 np->oom_kick.function = nv_do_rx_refill; /* timer handler */
5411 init_timer(&np->nic_poll);
5412 np->nic_poll.data = (unsigned long) dev;
5413 np->nic_poll.function = nv_do_nic_poll; /* timer handler */
5414 init_timer(&np->stats_poll);
5415 np->stats_poll.data = (unsigned long) dev;
5416 np->stats_poll.function = nv_do_stats_poll; /* timer handler */
5418 err = pci_enable_device(pci_dev);
5422 pci_set_master(pci_dev);
5424 err = pci_request_regions(pci_dev, DRV_NAME);
5428 if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3))
5429 np->register_size = NV_PCI_REGSZ_VER3;
5430 else if (id->driver_data & DEV_HAS_STATISTICS_V1)
5431 np->register_size = NV_PCI_REGSZ_VER2;
5433 np->register_size = NV_PCI_REGSZ_VER1;
5437 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5438 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5439 pci_name(pci_dev), i, (void *)pci_resource_start(pci_dev, i),
5440 pci_resource_len(pci_dev, i),
5441 pci_resource_flags(pci_dev, i));
5442 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5443 pci_resource_len(pci_dev, i) >= np->register_size) {
5444 addr = pci_resource_start(pci_dev, i);
5448 if (i == DEVICE_COUNT_RESOURCE) {
5449 dev_printk(KERN_INFO, &pci_dev->dev,
5450 "Couldn't find register window\n");
5454 /* copy of driver data */
5455 np->driver_data = id->driver_data;
5456 /* copy of device id */
5457 np->device_id = id->device;
5459 /* handle different descriptor versions */
5460 if (id->driver_data & DEV_HAS_HIGH_DMA) {
5461 /* packet format 3: supports 40-bit addressing */
5462 np->desc_ver = DESC_VER_3;
5463 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5465 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5466 dev_printk(KERN_INFO, &pci_dev->dev,
5467 "64-bit DMA failed, using 32-bit addressing\n");
5469 dev->features |= NETIF_F_HIGHDMA;
5470 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5471 dev_printk(KERN_INFO, &pci_dev->dev,
5472 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5475 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
5476 /* packet format 2: supports jumbo frames */
5477 np->desc_ver = DESC_VER_2;
5478 np->txrxctl_bits = NVREG_TXRXCTL_DESC_2;
5480 /* original packet format */
5481 np->desc_ver = DESC_VER_1;
5482 np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
5485 np->pkt_limit = NV_PKTLIMIT_1;
5486 if (id->driver_data & DEV_HAS_LARGEDESC)
5487 np->pkt_limit = NV_PKTLIMIT_2;
5489 if (id->driver_data & DEV_HAS_CHECKSUM) {
5491 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5492 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5493 dev->features |= NETIF_F_TSO;
5494 dev->features |= NETIF_F_GRO;
5497 np->vlanctl_bits = 0;
5498 if (id->driver_data & DEV_HAS_VLAN) {
5499 np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
5500 dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
5503 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
5504 if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
5505 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
5506 (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) {
5507 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
5512 np->base = ioremap(addr, np->register_size);
5515 dev->base_addr = (unsigned long)np->base;
5517 dev->irq = pci_dev->irq;
5519 np->rx_ring_size = RX_RING_DEFAULT;
5520 np->tx_ring_size = TX_RING_DEFAULT;
5522 if (!nv_optimized(np)) {
5523 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5524 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5526 if (!np->rx_ring.orig)
5528 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
5530 np->rx_ring.ex = pci_alloc_consistent(pci_dev,
5531 sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
5533 if (!np->rx_ring.ex)
5535 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
5537 np->rx_skb = kcalloc(np->rx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5538 np->tx_skb = kcalloc(np->tx_ring_size, sizeof(struct nv_skb_map), GFP_KERNEL);
5539 if (!np->rx_skb || !np->tx_skb)
5542 if (!nv_optimized(np))
5543 dev->netdev_ops = &nv_netdev_ops;
5545 dev->netdev_ops = &nv_netdev_ops_optimized;
5547 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5548 SET_ETHTOOL_OPS(dev, &ops);
5549 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5551 pci_set_drvdata(pci_dev, dev);
5553 /* read the mac address */
5554 base = get_hwbase(dev);
5555 np->orig_mac[0] = readl(base + NvRegMacAddrA);
5556 np->orig_mac[1] = readl(base + NvRegMacAddrB);
5558 /* check the workaround bit for correct mac address order */
5559 txreg = readl(base + NvRegTransmitPoll);
5560 if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
5561 /* mac address is already in correct order */
5562 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5563 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5564 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5565 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5566 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5567 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5568 } else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
5569 /* mac address is already in correct order */
5570 dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
5571 dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
5572 dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
5573 dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
5574 dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
5575 dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
5577 * Set orig mac address back to the reversed version.
5578 * This flag will be cleared during low power transition.
5579 * Therefore, we should always put back the reversed address.
5581 np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
5582 (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
5583 np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
5585 /* need to reverse mac address to correct order */
5586 dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
5587 dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
5588 dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
5589 dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
5590 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5591 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5592 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5593 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
5595 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5597 if (!is_valid_ether_addr(dev->perm_addr)) {
5599 * Bad mac address. At least one bios sets the mac address
5600 * to 01:23:45:67:89:ab
5602 dev_printk(KERN_ERR, &pci_dev->dev,
5603 "Invalid Mac address detected: %pM\n",
5605 dev_printk(KERN_ERR, &pci_dev->dev,
5606 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5607 random_ether_addr(dev->dev_addr);
5610 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5611 pci_name(pci_dev), dev->dev_addr);
5613 /* set mac address */
5614 nv_copy_mac_to_hw(dev);
5616 /* Workaround current PCI init glitch: wakeup bits aren't
5617 * being set from PCI PM capability.
5619 device_init_wakeup(&pci_dev->dev, 1);
5622 writel(0, base + NvRegWakeUpFlags);
5625 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5627 /* take phy and nic out of low power mode */
5628 powerstate = readl(base + NvRegPowerState2);
5629 powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
5630 if ((id->driver_data & DEV_NEED_LOW_POWER_FIX) &&
5631 pci_dev->revision >= 0xA3)
5632 powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
5633 writel(powerstate, base + NvRegPowerState2);
5636 if (np->desc_ver == DESC_VER_1)
5637 np->tx_flags = NV_TX_VALID;
5639 np->tx_flags = NV_TX2_VALID;
5642 if ((id->driver_data & DEV_HAS_MSI) && msi)
5643 np->msi_flags |= NV_MSI_CAPABLE;
5645 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5646 /* msix has had reported issues when modifying the irqmask,
5647 as in the case of napi; it is therefore disabled for now
5650 np->msi_flags |= NV_MSI_X_CAPABLE;
5654 if (optimization_mode == NV_OPTIMIZATION_MODE_CPU) {
5655 np->irqmask = NVREG_IRQMASK_CPU;
5656 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5657 np->msi_flags |= 0x0001;
5658 } else if (optimization_mode == NV_OPTIMIZATION_MODE_DYNAMIC &&
5659 !(id->driver_data & DEV_NEED_TIMERIRQ)) {
5660 /* start off in throughput mode */
5661 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5662 /* remove support for msix mode */
5663 np->msi_flags &= ~NV_MSI_X_CAPABLE;
5665 optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
5666 np->irqmask = NVREG_IRQMASK_THROUGHPUT;
5667 if (np->msi_flags & NV_MSI_X_CAPABLE) /* set number of vectors */
5668 np->msi_flags |= 0x0003;
5671 if (id->driver_data & DEV_NEED_TIMERIRQ)
5672 np->irqmask |= NVREG_IRQ_TIMER;
5673 if (id->driver_data & DEV_NEED_LINKTIMER) {
5674 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5675 np->need_linktimer = 1;
5676 np->link_timeout = jiffies + LINK_TIMEOUT;
5678 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5679 np->need_linktimer = 0;
5682 /* Limit the number of outstanding tx descriptors to work around a hw bug */
5683 if (id->driver_data & DEV_NEED_TX_LIMIT) {
5685 if (((id->driver_data & DEV_NEED_TX_LIMIT2) == DEV_NEED_TX_LIMIT2) &&
5686 pci_dev->revision >= 0xA2)
5690 /* clear phy state and temporarily halt phy interrupts */
5691 writel(0, base + NvRegMIIMask);
5692 phystate = readl(base + NvRegAdapterControl);
5693 if (phystate & NVREG_ADAPTCTL_RUNNING) {
5695 phystate &= ~NVREG_ADAPTCTL_RUNNING;
5696 writel(phystate, base + NvRegAdapterControl);
5698 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5700 if (id->driver_data & DEV_HAS_MGMT_UNIT) {
5701 /* management unit running on the mac? */
5702 if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST) &&
5703 (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) &&
5704 nv_mgmt_acquire_sema(dev) &&
5705 nv_mgmt_get_version(dev)) {
5707 if (np->mgmt_version > 0)
5708 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5709 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5710 pci_name(pci_dev), np->mac_in_use);
5711 /* did the management unit already set up the phy? */
5712 if (np->mac_in_use &&
5713 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5714 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5715 /* phy is inited by mgmt unit */
5717 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5720 /* we need to init the phy */
5725 /* find a suitable phy */
5726 for (i = 1; i <= 32; i++) {
5728 int phyaddr = i & 0x1F;
5730 spin_lock_irq(&np->lock);
5731 id1 = mii_rw(dev, phyaddr, MII_PHYSID1, MII_READ);
5732 spin_unlock_irq(&np->lock);
5733 if (id1 < 0 || id1 == 0xffff)
5735 spin_lock_irq(&np->lock);
5736 id2 = mii_rw(dev, phyaddr, MII_PHYSID2, MII_READ);
5737 spin_unlock_irq(&np->lock);
5738 if (id2 < 0 || id2 == 0xffff)
5741 np->phy_model = id2 & PHYID2_MODEL_MASK;
5742 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5743 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5744 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5745 pci_name(pci_dev), id1, id2, phyaddr);
5746 np->phyaddr = phyaddr;
5747 np->phy_oui = id1 | id2;
5749 /* Realtek hardcoded phy id1 to all zeros on certain phys */
5750 if (np->phy_oui == PHY_OUI_REALTEK2)
5751 np->phy_oui = PHY_OUI_REALTEK;
5752 /* Setup phy revision for Realtek */
5753 if (np->phy_oui == PHY_OUI_REALTEK && np->phy_model == PHY_MODEL_REALTEK_8211)
5754 np->phy_rev = mii_rw(dev, phyaddr, MII_RESV1, MII_READ) & PHY_REV_MASK;
5759 dev_printk(KERN_INFO, &pci_dev->dev,
5760 "open: Could not find a valid PHY.\n");
5764 if (!phyinitialized) {
5768 /* see if it is a gigabit phy */
5769 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5770 if (mii_status & PHY_GIGABIT)
5771 np->gigabit = PHY_GIGABIT;
5774 /* set default link speed settings */
5775 np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
5779 err = register_netdev(dev);
5781 dev_printk(KERN_INFO, &pci_dev->dev,
5782 "unable to register netdev: %d\n", err);
5786 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
5787 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
5798 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5799 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5800 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5802 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5804 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5805 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5806 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5807 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5808 np->need_linktimer ? "lnktim " : "",
5809 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5810 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5817 writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
5818 pci_set_drvdata(pci_dev, NULL);
5822 iounmap(get_hwbase(dev));
5824 pci_release_regions(pci_dev);
5826 pci_disable_device(pci_dev);
5833 static void nv_restore_phy(struct net_device *dev)
5835 struct fe_priv *np = netdev_priv(dev);
5836 u16 phy_reserved, mii_control;
5838 if (np->phy_oui == PHY_OUI_REALTEK &&
5839 np->phy_model == PHY_MODEL_REALTEK_8201 &&
5840 phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
5841 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3);
5842 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
5843 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
5844 phy_reserved |= PHY_REALTEK_INIT8;
5845 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved);
5846 mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1);
5848 /* restart auto negotiation */
5849 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
5850 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
5851 mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
5855 static void nv_restore_mac_addr(struct pci_dev *pci_dev)
5857 struct net_device *dev = pci_get_drvdata(pci_dev);
5858 struct fe_priv *np = netdev_priv(dev);
5859 u8 __iomem *base = get_hwbase(dev);
5861 /* special op: write back the misordered MAC address - otherwise
5862 * the next nv_probe would see a wrong address.
5864 writel(np->orig_mac[0], base + NvRegMacAddrA);
5865 writel(np->orig_mac[1], base + NvRegMacAddrB);
5866 writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
5867 base + NvRegTransmitPoll);
5870 static void __devexit nv_remove(struct pci_dev *pci_dev)
5872 struct net_device *dev = pci_get_drvdata(pci_dev);
5874 unregister_netdev(dev);
5876 nv_restore_mac_addr(pci_dev);
5878 /* restore any phy related changes */
5879 nv_restore_phy(dev);
5881 nv_mgmt_release_sema(dev);
5883 /* free all structures */
5885 iounmap(get_hwbase(dev));
5886 pci_release_regions(pci_dev);
5887 pci_disable_device(pci_dev);
5889 pci_set_drvdata(pci_dev, NULL);
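/*
 * PM callbacks. Besides the standard PCI state, the driver saves and
 * restores the whole register window by hand (np->saved_config_space),
 * since the MAC registers are not part of the config space covered by
 * pci_save_state().
 */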
5893 static int nv_suspend(struct pci_dev *pdev, pm_message_t state)
5895 struct net_device *dev = pci_get_drvdata(pdev);
5896 struct fe_priv *np = netdev_priv(dev);
5897 u8 __iomem *base = get_hwbase(dev);
5900 if (netif_running(dev)) {
5904 netif_device_detach(dev);
5906 /* save non-pci configuration space */
5907 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5908 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5910 pci_save_state(pdev);
5911 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5912 pci_disable_device(pdev);
5913 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5917 static int nv_resume(struct pci_dev *pdev)
5919 struct net_device *dev = pci_get_drvdata(pdev);
5920 struct fe_priv *np = netdev_priv(dev);
5921 u8 __iomem *base = get_hwbase(dev);
5924 pci_set_power_state(pdev, PCI_D0);
5925 pci_restore_state(pdev);
5926 /* ack any pending wake events, disable PME */
5927 pci_enable_wake(pdev, PCI_D0, 0);
5929 /* restore non-pci configuration space */
5930 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5931 writel(np->saved_config_space[i], base+i*sizeof(u32));
5933 if (np->driver_data & DEV_NEED_MSI_FIX)
5934 pci_write_config_dword(pdev, NV_MSI_PRIV_OFFSET, NV_MSI_PRIV_VALUE);
5936 /* restore phy state, including autoneg */
5939 netif_device_attach(dev);
5940 if (netif_running(dev)) {
5942 nv_set_multicast(dev);
5947 static void nv_shutdown(struct pci_dev *pdev)
5949 struct net_device *dev = pci_get_drvdata(pdev);
5950 struct fe_priv *np = netdev_priv(dev);
5952 if (netif_running(dev))
5956 * Restore the MAC so a kernel started by kexec won't get confused.
5957 * If we really go for poweroff, we must not restore the MAC,
5958 * otherwise the MAC for WOL will be reversed at least on some boards.
5960 if (system_state != SYSTEM_POWER_OFF)
5961 nv_restore_mac_addr(pdev);
5963 pci_disable_device(pdev);
5965 * Apparently it is not possible to reinitialise from D3 hot,
5966 * so only put the device into D3 if we really go for poweroff.
5968 if (system_state == SYSTEM_POWER_OFF) {
5969 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
5970 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
5971 pci_set_power_state(pdev, PCI_D3hot);
5975 #define nv_suspend NULL
5976 #define nv_shutdown NULL
5977 #define nv_resume NULL
5978 #endif /* CONFIG_PM */
static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
	{	/* nForce Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x01C3),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce2 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0066),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00D6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0086),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x008C),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00E6),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* nForce3 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x00DF),
		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0056),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* CK804 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0057),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0037),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP04 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0038),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_STATISTICS_V1|DEV_NEED_TX_LIMIT,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0268),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP51 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0269),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_STATISTICS_V1|DEV_NEED_LOW_POWER_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0372),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP55 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0373),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_NEED_TX_LIMIT|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E5),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03E6),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP61 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x03EF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0450),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0451),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0452),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP65 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0453),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054C),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054E),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP67 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x054F),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DC),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DD),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DE),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP73 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x07DF),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V12|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0760),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0761),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0762),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP77 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0763),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB0),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB1),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB2),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP79 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0AB3),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT2|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX|DEV_NEED_MSI_FIX,
	},
	{	/* MCP89 Ethernet Controller */
		PCI_DEVICE(0x10DE, 0x0D7D),
		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V123|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_HAS_GEAR_MODE|DEV_NEED_PHY_INIT_FIX,
	},
	{0,},
};

static struct pci_driver driver = {
	.name		= DRV_NAME,
	.id_table	= pci_tbl,
	.probe		= nv_probe,
	.remove		= __devexit_p(nv_remove),
	.suspend	= nv_suspend,
	.resume		= nv_resume,
	.shutdown	= nv_shutdown,
};
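
/*
 * Without CONFIG_PM the suspend/resume/shutdown hooks above are defined
 * as NULL, and the PCI core falls back to its default power-management
 * handling for this device.
 */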

static int __init init_nic(void)
{
	return pci_register_driver(&driver);
}

static void __exit exit_nic(void)
{
	pci_unregister_driver(&driver);
}

module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "forcedeth maximum events handled per interrupt");
module_param(optimization_mode, int, 0);
MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer. In dynamic mode (2), the mode toggles between throughput and CPU mode based on network load.");
module_param(poll_interval, int, 0);
MODULE_PARM_DESC(poll_interval, "Interval determines how frequently the timer interrupt is generated, as [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
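/*
 * Worked example: the default 100 Hz poll rate corresponds to a period
 * of 10000 microseconds, and (10000 * 100) / 2^10 is roughly 976, close
 * to the value the driver programs by default (see NVREG_POLL_DEFAULT).
 */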
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(msix, int, 0);
MODULE_PARM_DESC(msix, "MSI-X interrupts are enabled by setting to 1 and disabled by setting to 0.");
module_param(dma_64bit, int, 0);
MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
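
/*
 * Illustrative load-time usage (example values, not recommendations):
 *   modprobe forcedeth optimization_mode=2 msi=1 dma_64bit=1
 * Parameters left unset keep the driver defaults.
 */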

MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
MODULE_LICENSE("GPL");

MODULE_DEVICE_TABLE(pci, pci_tbl);

module_init(init_nic);
module_exit(exit_nic);