/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>

#include "e1000.h"
#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.1.4" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);

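/* Board-specific info/ops structures, indexed by the board_* value that the
 * PCI device table stores in driver_data.
 */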
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
};

struct e1000_reg_info {
	u32 ofs;
	char *name;
};

#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */

#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */

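/* Registers dumped by e1000e_dump().  Queue registers are listed for queue 0
 * only; e1000_regdump() prints both queue 0 and 1 for RXDCTL, TXDCTL and TARC.
 */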
static const struct e1000_reg_info e1000_reg_info_tbl[] = {

	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};

/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}

static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}

/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n",
			netdev->name, netdev->state, netdev->trans_start,
			netdev->last_rx);
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		return;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Legacy format\n");
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Context format\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestamp        bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}

	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		return;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 *  0 |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 *  8 |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
		 *   | Checksum | Ident  |         | Queue |      |  Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [ck ipid mrqhsh] [vl   l0 ee  es] [ l3  l2  l1 hs] [reserved      ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
	case 0:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc]      [buf addr 63:0 ] [reserved 63:0 ] [bi->dma       ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |              RSS Hash                 |      |       |
		 * 0 +-------------------+  Rsvd  |  Reserved    | MRQ RSS |
		 *   | Packet   | IP     |        |              |  Type   |
		 *   | Checksum | Ident  |        |              |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc]      [cs ipid    mrq] [vt   ln xe  xs] [bi->skb] <-- Ext (Write-Back) format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}
}

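/* A ring keeps one descriptor slot permanently unused so that a full ring
 * (next_to_use one behind next_to_clean) can be told apart from an empty one
 * (next_to_use == next_to_clean); hence the "- 1" in both branches below.
 */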
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to the net device structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u8 status, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	skb->protocol = eth_type_trans(skb, netdev);

	if (status & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(&adapter->napi, skb);
}

/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}

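/* Tail-update helpers for the PCIm2PCI arbiter workaround: when Manageability
 * Engine (ME) firmware owns the bus, a posted tail write can be corrupted, so
 * read the tail back and schedule a full reset if the write did not stick.
 */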
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);
		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);
		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}

/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev,
						  adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}

/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma)
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}

static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb->rxhash = le32_to_cpu(rss);
}

/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev,
				 buffer_info->dma,
				 adapter->rx_buffer_len,
				 DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet, if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak, this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb_ip_align(netdev, length);
			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

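/* Release everything held by one Tx buffer_info entry: undo the DMA mapping
 * (page or single, as recorded at map time) and free the skb, if any.
 */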
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}

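/* Worker scheduled from e1000_clean_tx_irq() when a Tx hang is suspected;
 * double-checks for a false hit caused by delayed descriptor write-back,
 * then dumps ring, MAC, PHY and PCI state to aid debugging.
 */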
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck &&
	    (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be block on write-back, flush and detect again
		 * flush pending descriptor writebacks to memory
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/* Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	/* Real hang detected */
	adapter->tx_hang_recheck = false;
	netif_stop_queue(netdev);

	e1e_rphy(hw, PHY_STATUS, &phy_status);
	e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
	e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head),
	      readl(tx_ring->tail),
	      tx_ring->next_to_use,
	      tx_ring->next_to_clean,
	      tx_ring->buffer_info[eop].time_stamp,
	      eop,
	      jiffies,
	      eop_desc->upper.fields.status,
	      er32(STATUS),
	      phy_status,
	      phy_1000t_status,
	      phy_ext_status,
	      pci_status);

	/* Suggest workaround for known h/w issue */
	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
		e_err("Try turning off Tx pause (flow control) via ethtool\n");
}

/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();		/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

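	/* Wake the stopped queue only once a healthy number of descriptors is
	 * free again, rather than on every reclaimed descriptor, to avoid
	 * bouncing the queue between the stopped and running states.
	 */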
#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}

/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and effects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc, problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb,
				  staterr, rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}

/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}

#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}

/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE, DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				break;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		e1000e_update_rdt_wa(rx_ring, 0);
	else
		writel(0, rx_ring->tail);
}

static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
					struct e1000_adapter, downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}

/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround-- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround-- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround--
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RX_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t e1000_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_tx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}

static irqreturn_t e1000_intr_msix_rx(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (rx_ring->set_itr) {
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);
		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

#define E1000_IVAR_INT_ALLOC_VALID	0x8
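	/* Each interrupt cause gets a 4-bit IVAR entry: the vector number
	 * plus the valid bit above.  Rx queue 0 lives in bits 3:0, Tx queue 0
	 * in bits 11:8 and "other" causes in bits 19:16; 'ivar' is assembled
	 * below and written to the register once at the end.
	 */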
	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
#define E1000_EIAC_MASK_82574	0x01F00000
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}

/**
 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
{
	int err;
	int i;

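	/* Step down one mode at a time on failure, using the switch
	 * fall-throughs below: MSI-X -> MSI -> legacy interrupts.
	 */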
	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		if (adapter->flags & FLAG_HAS_MSIX) {
			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
			adapter->msix_entries = kcalloc(adapter->num_vectors,
							sizeof(struct msix_entry),
							GFP_KERNEL);
			if (adapter->msix_entries) {
				for (i = 0; i < adapter->num_vectors; i++)
					adapter->msix_entries[i].entry = i;

				err = pci_enable_msix(adapter->pdev,
						      adapter->msix_entries,
						      adapter->num_vectors);
				if (err == 0)
					return;
			}
			/* MSI-X failed, so fall through and try MSI */
			e_err("Failed to initialize MSI-X interrupts.  Falling back to MSI interrupts.\n");
			e1000e_reset_interrupt_capability(adapter);
		}
		adapter->int_mode = E1000E_INT_MODE_MSI;
		/* Fall through */
	case E1000E_INT_MODE_MSI:
		if (!pci_enable_msi(adapter->pdev)) {
			adapter->flags |= FLAG_MSI_ENABLED;
		} else {
			adapter->int_mode = E1000E_INT_MODE_LEGACY;
			e_err("Failed to initialize MSI interrupts.  Falling back to legacy interrupts.\n");
		}
		/* Fall through */
	case E1000E_INT_MODE_LEGACY:
		/* Don't do anything; this is the system default */
		break;
	}

	/* store the number of vectors being used */
	adapter->num_vectors = 1;
}

/**
 * e1000_request_msix - Initialize MSI-X interrupts
 *
 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int e1000_request_msix(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0, vector = 0;

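	/* Name the queue vectors "<ifname>-rx-0"/"<ifname>-tx-0" when the
	 * suffix still fits within IFNAMSIZ; otherwise fall back to the bare
	 * interface name.
	 */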
	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->rx_ring->name,
			 sizeof(adapter->rx_ring->name) - 1,
			 "%s-rx-0", netdev->name);
	else
		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_rx, 0, adapter->rx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->rx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->rx_ring->itr_val = adapter->itr;
	vector++;

	if (strlen(netdev->name) < (IFNAMSIZ - 5))
		snprintf(adapter->tx_ring->name,
			 sizeof(adapter->tx_ring->name) - 1,
			 "%s-tx-0", netdev->name);
	else
		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_intr_msix_tx, 0, adapter->tx_ring->name,
			  netdev);
	if (err)
		return err;
	adapter->tx_ring->itr_register = adapter->hw.hw_addr +
	    E1000_EITR_82574(vector);
	adapter->tx_ring->itr_val = adapter->itr;
	vector++;

	err = request_irq(adapter->msix_entries[vector].vector,
			  e1000_msix_other, 0, netdev->name, netdev);
	if (err)
		return err;

	e1000_configure_msix(adapter);

	return 0;
}

/**
 * e1000_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (adapter->msix_entries) {
		err = e1000_request_msix(adapter);
		if (!err)
			return err;
		/* fall back to MSI */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_MSI;
		e1000e_set_interrupt_capability(adapter);
	}
	if (adapter->flags & FLAG_MSI_ENABLED) {
		err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			return err;

		/* fall back to legacy interrupt */
		e1000e_reset_interrupt_capability(adapter);
		adapter->int_mode = E1000E_INT_MODE_LEGACY;
	}

	err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
			  netdev->name, netdev);
	if (err)
		e_err("Unable to allocate interrupt, Error: %d\n", err);

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		free_irq(adapter->msix_entries[vector].vector, netdev);
		vector++;

		/* Other Causes interrupt vector */
		free_irq(adapter->msix_entries[vector].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}

/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, 0xffffffff);
	if (adapter->msix_entries)
		ew32(EIAC_82574, 0);
	e1e_flush();

	if (adapter->msix_entries) {
		int i;
		for (i = 0; i < adapter->num_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
	} else {
		ew32(IMS, IMS_ENABLE_MASK);
	}
	e1e_flush();
}

/**
 * e1000e_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
 **/
void e1000e_get_hw_control(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;
	u32 swsm;

	/* Let firmware know the driver has taken over */
	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
		ctrl_ext = er32(CTRL_EXT);
		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
	}
}

2139 * e1000e_release_hw_control - release control of the h/w to f/w
2140 * @adapter: address of board private structure
2142 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2143 * For ASF and Pass Through versions of f/w this means that the
2144 * driver is no longer loaded. For AMT version (only with 82573)
2145 * of the f/w this means that the network i/f is closed.
2148 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2150 struct e1000_hw *hw = &adapter->hw;
2154 /* Let firmware take over control of h/w */
2155 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2156 swsm = er32(SWSM);
2157 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2158 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2159 ctrl_ext = er32(CTRL_EXT);
2160 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2165 * e1000_alloc_ring_dma - allocate memory for a ring structure
2167 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2168 struct e1000_ring *ring)
2170 struct pci_dev *pdev = adapter->pdev;
2172 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2181 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2182 * @tx_ring: Tx descriptor ring
2184 * Returns 0 on success, negative on failure
2186 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2188 struct e1000_adapter *adapter = tx_ring->adapter;
2189 int err = -ENOMEM, size;
2191 size = sizeof(struct e1000_buffer) * tx_ring->count;
2192 tx_ring->buffer_info = vzalloc(size);
2193 if (!tx_ring->buffer_info)
2196 /* round up to nearest 4K */
2197 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2198 tx_ring->size = ALIGN(tx_ring->size, 4096);
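/* Illustrative sizing, not part of the original source: assuming the
 * usual default of 256 descriptors and the 16-byte legacy Tx
 * descriptor, 256 * 16 = 4096 bytes, so the ring fills exactly one
 * 4 KiB page and ALIGN() leaves it unchanged; a 320-descriptor ring
 * (5120 bytes) would be rounded up to 8192.
 */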
2200 err = e1000_alloc_ring_dma(adapter, tx_ring);
2204 tx_ring->next_to_use = 0;
2205 tx_ring->next_to_clean = 0;
2209 vfree(tx_ring->buffer_info);
2210 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2215 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2216 * @rx_ring: Rx descriptor ring
2218 * Returns 0 on success, negative on failure
2220 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2222 struct e1000_adapter *adapter = rx_ring->adapter;
2223 struct e1000_buffer *buffer_info;
2224 int i, size, desc_len, err = -ENOMEM;
2226 size = sizeof(struct e1000_buffer) * rx_ring->count;
2227 rx_ring->buffer_info = vzalloc(size);
2228 if (!rx_ring->buffer_info)
2231 for (i = 0; i < rx_ring->count; i++) {
2232 buffer_info = &rx_ring->buffer_info[i];
2233 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2234 sizeof(struct e1000_ps_page),
2236 if (!buffer_info->ps_pages)
2240 desc_len = sizeof(union e1000_rx_desc_packet_split);
2242 /* Round up to nearest 4K */
2243 rx_ring->size = rx_ring->count * desc_len;
2244 rx_ring->size = ALIGN(rx_ring->size, 4096);
2246 err = e1000_alloc_ring_dma(adapter, rx_ring);
2250 rx_ring->next_to_clean = 0;
2251 rx_ring->next_to_use = 0;
2252 rx_ring->rx_skb_top = NULL;
2257 for (i = 0; i < rx_ring->count; i++) {
2258 buffer_info = &rx_ring->buffer_info[i];
2259 kfree(buffer_info->ps_pages);
2262 vfree(rx_ring->buffer_info);
2263 e_err("Unable to allocate memory for the receive descriptor ring\n");
2268 * e1000_clean_tx_ring - Free Tx Buffers
2269 * @tx_ring: Tx descriptor ring
2271 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2273 struct e1000_adapter *adapter = tx_ring->adapter;
2274 struct e1000_buffer *buffer_info;
2278 for (i = 0; i < tx_ring->count; i++) {
2279 buffer_info = &tx_ring->buffer_info[i];
2280 e1000_put_txbuf(tx_ring, buffer_info);
2283 netdev_reset_queue(adapter->netdev);
2284 size = sizeof(struct e1000_buffer) * tx_ring->count;
2285 memset(tx_ring->buffer_info, 0, size);
2287 memset(tx_ring->desc, 0, tx_ring->size);
2289 tx_ring->next_to_use = 0;
2290 tx_ring->next_to_clean = 0;
2292 writel(0, tx_ring->head);
2293 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2294 e1000e_update_tdt_wa(tx_ring, 0);
2295 else
2296 writel(0, tx_ring->tail);
2300 * e1000e_free_tx_resources - Free Tx Resources per Queue
2301 * @tx_ring: Tx descriptor ring
2303 * Free all transmit software resources
2305 void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2307 struct e1000_adapter *adapter = tx_ring->adapter;
2308 struct pci_dev *pdev = adapter->pdev;
2310 e1000_clean_tx_ring(tx_ring);
2312 vfree(tx_ring->buffer_info);
2313 tx_ring->buffer_info = NULL;
2315 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2317 tx_ring->desc = NULL;
2321 * e1000e_free_rx_resources - Free Rx Resources
2322 * @rx_ring: Rx descriptor ring
2324 * Free all receive software resources
2326 void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2328 struct e1000_adapter *adapter = rx_ring->adapter;
2329 struct pci_dev *pdev = adapter->pdev;
2332 e1000_clean_rx_ring(rx_ring);
2334 for (i = 0; i < rx_ring->count; i++)
2335 kfree(rx_ring->buffer_info[i].ps_pages);
2337 vfree(rx_ring->buffer_info);
2338 rx_ring->buffer_info = NULL;
2340 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2342 rx_ring->desc = NULL;
2346 * e1000_update_itr - update the dynamic ITR value based on statistics
2347 * @adapter: pointer to adapter
2348 * @itr_setting: current adapter->itr
2349 * @packets: the number of packets during this measurement interval
2350 * @bytes: the number of bytes during this measurement interval
2352 * Stores a new ITR value based on packets and byte
2353 * counts during the last interrupt. The advantage of per interrupt
2354 * computation is faster updates and more accurate ITR for the current
2355 * traffic pattern. Constants in this function were computed
2356 * based on theoretical maximum wire speed and thresholds were set based
2357 * on testing data as well as attempting to minimize response time
2358 * while increasing bulk throughput. This functionality is controlled
2359 * by the InterruptThrottleRate module parameter.
2361 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2362 u16 itr_setting, int packets,
2365 unsigned int retval = itr_setting;
2370 switch (itr_setting) {
2371 case lowest_latency:
2372 /* handle TSO and jumbo frames */
2373 if (bytes/packets > 8000)
2374 retval = bulk_latency;
2375 else if ((packets < 5) && (bytes > 512))
2376 retval = low_latency;
2377 break;
2378 case low_latency: /* 50 usec aka 20000 ints/s */
2379 if (bytes > 10000) {
2380 /* this if handles the TSO accounting */
2381 if (bytes/packets > 8000)
2382 retval = bulk_latency;
2383 else if ((packets < 10) || ((bytes/packets) > 1200))
2384 retval = bulk_latency;
2385 else if ((packets > 35))
2386 retval = lowest_latency;
2387 } else if (bytes/packets > 2000) {
2388 retval = bulk_latency;
2389 } else if (packets <= 2 && bytes < 512) {
2390 retval = lowest_latency;
2391 }
2392 break;
2393 case bulk_latency: /* 250 usec aka 4000 ints/s */
2394 if (bytes > 25000) {
2395 if (packets > 35)
2396 retval = low_latency;
2397 } else if (bytes < 6000) {
2398 retval = low_latency;
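/* Reader aid, restating the transitions coded above rather than adding
 * policy: lowest_latency escalates to bulk_latency when the average
 * packet exceeds 8000 bytes, or to low_latency on sparse traffic;
 * low_latency escalates to bulk_latency on heavy or large-packet
 * intervals, and drops to lowest_latency either when a busy interval
 * carries more than 35 packets or when the interval is nearly idle;
 * bulk_latency relaxes to low_latency when an interval is very busy
 * (> 25000 bytes with > 35 packets) or very light (< 6000 bytes).
 */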
2406 static void e1000_set_itr(struct e1000_adapter *adapter)
2408 struct e1000_hw *hw = &adapter->hw;
2410 u32 new_itr = adapter->itr;
2412 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2413 if (adapter->link_speed != SPEED_1000) {
2419 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2424 adapter->tx_itr = e1000_update_itr(adapter,
2426 adapter->total_tx_packets,
2427 adapter->total_tx_bytes);
2428 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2429 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2430 adapter->tx_itr = low_latency;
2432 adapter->rx_itr = e1000_update_itr(adapter,
2434 adapter->total_rx_packets,
2435 adapter->total_rx_bytes);
2436 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2437 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2438 adapter->rx_itr = low_latency;
2440 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2442 switch (current_itr) {
2443 /* counts and packets in update_itr are dependent on these numbers */
2444 case lowest_latency:
2445 new_itr = 70000;
2446 break;
2447 case low_latency:
2448 new_itr = 20000; /* aka hwitr = ~200 */
2458 if (new_itr != adapter->itr) {
2459 /* this attempts to bias the interrupt rate towards Bulk
2460 * by adding intermediate steps when the interrupt rate is increasing */
2463 new_itr = new_itr > adapter->itr ?
2464 min(adapter->itr + (new_itr >> 2), new_itr) :
2465 new_itr;
2466 adapter->itr = new_itr;
2467 adapter->rx_ring->itr_val = new_itr;
2468 if (adapter->msix_entries)
2469 adapter->rx_ring->set_itr = 1;
2472 ew32(ITR, 1000000000 / (new_itr * 256));
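/* Worked example of the conversion above (illustrative only): new_itr
 * is in interrupts/sec and the ITR register counts 256 ns units, so
 * 20000 ints/s -> 10^9 / (20000 * 256) ~= 195 register units, i.e.
 * roughly one interrupt every 50 us, matching the low_latency comment
 * in e1000_update_itr().
 */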
2479 * e1000e_write_itr - write the ITR value to the appropriate registers
2480 * @adapter: address of board private structure
2481 * @itr: new ITR value to program
2483 * e1000e_write_itr determines if the adapter is in MSI-X mode
2484 * and, if so, writes the EITR registers with the ITR value.
2485 * Otherwise, it writes the ITR value into the ITR register.
2487 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2489 struct e1000_hw *hw = &adapter->hw;
2490 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2492 if (adapter->msix_entries) {
2495 for (vector = 0; vector < adapter->num_vectors; vector++)
2496 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2503 * e1000_alloc_queues - Allocate memory for all rings
2504 * @adapter: board private structure to initialize
2506 static int e1000_alloc_queues(struct e1000_adapter *adapter)
2508 int size = sizeof(struct e1000_ring);
2510 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2511 if (!adapter->tx_ring)
2513 adapter->tx_ring->count = adapter->tx_ring_count;
2514 adapter->tx_ring->adapter = adapter;
2516 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2517 if (!adapter->rx_ring)
2519 adapter->rx_ring->count = adapter->rx_ring_count;
2520 adapter->rx_ring->adapter = adapter;
2524 e_err("Unable to allocate memory for queues\n");
2525 kfree(adapter->rx_ring);
2526 kfree(adapter->tx_ring);
2531 * e1000e_poll - NAPI Rx polling callback
2532 * @napi: struct associated with this polling callback
2533 * @weight: number of packets driver is allowed to process this poll
2535 static int e1000e_poll(struct napi_struct *napi, int weight)
2537 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2539 struct e1000_hw *hw = &adapter->hw;
2540 struct net_device *poll_dev = adapter->netdev;
2541 int tx_cleaned = 1, work_done = 0;
2543 adapter = netdev_priv(poll_dev);
2545 if (!adapter->msix_entries ||
2546 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2547 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2549 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2554 /* If weight not fully consumed, exit the polling mode */
2555 if (work_done < weight) {
2556 if (adapter->itr_setting & 3)
2557 e1000_set_itr(adapter);
2558 napi_complete(napi);
2559 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2560 if (adapter->msix_entries)
2561 ew32(IMS, adapter->rx_ring->ims_val);
2563 e1000_irq_enable(adapter);
2570 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2572 struct e1000_adapter *adapter = netdev_priv(netdev);
2573 struct e1000_hw *hw = &adapter->hw;
2576 /* don't update vlan cookie if already programmed */
2577 if ((adapter->hw.mng_cookie.status &
2578 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2579 (vid == adapter->mng_vlan_id))
2582 /* add VID to filter table */
2583 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2584 index = (vid >> 5) & 0x7F;
2585 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2586 vfta |= (1 << (vid & 0x1F));
2587 hw->mac.ops.write_vfta(hw, index, vfta);
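/* Worked example of the VFTA indexing above (illustrative only): the
 * 4096 possible VLAN IDs map onto 128 32-bit table entries, so for
 * vid = 100, index = (100 >> 5) & 0x7F = 3 and bit = 100 & 0x1F = 4,
 * i.e. bit 4 of VFTA[3] is set.
 */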
2590 set_bit(vid, adapter->active_vlans);
2595 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2597 struct e1000_adapter *adapter = netdev_priv(netdev);
2598 struct e1000_hw *hw = &adapter->hw;
2601 if ((adapter->hw.mng_cookie.status &
2602 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2603 (vid == adapter->mng_vlan_id)) {
2604 /* release control to f/w */
2605 e1000e_release_hw_control(adapter);
2609 /* remove VID from filter table */
2610 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2611 index = (vid >> 5) & 0x7F;
2612 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2613 vfta &= ~(1 << (vid & 0x1F));
2614 hw->mac.ops.write_vfta(hw, index, vfta);
2617 clear_bit(vid, adapter->active_vlans);
2623 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2624 * @adapter: board private structure to initialize
2626 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2628 struct net_device *netdev = adapter->netdev;
2629 struct e1000_hw *hw = &adapter->hw;
2632 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2633 /* disable VLAN receive filtering */
2635 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2638 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2639 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2640 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2646 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2647 * @adapter: board private structure to initialize
2649 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2651 struct e1000_hw *hw = &adapter->hw;
2654 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2655 /* enable VLAN receive filtering */
2657 rctl |= E1000_RCTL_VFE;
2658 rctl &= ~E1000_RCTL_CFIEN;
2664 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2665 * @adapter: board private structure to initialize
2667 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2669 struct e1000_hw *hw = &adapter->hw;
2672 /* disable VLAN tag insert/strip */
2674 ctrl &= ~E1000_CTRL_VME;
2679 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2680 * @adapter: board private structure to initialize
2682 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2684 struct e1000_hw *hw = &adapter->hw;
2687 /* enable VLAN tag insert/strip */
2689 ctrl |= E1000_CTRL_VME;
2693 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2695 struct net_device *netdev = adapter->netdev;
2696 u16 vid = adapter->hw.mng_cookie.vlan_id;
2697 u16 old_vid = adapter->mng_vlan_id;
2699 if (adapter->hw.mng_cookie.status &
2700 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2701 e1000_vlan_rx_add_vid(netdev, vid);
2702 adapter->mng_vlan_id = vid;
2705 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2706 e1000_vlan_rx_kill_vid(netdev, old_vid);
2709 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2713 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2715 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2716 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2719 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2721 struct e1000_hw *hw = &adapter->hw;
2722 u32 manc, manc2h, mdef, i, j;
2724 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2729 /* enable receiving management packets to the host. This will probably
2730 * generate destination unreachable messages from the host OS, but
2731 * the packets will be handled on SMBUS
2733 manc |= E1000_MANC_EN_MNG2HOST;
2734 manc2h = er32(MANC2H);
2736 switch (hw->mac.type) {
2737 default:
2738 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2742 /* Check if IPMI pass-through decision filter already exists; if so, enable it */
2745 for (i = 0, j = 0; i < 8; i++) {
2746 mdef = er32(MDEF(i));
2748 /* Ignore filters with anything other than IPMI ports */
2749 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2752 /* Enable this decision filter in MANC2H */
2759 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2762 /* Create new decision filter in an empty filter */
2763 for (i = 0, j = 0; i < 8; i++)
2764 if (er32(MDEF(i)) == 0) {
2765 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2766 E1000_MDEF_PORT_664));
2773 e_warn("Unable to create IPMI pass-through filter\n");
2777 ew32(MANC2H, manc2h);
2782 * e1000_configure_tx - Configure Transmit Unit after Reset
2783 * @adapter: board private structure
2785 * Configure the Tx unit of the MAC after a reset.
2787 static void e1000_configure_tx(struct e1000_adapter *adapter)
2789 struct e1000_hw *hw = &adapter->hw;
2790 struct e1000_ring *tx_ring = adapter->tx_ring;
2794 /* Setup the HW Tx Head and Tail descriptor pointers */
2795 tdba = tx_ring->dma;
2796 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2797 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2798 ew32(TDBAH(0), (tdba >> 32));
2799 ew32(TDLEN(0), tdlen);
2802 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2803 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2805 /* Set the Tx Interrupt Delay register */
2806 ew32(TIDV, adapter->tx_int_delay);
2807 /* Tx irq moderation */
2808 ew32(TADV, adapter->tx_abs_int_delay);
2810 if (adapter->flags2 & FLAG2_DMA_BURST) {
2811 u32 txdctl = er32(TXDCTL(0));
2812 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2813 E1000_TXDCTL_WTHRESH);
2814 /* set up some performance-related parameters to encourage the
2815 * hardware to use the bus more efficiently in bursts; this
2816 * depends on tx_int_delay being enabled
2817 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2818 * hthresh = 1 ==> prefetch when one or more descriptors are available
2819 * pthresh = 0x1f ==> prefetch if internal cache holds 31 or fewer
2820 * BEWARE: this seems to work but should be suspected first if
2821 * there are Tx hangs or other Tx related bugs
2823 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2824 ew32(TXDCTL(0), txdctl);
2826 /* erratum workaround: set txdctl the same for both queues */
2827 ew32(TXDCTL(1), er32(TXDCTL(0)));
2829 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2830 tarc = er32(TARC(0));
2831 /* set the speed mode bit, we'll clear it if we're not at
2832 * gigabit link later
2834 #define SPEED_MODE_BIT (1 << 21)
2835 tarc |= SPEED_MODE_BIT;
2836 ew32(TARC(0), tarc);
2839 /* errata: program both queues to unweighted RR */
2840 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2841 tarc = er32(TARC(0));
2843 ew32(TARC(0), tarc);
2844 tarc = er32(TARC(1));
2846 ew32(TARC(1), tarc);
2849 /* Setup Transmit Descriptor Settings for eop descriptor */
2850 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
2852 /* only set IDE if we are delaying interrupts using the timers */
2853 if (adapter->tx_int_delay)
2854 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
2856 /* enable Report Status bit */
2857 adapter->txd_cmd |= E1000_TXD_CMD_RS;
2859 hw->mac.ops.config_collision_dist(hw);
2863 * e1000_setup_rctl - configure the receive control registers
2864 * @adapter: Board private structure
2866 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
2867 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
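/* Example, assuming 4 KiB pages (illustrative only): PAGE_USE_COUNT(9000)
 * = (9000 >> 12) + 1 = 3, so a 9000-byte jumbo MTU needs three pages
 * and still qualifies for packet split (pages <= 3) further below.
 */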
2868 static void e1000_setup_rctl(struct e1000_adapter *adapter)
2870 struct e1000_hw *hw = &adapter->hw;
2874 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2875 if (hw->mac.type >= e1000_pch2lan) {
2878 if (adapter->netdev->mtu > ETH_DATA_LEN)
2879 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2881 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2884 e_dbg("failed to enable jumbo frame workaround mode\n");
2887 /* Program MC offset vector base */
2889 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2890 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
2891 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
2892 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2894 /* Do not Store bad packets */
2895 rctl &= ~E1000_RCTL_SBP;
2897 /* Enable Long Packet receive */
2898 if (adapter->netdev->mtu <= ETH_DATA_LEN)
2899 rctl &= ~E1000_RCTL_LPE;
2900 else
2901 rctl |= E1000_RCTL_LPE;
2903 /* Some systems expect that the CRC is included in SMBUS traffic. The
2904 * hardware strips the CRC before sending to both SMBUS (BMC) and to
2905 * host memory when this is enabled
2907 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
2908 rctl |= E1000_RCTL_SECRC;
2910 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
2911 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
2914 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
2916 phy_data |= (1 << 2);
2917 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
2919 e1e_rphy(hw, 22, &phy_data);
2921 phy_data |= (1 << 14);
2922 e1e_wphy(hw, 0x10, 0x2823);
2923 e1e_wphy(hw, 0x11, 0x0003);
2924 e1e_wphy(hw, 22, phy_data);
2927 /* Setup buffer sizes */
2928 rctl &= ~E1000_RCTL_SZ_4096;
2929 rctl |= E1000_RCTL_BSEX;
2930 switch (adapter->rx_buffer_len) {
2933 rctl |= E1000_RCTL_SZ_2048;
2934 rctl &= ~E1000_RCTL_BSEX;
2937 rctl |= E1000_RCTL_SZ_4096;
2940 rctl |= E1000_RCTL_SZ_8192;
2943 rctl |= E1000_RCTL_SZ_16384;
2947 /* Enable Extended Status in all Receive Descriptors */
2948 rfctl = er32(RFCTL);
2949 rfctl |= E1000_RFCTL_EXTEN;
2952 /* 82571 and greater support packet-split where the protocol
2953 * header is placed in skb->data and the packet data is
2954 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2955 * In the case of a non-split, skb->data is linearly filled,
2956 * followed by the page buffers. Therefore, skb->data is
2957 * sized to hold the largest protocol header.
2959 * allocations using alloc_page take too long for regular MTU
2960 * so only enable packet split for jumbo frames
2962 * Using pages when the page size is greater than 16k wastes
2963 * a lot of memory, since we allocate 3 pages at all times
2966 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
2967 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
2968 adapter->rx_ps_pages = pages;
2969 else
2970 adapter->rx_ps_pages = 0;
2972 if (adapter->rx_ps_pages) {
2975 /* Enable Packet split descriptors */
2976 rctl |= E1000_RCTL_DTYP_PS;
2978 psrctl |= adapter->rx_ps_bsize0 >>
2979 E1000_PSRCTL_BSIZE0_SHIFT;
2981 switch (adapter->rx_ps_pages) {
2983 psrctl |= PAGE_SIZE <<
2984 E1000_PSRCTL_BSIZE3_SHIFT;
2986 psrctl |= PAGE_SIZE <<
2987 E1000_PSRCTL_BSIZE2_SHIFT;
2989 psrctl |= PAGE_SIZE >>
2990 E1000_PSRCTL_BSIZE1_SHIFT;
2994 ew32(PSRCTL, psrctl);
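/* Note on the switch above (reader aid): the cases for 3, 2 and 1
 * pages fall through, so rx_ps_pages == 3 programs BSIZE3, BSIZE2 and
 * BSIZE1 in a single pass, while buffer 0 was already sized from
 * rx_ps_bsize0.
 */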
2997 /* This is useful for sniffing bad packets. */
2998 if (adapter->netdev->features & NETIF_F_RXALL) {
2999 /* UPE and MPE will be handled by normal PROMISC logic
3000 * in e1000e_set_rx_mode
3002 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3003 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3004 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3006 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3007 E1000_RCTL_DPF | /* Allow filtered pause */
3008 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3009 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3010 * and that breaks VLANs.
3015 /* just started the receive unit, no need to restart */
3016 adapter->flags &= ~FLAG_RX_RESTART_NOW;
3020 * e1000_configure_rx - Configure Receive Unit after Reset
3021 * @adapter: board private structure
3023 * Configure the Rx unit of the MAC after a reset.
3025 static void e1000_configure_rx(struct e1000_adapter *adapter)
3027 struct e1000_hw *hw = &adapter->hw;
3028 struct e1000_ring *rx_ring = adapter->rx_ring;
3030 u32 rdlen, rctl, rxcsum, ctrl_ext;
3032 if (adapter->rx_ps_pages) {
3033 /* this is a 32 byte descriptor */
3034 rdlen = rx_ring->count *
3035 sizeof(union e1000_rx_desc_packet_split);
3036 adapter->clean_rx = e1000_clean_rx_irq_ps;
3037 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3038 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3039 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3040 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3041 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3043 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3044 adapter->clean_rx = e1000_clean_rx_irq;
3045 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3048 /* disable receives while setting up the descriptors */
3050 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3051 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3053 usleep_range(10000, 20000);
3055 if (adapter->flags2 & FLAG2_DMA_BURST) {
3056 /* set the writeback threshold (only takes effect if the RDTR
3057 * is set). set GRAN=1 and write back up to 0x4 worth, and
3058 * enable prefetching of 0x20 Rx descriptors
3064 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3065 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3067 /* override the delay timers for enabling bursting, only if
3068 * the value was not set by the user via module options
3070 if (adapter->rx_int_delay == DEFAULT_RDTR)
3071 adapter->rx_int_delay = BURST_RDTR;
3072 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3073 adapter->rx_abs_int_delay = BURST_RADV;
3076 /* set the Receive Delay Timer Register */
3077 ew32(RDTR, adapter->rx_int_delay);
3079 /* irq moderation */
3080 ew32(RADV, adapter->rx_abs_int_delay);
3081 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3082 e1000e_write_itr(adapter, adapter->itr);
3084 ctrl_ext = er32(CTRL_EXT);
3085 /* Auto-Mask interrupts upon ICR access */
3086 ctrl_ext |= E1000_CTRL_EXT_IAME;
3087 ew32(IAM, 0xffffffff);
3088 ew32(CTRL_EXT, ctrl_ext);
3091 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3092 * the Base and Length of the Rx Descriptor Ring
3094 rdba = rx_ring->dma;
3095 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3096 ew32(RDBAH(0), (rdba >> 32));
3097 ew32(RDLEN(0), rdlen);
3100 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3101 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3103 /* Enable Receive Checksum Offload for TCP and UDP */
3104 rxcsum = er32(RXCSUM);
3105 if (adapter->netdev->features & NETIF_F_RXCSUM)
3106 rxcsum |= E1000_RXCSUM_TUOFL;
3107 else
3108 rxcsum &= ~E1000_RXCSUM_TUOFL;
3109 ew32(RXCSUM, rxcsum);
3111 if (adapter->hw.mac.type == e1000_pch2lan) {
3112 /* With jumbo frames, excessive C-state transition
3113 * latencies result in dropped transactions.
3115 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3116 u32 rxdctl = er32(RXDCTL(0));
3117 ew32(RXDCTL(0), rxdctl | 0x3);
3118 pm_qos_update_request(&adapter->netdev->pm_qos_req, 55);
3119 } else {
3120 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3121 PM_QOS_DEFAULT_VALUE);
3125 /* Enable Receives */
3130 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3131 * @netdev: network interface device structure
3133 * Writes multicast address list to the MTA hash table.
3134 * Returns: -ENOMEM on failure
3135 * 0 on no addresses written
3136 * X on writing X addresses to MTA
3138 static int e1000e_write_mc_addr_list(struct net_device *netdev)
3140 struct e1000_adapter *adapter = netdev_priv(netdev);
3141 struct e1000_hw *hw = &adapter->hw;
3142 struct netdev_hw_addr *ha;
3146 if (netdev_mc_empty(netdev)) {
3147 /* nothing to program, so clear mc list */
3148 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3152 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3156 /* update_mc_addr_list expects a packed array of only addresses. */
3158 netdev_for_each_mc_addr(ha, netdev)
3159 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3161 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3164 return netdev_mc_count(netdev);
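/* Reader aid: mta_list is a flat byte array of 6-byte MACs laid end to
 * end (addr0[0..5], addr1[0..5], ...), which is the packed layout
 * update_mc_addr_list() hashes into the MTA.
 */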
3168 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3169 * @netdev: network interface device structure
3171 * Writes unicast address list to the RAR table.
3172 * Returns: -ENOMEM on failure/insufficient address space
3173 * 0 on no addresses written
3174 * X on writing X addresses to the RAR table
3176 static int e1000e_write_uc_addr_list(struct net_device *netdev)
3178 struct e1000_adapter *adapter = netdev_priv(netdev);
3179 struct e1000_hw *hw = &adapter->hw;
3180 unsigned int rar_entries = hw->mac.rar_entry_count;
3183 /* save a rar entry for our hardware address */
3186 /* save a rar entry for the LAA workaround */
3187 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3190 /* return ENOMEM indicating insufficient memory for addresses */
3191 if (netdev_uc_count(netdev) > rar_entries)
3194 if (!netdev_uc_empty(netdev) && rar_entries) {
3195 struct netdev_hw_addr *ha;
3197 /* write the addresses in reverse order to avoid write combining */
3200 netdev_for_each_uc_addr(ha, netdev) {
3201 if (!rar_entries)
3202 break;
3203 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3208 /* zero out the remaining RAR entries not used above */
3209 for (; rar_entries > 0; rar_entries--) {
3210 ew32(RAH(rar_entries), 0);
3211 ew32(RAL(rar_entries), 0);
3219 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3220 * @netdev: network interface device structure
3222 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3223 * address list or the network interface flags are updated. This routine is
3224 * responsible for configuring the hardware for proper unicast, multicast,
3225 * promiscuous mode, and all-multi behavior.
3227 static void e1000e_set_rx_mode(struct net_device *netdev)
3229 struct e1000_adapter *adapter = netdev_priv(netdev);
3230 struct e1000_hw *hw = &adapter->hw;
3233 /* Check for Promiscuous and All Multicast modes */
3236 /* clear the affected bits */
3237 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3239 if (netdev->flags & IFF_PROMISC) {
3240 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3241 /* Do not hardware filter VLANs in promisc mode */
3242 e1000e_vlan_filter_disable(adapter);
3246 if (netdev->flags & IFF_ALLMULTI) {
3247 rctl |= E1000_RCTL_MPE;
3249 /* Write addresses to the MTA, if the attempt fails
3250 * then we should just turn on promiscuous mode so
3251 * that we can at least receive multicast traffic
3253 count = e1000e_write_mc_addr_list(netdev);
3254 if (count < 0)
3255 rctl |= E1000_RCTL_MPE;
3257 e1000e_vlan_filter_enable(adapter);
3258 /* Write addresses to available RAR registers, if there is not
3259 * sufficient space to store all the addresses then enable
3260 * unicast promiscuous mode
3262 count = e1000e_write_uc_addr_list(netdev);
3263 if (count < 0)
3264 rctl |= E1000_RCTL_UPE;
3269 if (netdev->features & NETIF_F_HW_VLAN_RX)
3270 e1000e_vlan_strip_enable(adapter);
3272 e1000e_vlan_strip_disable(adapter);
3275 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3277 struct e1000_hw *hw = &adapter->hw;
3280 static const u32 rsskey[10] = {
3281 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0,
3282 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe
3285 /* Fill out hash function seed */
3286 for (i = 0; i < 10; i++)
3287 ew32(RSSRK(i), rsskey[i]);
3289 /* Direct all traffic to queue 0 */
3290 for (i = 0; i < 32; i++)
3291 ew32(RETA(i), 0);
3293 /* Disable raw packet checksumming so that RSS hash is placed in
3294 * descriptor on writeback.
3296 rxcsum = er32(RXCSUM);
3297 rxcsum |= E1000_RXCSUM_PCSD;
3299 ew32(RXCSUM, rxcsum);
3301 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3302 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3303 E1000_MRQC_RSS_FIELD_IPV6 |
3304 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3305 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
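/* Reader aid: with the redirection table cleared above, every flow
 * still lands on queue 0; the RSS hash is computed only so it can be
 * reported in the Rx descriptor (PCSD set), e.g. for NETIF_F_RXHASH
 * consumers.
 */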
3311 * e1000_configure - configure the hardware for Rx and Tx
3312 * @adapter: private board structure
3314 static void e1000_configure(struct e1000_adapter *adapter)
3316 struct e1000_ring *rx_ring = adapter->rx_ring;
3318 e1000e_set_rx_mode(adapter->netdev);
3320 e1000_restore_vlan(adapter);
3321 e1000_init_manageability_pt(adapter);
3323 e1000_configure_tx(adapter);
3325 if (adapter->netdev->features & NETIF_F_RXHASH)
3326 e1000e_setup_rss_hash(adapter);
3327 e1000_setup_rctl(adapter);
3328 e1000_configure_rx(adapter);
3329 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3333 * e1000e_power_up_phy - restore link in case the phy was powered down
3334 * @adapter: address of board private structure
3336 * The phy may be powered down to save power and turn off link when the
3337 * driver is unloaded and wake on lan is not enabled (among others)
3338 * *** this routine MUST be followed by a call to e1000e_reset ***
3340 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3342 if (adapter->hw.phy.ops.power_up)
3343 adapter->hw.phy.ops.power_up(&adapter->hw);
3345 adapter->hw.mac.ops.setup_link(&adapter->hw);
3349 * e1000_power_down_phy - Power down the PHY
3351 * Power down the PHY so no link is implied when interface is down.
3352 * The PHY cannot be powered down if management or WoL is active.
3354 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3356 /* WoL is enabled */
3357 if (adapter->wol)
3358 return;
3360 if (adapter->hw.phy.ops.power_down)
3361 adapter->hw.phy.ops.power_down(&adapter->hw);
3365 * e1000e_reset - bring the hardware into a known good state
3367 * This function boots the hardware and enables some settings that
3368 * require a configuration cycle of the hardware - those cannot be
3369 * set/changed during runtime. After reset the device needs to be
3370 * properly configured for Rx, Tx etc.
3372 void e1000e_reset(struct e1000_adapter *adapter)
3374 struct e1000_mac_info *mac = &adapter->hw.mac;
3375 struct e1000_fc_info *fc = &adapter->hw.fc;
3376 struct e1000_hw *hw = &adapter->hw;
3377 u32 tx_space, min_tx_space, min_rx_space;
3378 u32 pba = adapter->pba;
3381 /* reset Packet Buffer Allocation to default */
3384 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3385 /* To maintain wire speed transmits, the Tx FIFO should be
3386 * large enough to accommodate two full transmit packets,
3387 * rounded up to the next 1KB and expressed in KB. Likewise,
3388 * the Rx FIFO should be large enough to accommodate at least
3389 * one full receive packet and is similarly rounded up and
3393 /* upper 16 bits hold the Tx packet buffer allocation size in KB */
3394 tx_space = pba >> 16;
3395 /* lower 16 bits hold the Rx packet buffer allocation size in KB */
3397 /* the Tx FIFO also stores 16 bytes of information about the Tx
3398 * packet, but don't include the Ethernet FCS because hardware appends it
3400 min_tx_space = (adapter->max_frame_size +
3401 sizeof(struct e1000_tx_desc) -
3403 min_tx_space = ALIGN(min_tx_space, 1024);
3404 min_tx_space >>= 10;
3405 /* software strips receive CRC, so leave room for it */
3406 min_rx_space = adapter->max_frame_size;
3407 min_rx_space = ALIGN(min_rx_space, 1024);
3408 min_rx_space >>= 10;
3410 /* If current Tx allocation is less than the min Tx FIFO size,
3411 * and the min Tx FIFO size is less than the current Rx FIFO
3412 * allocation, take space away from current Rx allocation
3414 if ((tx_space < min_tx_space) &&
3415 ((min_tx_space - tx_space) < pba)) {
3416 pba -= min_tx_space - tx_space;
3418 /* if short on Rx space, Rx wins and must trump Tx adjustment */
3421 if (pba < min_rx_space)
3428 /* flow control settings
3430 * The high water mark must be low enough to fit one full frame
3431 * (or the size used for early receive) above it in the Rx FIFO.
3432 * Set it to the lower of:
3433 * - 90% of the Rx FIFO size, and
3434 * - the full Rx FIFO size minus one full frame
3436 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3437 fc->pause_time = 0xFFFF;
3439 fc->pause_time = E1000_FC_PAUSE_TIME;
3440 fc->send_xon = true;
3441 fc->current_mode = fc->requested_mode;
3443 switch (hw->mac.type) {
3445 case e1000_ich10lan:
3446 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3449 fc->high_water = 0x2800;
3450 fc->low_water = fc->high_water - 8;
3455 hwm = min(((pba << 10) * 9 / 10),
3456 ((pba << 10) - adapter->max_frame_size));
3458 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3459 fc->low_water = fc->high_water - 8;
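/* Hypothetical numbers for the hwm calculation above (illustrative
 * only): with pba = 20 KiB and a 1522-byte max frame, 90% of the FIFO
 * is 18432 bytes and FIFO-minus-one-frame is 18958, so hwm = 18432;
 * the low-water mark then sits 8 bytes below the masked high-water
 * mark.
 */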
3462 /* Workaround PCH LOM adapter hangs with certain network
3463 * loads. If hangs persist, try disabling Tx flow control.
3465 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3466 fc->high_water = 0x3500;
3467 fc->low_water = 0x1500;
3469 fc->high_water = 0x5000;
3470 fc->low_water = 0x3000;
3472 fc->refresh_time = 0x1000;
3476 fc->high_water = 0x05C20;
3477 fc->low_water = 0x05048;
3478 fc->pause_time = 0x0650;
3479 fc->refresh_time = 0x0400;
3480 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3487 /* Tx data may be aligned on an arbitrary byte boundary; the
3488 * maximum size per Tx descriptor is limited only by the transmit
3489 * allocation of the packet buffer minus 96 bytes, with an upper
3490 * limit of 24KB due to receive synchronization limitations.
3492 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3493 24 << 10);
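/* Reading the expression above (illustrative only): the upper 16 bits
 * of PBA give the Tx packet buffer size in KiB, << 10 converts that to
 * bytes, 96 bytes are reserved, and the result is capped at 24 KiB;
 * e.g. a 20 KiB Tx allocation yields min(20480 - 96, 24576) = 20384.
 */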
3495 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
3496 * fit in receive buffer.
3498 if (adapter->itr_setting & 0x3) {
3499 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3500 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3501 dev_info(&adapter->pdev->dev,
3502 "Interrupt Throttle Rate turned off\n");
3503 adapter->flags2 |= FLAG2_DISABLE_AIM;
3504 e1000e_write_itr(adapter, 0);
3506 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3507 dev_info(&adapter->pdev->dev,
3508 "Interrupt Throttle Rate turned on\n");
3509 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3510 adapter->itr = 20000;
3511 e1000e_write_itr(adapter, adapter->itr);
3515 /* Allow time for pending master requests to run */
3516 mac->ops.reset_hw(hw);
3518 /* For parts with AMT enabled, let the firmware know
3519 * that the network interface is in control
3521 if (adapter->flags & FLAG_HAS_AMT)
3522 e1000e_get_hw_control(adapter);
3526 if (mac->ops.init_hw(hw))
3527 e_err("Hardware Error\n");
3529 e1000_update_mng_vlan(adapter);
3531 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3532 ew32(VET, ETH_P_8021Q);
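/* Reader aid: VET holds the VLAN EtherType the hardware matches on;
 * ETH_P_8021Q is 0x8100, the standard 802.1Q TPID.
 */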
3534 e1000e_reset_adaptive(hw);
3536 if (!netif_running(adapter->netdev) &&
3537 !test_bit(__E1000_TESTING, &adapter->state)) {
3538 e1000_power_down_phy(adapter);
3539 return;
3540 }
3542 e1000_get_phy_info(hw);
3544 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3545 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3547 /* speed up time to link by disabling smart power down, ignore
3548 * the return value of this function because there is nothing
3549 * different we would do if it failed
3551 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3552 phy_data &= ~IGP02E1000_PM_SPD;
3553 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3557 int e1000e_up(struct e1000_adapter *adapter)
3559 struct e1000_hw *hw = &adapter->hw;
3561 /* hardware has been reset, we need to reload some things */
3562 e1000_configure(adapter);
3564 clear_bit(__E1000_DOWN, &adapter->state);
3566 if (adapter->msix_entries)
3567 e1000_configure_msix(adapter);
3568 e1000_irq_enable(adapter);
3570 netif_start_queue(adapter->netdev);
3572 /* fire a link change interrupt to start the watchdog */
3573 if (adapter->msix_entries)
3574 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3575 else
3576 ew32(ICS, E1000_ICS_LSC);
3581 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3583 struct e1000_hw *hw = &adapter->hw;
3585 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3588 /* flush pending descriptor writebacks to memory */
3589 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3590 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3592 /* execute the writes immediately */
3595 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
3596 * write is successful
3598 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3599 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3601 /* execute the writes immediately */
3605 static void e1000e_update_stats(struct e1000_adapter *adapter);
3607 void e1000e_down(struct e1000_adapter *adapter)
3609 struct net_device *netdev = adapter->netdev;
3610 struct e1000_hw *hw = &adapter->hw;
3613 /* signal that we're down so the interrupt handler does not
3614 * reschedule our watchdog timer
3616 set_bit(__E1000_DOWN, &adapter->state);
3618 /* disable receives in the hardware */
3620 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3621 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3622 /* flush and sleep below */
3624 netif_stop_queue(netdev);
3626 /* disable transmits in the hardware */
3628 tctl &= ~E1000_TCTL_EN;
3631 /* flush both disables and wait for them to finish */
3633 usleep_range(10000, 20000);
3635 e1000_irq_disable(adapter);
3637 del_timer_sync(&adapter->watchdog_timer);
3638 del_timer_sync(&adapter->phy_info_timer);
3640 netif_carrier_off(netdev);
3642 spin_lock(&adapter->stats64_lock);
3643 e1000e_update_stats(adapter);
3644 spin_unlock(&adapter->stats64_lock);
3646 e1000e_flush_descriptors(adapter);
3647 e1000_clean_tx_ring(adapter->tx_ring);
3648 e1000_clean_rx_ring(adapter->rx_ring);
3650 adapter->link_speed = 0;
3651 adapter->link_duplex = 0;
3653 if (!pci_channel_offline(adapter->pdev))
3654 e1000e_reset(adapter);
3656 /* TODO: for power management, we could drop the link and
3657 * pci_disable_device here.
3661 void e1000e_reinit_locked(struct e1000_adapter *adapter)
3664 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3665 usleep_range(1000, 2000);
3666 e1000e_down(adapter);
3668 clear_bit(__E1000_RESETTING, &adapter->state);
3672 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
3673 * @adapter: board private structure to initialize
3675 * e1000_sw_init initializes the Adapter private data structure.
3676 * Fields are initialized based on PCI device information and
3677 * OS network device settings (MTU size).
3679 static int e1000_sw_init(struct e1000_adapter *adapter)
3681 struct net_device *netdev = adapter->netdev;
3683 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
3684 adapter->rx_ps_bsize0 = 128;
3685 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3686 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
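/* Worked numbers for the sizes above, assuming a standard 1500-byte
 * MTU (illustrative only): rx_buffer_len = 1514 + 4 + 4 = 1522 (room
 * for a VLAN tag plus FCS), max_frame_size = 1500 + 14 + 4 = 1518,
 * and min_frame_size = 60 + 4 = 64, the minimum Ethernet frame.
 */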
3687 adapter->tx_ring_count = E1000_DEFAULT_TXD;
3688 adapter->rx_ring_count = E1000_DEFAULT_RXD;
3690 spin_lock_init(&adapter->stats64_lock);
3692 e1000e_set_interrupt_capability(adapter);
3694 if (e1000_alloc_queues(adapter))
3697 /* Explicitly disable IRQ since the NIC can be in any state. */
3698 e1000_irq_disable(adapter);
3700 set_bit(__E1000_DOWN, &adapter->state);
3705 * e1000_intr_msi_test - Interrupt Handler
3706 * @irq: interrupt number
3707 * @data: pointer to a network interface device structure
3709 static irqreturn_t e1000_intr_msi_test(int irq, void *data)
3711 struct net_device *netdev = data;
3712 struct e1000_adapter *adapter = netdev_priv(netdev);
3713 struct e1000_hw *hw = &adapter->hw;
3714 u32 icr = er32(ICR);
3716 e_dbg("icr is %08X\n", icr);
3717 if (icr & E1000_ICR_RXSEQ) {
3718 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
3719 /* Force memory writes to complete before acknowledging the
3720 * interrupt is handled.
3729 * e1000_test_msi_interrupt - Returns 0 for successful test
3730 * @adapter: board private struct
3732 * code flow taken from tg3.c
3734 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
3736 struct net_device *netdev = adapter->netdev;
3737 struct e1000_hw *hw = &adapter->hw;
3740 /* poll_enable hasn't been called yet, so don't need disable */
3741 /* clear any pending events */
3744 /* free the real vector and request a test handler */
3745 e1000_free_irq(adapter);
3746 e1000e_reset_interrupt_capability(adapter);
3748 /* Assume that the test fails, if it succeeds then the test
3749 * MSI irq handler will unset this flag
3751 adapter->flags |= FLAG_MSI_TEST_FAILED;
3753 err = pci_enable_msi(adapter->pdev);
3754 if (err)
3755 goto msi_test_failed;
3757 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
3758 netdev->name, netdev);
3759 if (err) {
3760 pci_disable_msi(adapter->pdev);
3761 goto msi_test_failed;
3764 /* Force memory writes to complete before enabling and firing an
3769 e1000_irq_enable(adapter);
3771 /* fire an unusual interrupt on the test handler */
3772 ew32(ICS, E1000_ICS_RXSEQ);
3776 e1000_irq_disable(adapter);
3778 rmb(); /* read flags after interrupt has been fired */
3780 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
3781 adapter->int_mode = E1000E_INT_MODE_LEGACY;
3782 e_info("MSI interrupt test failed, using legacy interrupt.\n");
3784 e_dbg("MSI interrupt test succeeded!\n");
3787 free_irq(adapter->pdev->irq, netdev);
3788 pci_disable_msi(adapter->pdev);
3791 e1000e_set_interrupt_capability(adapter);
3792 return e1000_request_irq(adapter);
3796 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
3797 * @adapter: board private struct
3799 * code flow taken from tg3.c, called with e1000 interrupts disabled.
3801 static int e1000_test_msi(struct e1000_adapter *adapter)
3806 if (!(adapter->flags & FLAG_MSI_ENABLED))
3807 return 0;
3809 /* disable SERR in case the MSI write causes a master abort */
3810 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3811 if (pci_cmd & PCI_COMMAND_SERR)
3812 pci_write_config_word(adapter->pdev, PCI_COMMAND,
3813 pci_cmd & ~PCI_COMMAND_SERR);
3815 err = e1000_test_msi_interrupt(adapter);
3817 /* re-enable SERR */
3818 if (pci_cmd & PCI_COMMAND_SERR) {
3819 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
3820 pci_cmd |= PCI_COMMAND_SERR;
3821 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
3828 * e1000_open - Called when a network interface is made active
3829 * @netdev: network interface device structure
3831 * Returns 0 on success, negative value on failure
3833 * The open entry point is called when a network interface is made
3834 * active by the system (IFF_UP). At this point all resources needed
3835 * for transmit and receive operations are allocated, the interrupt
3836 * handler is registered with the OS, the watchdog timer is started,
3837 * and the stack is notified that the interface is ready.
3839 static int e1000_open(struct net_device *netdev)
3841 struct e1000_adapter *adapter = netdev_priv(netdev);
3842 struct e1000_hw *hw = &adapter->hw;
3843 struct pci_dev *pdev = adapter->pdev;
3846 /* disallow open during test */
3847 if (test_bit(__E1000_TESTING, &adapter->state))
3848 return -EBUSY;
3850 pm_runtime_get_sync(&pdev->dev);
3852 netif_carrier_off(netdev);
3854 /* allocate transmit descriptors */
3855 err = e1000e_setup_tx_resources(adapter->tx_ring);
3859 /* allocate receive descriptors */
3860 err = e1000e_setup_rx_resources(adapter->rx_ring);
3864 /* If AMT is enabled, let the firmware know that the network
3865 * interface is now open and reset the part to a known state.
3867 if (adapter->flags & FLAG_HAS_AMT) {
3868 e1000e_get_hw_control(adapter);
3869 e1000e_reset(adapter);
3872 e1000e_power_up_phy(adapter);
3874 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
3875 if ((adapter->hw.mng_cookie.status &
3876 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
3877 e1000_update_mng_vlan(adapter);
3879 /* DMA latency requirement to work around the jumbo frame issue */
3880 if (adapter->hw.mac.type == e1000_pch2lan)
3881 pm_qos_add_request(&adapter->netdev->pm_qos_req,
3882 PM_QOS_CPU_DMA_LATENCY,
3883 PM_QOS_DEFAULT_VALUE);
3885 /* before we allocate an interrupt, we must be ready to handle it.
3886 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3887 * as soon as we call pci_request_irq, so we have to set up our
3888 * clean_rx handler before we do so.
3890 e1000_configure(adapter);
3892 err = e1000_request_irq(adapter);
3896 /* Work around PCIe errata with MSI interrupts causing some chipsets to
3897 * ignore e1000e MSI messages, which means we need to test our MSI interrupt now
3900 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
3901 err = e1000_test_msi(adapter);
3903 e_err("Interrupt allocation failed\n");
3908 /* From here on the code is the same as e1000e_up() */
3909 clear_bit(__E1000_DOWN, &adapter->state);
3911 napi_enable(&adapter->napi);
3913 e1000_irq_enable(adapter);
3915 adapter->tx_hang_recheck = false;
3916 netif_start_queue(netdev);
3918 adapter->idle_check = true;
3919 pm_runtime_put(&pdev->dev);
3921 /* fire a link status change interrupt to start the watchdog */
3922 if (adapter->msix_entries)
3923 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3924 else
3925 ew32(ICS, E1000_ICS_LSC);
3930 e1000e_release_hw_control(adapter);
3931 e1000_power_down_phy(adapter);
3932 e1000e_free_rx_resources(adapter->rx_ring);
3934 e1000e_free_tx_resources(adapter->tx_ring);
3936 e1000e_reset(adapter);
3937 pm_runtime_put_sync(&pdev->dev);
3943 * e1000_close - Disables a network interface
3944 * @netdev: network interface device structure
3946 * Returns 0, this is not allowed to fail
3948 * The close entry point is called when an interface is de-activated
3949 * by the OS. The hardware is still under the drivers control, but
3950 * needs to be disabled. A global MAC reset is issued to stop the
3951 * hardware, and all transmit and receive resources are freed.
3953 static int e1000_close(struct net_device *netdev)
3955 struct e1000_adapter *adapter = netdev_priv(netdev);
3956 struct pci_dev *pdev = adapter->pdev;
3957 int count = E1000_CHECK_RESET_COUNT;
3959 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
3960 usleep_range(10000, 20000);
3962 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3964 pm_runtime_get_sync(&pdev->dev);
3966 napi_disable(&adapter->napi);
3968 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3969 e1000e_down(adapter);
3970 e1000_free_irq(adapter);
3972 e1000_power_down_phy(adapter);
3974 e1000e_free_tx_resources(adapter->tx_ring);
3975 e1000e_free_rx_resources(adapter->rx_ring);
3977 /* kill manageability vlan ID if supported, but not if a vlan with
3978 * the same ID is registered on the host OS (let 8021q kill it)
3980 if (adapter->hw.mng_cookie.status &
3981 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
3982 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
3984 /* If AMT is enabled, let the firmware know that the network
3985 * interface is now closed
3987 if ((adapter->flags & FLAG_HAS_AMT) &&
3988 !test_bit(__E1000_TESTING, &adapter->state))
3989 e1000e_release_hw_control(adapter);
3991 if (adapter->hw.mac.type == e1000_pch2lan)
3992 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
3994 pm_runtime_put_sync(&pdev->dev);
3999 * e1000_set_mac - Change the Ethernet Address of the NIC
4000 * @netdev: network interface device structure
4001 * @p: pointer to an address structure
4003 * Returns 0 on success, negative on failure
4005 static int e1000_set_mac(struct net_device *netdev, void *p)
4007 struct e1000_adapter *adapter = netdev_priv(netdev);
4008 struct e1000_hw *hw = &adapter->hw;
4009 struct sockaddr *addr = p;
4011 if (!is_valid_ether_addr(addr->sa_data))
4012 return -EADDRNOTAVAIL;
4014 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4015 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4017 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4019 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4020 /* activate the work around */
4021 e1000e_set_laa_state_82571(&adapter->hw, 1);
4023 /* Hold a copy of the LAA in RAR[14]. This is done so that
4024 * between the time RAR[0] gets clobbered and the time it
4025 * gets fixed (in e1000_watchdog), the actual LAA is in one
4026 * of the RARs and no incoming packets directed to this port
4027 * are dropped. Eventually the LAA will be in RAR[0] and RAR[14].
4030 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4031 adapter->hw.mac.rar_entry_count - 1);
4038 * e1000e_update_phy_task - work thread to update phy
4039 * @work: pointer to our work struct
4041 * this worker thread exists because we must acquire a
4042 * semaphore to read the phy, which we could msleep while
4043 * waiting for it, and we can't msleep in a timer.
4045 static void e1000e_update_phy_task(struct work_struct *work)
4047 struct e1000_adapter *adapter = container_of(work,
4048 struct e1000_adapter, update_phy_task);
4050 if (test_bit(__E1000_DOWN, &adapter->state))
4051 return;
4053 e1000_get_phy_info(&adapter->hw);
4057 * e1000_update_phy_info - timer call-back to update PHY info
4058 * @data: pointer to adapter cast into an unsigned long
4060 * Need to wait a few seconds after link up to get diagnostic information from the phy
4063 static void e1000_update_phy_info(unsigned long data)
4065 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
4067 if (test_bit(__E1000_DOWN, &adapter->state))
4068 return;
4070 schedule_work(&adapter->update_phy_task);
4074 * e1000e_update_phy_stats - Update the PHY statistics counters
4075 * @adapter: board private structure
4077 * Read/clear the upper 16-bit PHY registers and read/accumulate the lower 16-bit PHY registers
4079 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4081 struct e1000_hw *hw = &adapter->hw;
4085 ret_val = hw->phy.ops.acquire(hw);
4089 /* A page set is expensive so check if already on desired page.
4090 * If not, set to the page with the PHY status registers.
4093 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4097 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4098 ret_val = hw->phy.ops.set_page(hw,
4099 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4104 /* Single Collision Count */
4105 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4106 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4108 adapter->stats.scc += phy_data;
4110 /* Excessive Collision Count */
4111 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4112 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4114 adapter->stats.ecol += phy_data;
4116 /* Multiple Collision Count */
4117 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4118 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4120 adapter->stats.mcc += phy_data;
4122 /* Late Collision Count */
4123 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4124 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4126 adapter->stats.latecol += phy_data;
4128 /* Collision Count - also used for adaptive IFS */
4129 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4130 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4132 hw->mac.collision_delta = phy_data;
4134 /* Defer Count */
4135 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4136 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4138 adapter->stats.dc += phy_data;
4140 /* Transmit with no CRS */
4141 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4142 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4144 adapter->stats.tncrs += phy_data;
4147 hw->phy.ops.release(hw);
4151 * e1000e_update_stats - Update the board statistics counters
4152 * @adapter: board private structure
4154 static void e1000e_update_stats(struct e1000_adapter *adapter)
4156 struct net_device *netdev = adapter->netdev;
4157 struct e1000_hw *hw = &adapter->hw;
4158 struct pci_dev *pdev = adapter->pdev;
4160 /* Prevent stats update while adapter is being reset, or if the pci
4161 * connection is down.
4163 if (adapter->link_speed == 0)
4164 return;
4165 if (pci_channel_offline(pdev))
4166 return;
4168 adapter->stats.crcerrs += er32(CRCERRS);
4169 adapter->stats.gprc += er32(GPRC);
4170 adapter->stats.gorc += er32(GORCL);
4171 er32(GORCH); /* Clear gorc */
4172 adapter->stats.bprc += er32(BPRC);
4173 adapter->stats.mprc += er32(MPRC);
4174 adapter->stats.roc += er32(ROC);
4176 adapter->stats.mpc += er32(MPC);
4178 /* Half-duplex statistics */
4179 if (adapter->link_duplex == HALF_DUPLEX) {
4180 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4181 e1000e_update_phy_stats(adapter);
4183 adapter->stats.scc += er32(SCC);
4184 adapter->stats.ecol += er32(ECOL);
4185 adapter->stats.mcc += er32(MCC);
4186 adapter->stats.latecol += er32(LATECOL);
4187 adapter->stats.dc += er32(DC);
4189 hw->mac.collision_delta = er32(COLC);
4191 if ((hw->mac.type != e1000_82574) &&
4192 (hw->mac.type != e1000_82583))
4193 adapter->stats.tncrs += er32(TNCRS);
4195 adapter->stats.colc += hw->mac.collision_delta;
4198 adapter->stats.xonrxc += er32(XONRXC);
4199 adapter->stats.xontxc += er32(XONTXC);
4200 adapter->stats.xoffrxc += er32(XOFFRXC);
4201 adapter->stats.xofftxc += er32(XOFFTXC);
4202 adapter->stats.gptc += er32(GPTC);
4203 adapter->stats.gotc += er32(GOTCL);
4204 er32(GOTCH); /* Clear gotc */
4205 adapter->stats.rnbc += er32(RNBC);
4206 adapter->stats.ruc += er32(RUC);
4208 adapter->stats.mptc += er32(MPTC);
4209 adapter->stats.bptc += er32(BPTC);
4211 /* used for adaptive IFS */
4213 hw->mac.tx_packet_delta = er32(TPT);
4214 adapter->stats.tpt += hw->mac.tx_packet_delta;
4216 adapter->stats.algnerrc += er32(ALGNERRC);
4217 adapter->stats.rxerrc += er32(RXERRC);
4218 adapter->stats.cexterr += er32(CEXTERR);
4219 adapter->stats.tsctc += er32(TSCTC);
4220 adapter->stats.tsctfc += er32(TSCTFC);
4222 /* Fill out the OS statistics structure */
4223 netdev->stats.multicast = adapter->stats.mprc;
4224 netdev->stats.collisions = adapter->stats.colc;
4228 /* RLEC on some newer hardware can be incorrect so build
4229 * our own version based on RUC and ROC
4231 netdev->stats.rx_errors = adapter->stats.rxerrc +
4232 adapter->stats.crcerrs + adapter->stats.algnerrc +
4233 adapter->stats.ruc + adapter->stats.roc +
4234 adapter->stats.cexterr;
4235 netdev->stats.rx_length_errors = adapter->stats.ruc +
4237 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4238 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4239 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4242 netdev->stats.tx_errors = adapter->stats.ecol +
4243 adapter->stats.latecol;
4244 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4245 netdev->stats.tx_window_errors = adapter->stats.latecol;
4246 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4248 /* Tx Dropped needs to be maintained elsewhere */
4250 /* Management Stats */
4251 adapter->stats.mgptc += er32(MGTPTC);
4252 adapter->stats.mgprc += er32(MGTPRC);
4253 adapter->stats.mgpdc += er32(MGTPDC);
4257 * e1000_phy_read_status - Update the PHY register status snapshot
4258 * @adapter: board private structure
4260 static void e1000_phy_read_status(struct e1000_adapter *adapter)
4262 struct e1000_hw *hw = &adapter->hw;
4263 struct e1000_phy_regs *phy = &adapter->phy_regs;
4265 if ((er32(STATUS) & E1000_STATUS_LU) &&
4266 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4267 int ret_val;
4269 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
4270 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
4271 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
4272 ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
4273 ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
4274 ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
4275 ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
4276 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
4277 if (ret_val)
4278 e_warn("Error reading PHY register\n");
4279 } else {
4280 /* Do not read PHY registers if link is not up
4281 * Set values to typical power-on defaults
4282 */
4283 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
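/* Illustrative decoding (computed here, not read back from a PHY): the
 * value composed above is BMCR_ANENABLE (0x1000) | BMCR_FULLDPLX (0x0100) |
 * BMCR_SPEED1000 (0x0040) = 0x1140, the familiar power-on BMCR of a
 * gigabit copper PHY.
 */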
4284 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4285 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4286 BMSR_ERCAP);
4287 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4288 ADVERTISE_ALL | ADVERTISE_CSMA);
4289 phy->lpa = 0;
4290 phy->expansion = EXPANSION_ENABLENPAGE;
4291 phy->ctrl1000 = ADVERTISE_1000FULL;
4292 phy->stat1000 = 0;
4293 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4294 }
4295 }
4297 static void e1000_print_link_info(struct e1000_adapter *adapter)
4298 {
4299 struct e1000_hw *hw = &adapter->hw;
4300 u32 ctrl = er32(CTRL);
4302 /* Link status message must follow this format for user tools */
4303 printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4304 adapter->netdev->name,
4305 adapter->link_speed,
4306 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4307 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4308 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4309 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
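/* Example of the resulting line, with illustrative values:
 * e1000e: eth0 NIC Link is Up 1000 Mbps Full Duplex, Flow Control: Rx/Tx
 * User tools parse this exact format, hence the fixed wording above.
 */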
4310 }
4312 static bool e1000e_has_link(struct e1000_adapter *adapter)
4313 {
4314 struct e1000_hw *hw = &adapter->hw;
4315 bool link_active = false;
4316 s32 ret_val = 0;
4318 /* get_link_status is set on LSC (link status) interrupt or
4319 * Rx sequence error interrupt, and stays set until
4320 * check_for_link establishes link (for copper adapters
4321 * ONLY), so link_active remains false until then.
4322 */
4323 switch (hw->phy.media_type) {
4324 case e1000_media_type_copper:
4325 if (hw->mac.get_link_status) {
4326 ret_val = hw->mac.ops.check_for_link(hw);
4327 link_active = !hw->mac.get_link_status;
4328 } else {
4329 link_active = true;
4330 }
4331 break;
4332 case e1000_media_type_fiber:
4333 ret_val = hw->mac.ops.check_for_link(hw);
4334 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4335 break;
4336 case e1000_media_type_internal_serdes:
4337 ret_val = hw->mac.ops.check_for_link(hw);
4338 link_active = adapter->hw.mac.serdes_has_link;
4339 break;
4340 default:
4341 case e1000_media_type_unknown:
4342 break;
4343 }
4345 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4346 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4347 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4348 e_info("Gigabit has been disabled, downgrading speed\n");
4349 }
4351 return link_active;
4352 }
4354 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4355 {
4356 /* make sure the receive unit is started */
4357 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4358 (adapter->flags & FLAG_RX_RESTART_NOW)) {
4359 struct e1000_hw *hw = &adapter->hw;
4360 u32 rctl = er32(RCTL);
4361 ew32(RCTL, rctl | E1000_RCTL_EN);
4362 adapter->flags &= ~FLAG_RX_RESTART_NOW;
4363 }
4364 }
4366 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4367 {
4368 struct e1000_hw *hw = &adapter->hw;
4370 /* With 82574 controllers, PHY needs to be checked periodically
4371 * for hung state and reset, if two calls return true
4372 */
4373 if (e1000_check_phy_82574(hw))
4374 adapter->phy_hang_count++;
4375 else
4376 adapter->phy_hang_count = 0;
4378 if (adapter->phy_hang_count > 1) {
4379 adapter->phy_hang_count = 0;
4380 schedule_work(&adapter->reset_task);
4381 }
4382 }
4384 /**
4385 * e1000_watchdog - Timer Call-back
4386 * @data: pointer to adapter cast into an unsigned long
4387 **/
4388 static void e1000_watchdog(unsigned long data)
4389 {
4390 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4392 /* Do the rest outside of interrupt context */
4393 schedule_work(&adapter->watchdog_task);
4395 /* TODO: make this use queue_delayed_work() */
4396 }
4398 static void e1000_watchdog_task(struct work_struct *work)
4399 {
4400 struct e1000_adapter *adapter = container_of(work,
4401 struct e1000_adapter, watchdog_task);
4402 struct net_device *netdev = adapter->netdev;
4403 struct e1000_mac_info *mac = &adapter->hw.mac;
4404 struct e1000_phy_info *phy = &adapter->hw.phy;
4405 struct e1000_ring *tx_ring = adapter->tx_ring;
4406 struct e1000_hw *hw = &adapter->hw;
4407 u32 link, tctl;
4409 if (test_bit(__E1000_DOWN, &adapter->state))
4410 return;
4412 link = e1000e_has_link(adapter);
4413 if ((netif_carrier_ok(netdev)) && link) {
4414 /* Cancel scheduled suspend requests. */
4415 pm_runtime_resume(netdev->dev.parent);
4417 e1000e_enable_receives(adapter);
4418 goto link_up;
4419 }
4421 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4422 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4423 e1000_update_mng_vlan(adapter);
4425 if (link) {
4426 if (!netif_carrier_ok(netdev)) {
4427 bool txb2b = true;
4429 /* Cancel scheduled suspend requests. */
4430 pm_runtime_resume(netdev->dev.parent);
4432 /* update snapshot of PHY registers on LSC */
4433 e1000_phy_read_status(adapter);
4434 mac->ops.get_link_up_info(&adapter->hw,
4435 &adapter->link_speed,
4436 &adapter->link_duplex);
4437 e1000_print_link_info(adapter);
4438 /* On supported PHYs, check for duplex mismatch only
4439 * if link has autonegotiated at 10/100 half
4440 */
4441 if ((hw->phy.type == e1000_phy_igp_3 ||
4442 hw->phy.type == e1000_phy_bm) &&
4443 hw->mac.autoneg &&
4444 (adapter->link_speed == SPEED_10 ||
4445 adapter->link_speed == SPEED_100) &&
4446 (adapter->link_duplex == HALF_DUPLEX)) {
4447 u16 autoneg_exp;
4449 e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
4451 if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
4452 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4453 }
4455 /* adjust timeout factor according to speed/duplex */
4456 adapter->tx_timeout_factor = 1;
4457 switch (adapter->link_speed) {
4458 case SPEED_10:
4459 txb2b = false;
4460 adapter->tx_timeout_factor = 16;
4461 break;
4462 case SPEED_100:
4463 txb2b = false;
4464 adapter->tx_timeout_factor = 10;
4465 break;
4466 }
4468 /* workaround: re-program speed mode bit after
4469 * link-up event
4470 */
4471 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4472 !txb2b) {
4473 u32 tarc0;
4474 tarc0 = er32(TARC(0));
4475 tarc0 &= ~SPEED_MODE_BIT;
4476 ew32(TARC(0), tarc0);
4477 }
4479 /* disable TSO for pcie and 10/100 speeds, to avoid
4480 * some hardware issues
4481 */
4482 if (!(adapter->flags & FLAG_TSO_FORCE)) {
4483 switch (adapter->link_speed) {
4484 case SPEED_10:
4485 case SPEED_100:
4486 e_info("10/100 speed: disabling TSO\n");
4487 netdev->features &= ~NETIF_F_TSO;
4488 netdev->features &= ~NETIF_F_TSO6;
4489 break;
4490 case SPEED_1000:
4491 netdev->features |= NETIF_F_TSO;
4492 netdev->features |= NETIF_F_TSO6;
4493 break;
4494 default:
4495 /* oops */
4496 break;
4497 }
4498 }
4500 /* enable transmits in the hardware, need to do this
4501 * after setting TARC(0)
4502 */
4503 tctl = er32(TCTL);
4504 tctl |= E1000_TCTL_EN;
4505 ew32(TCTL, tctl);
4507 /* Perform any post-link-up configuration before
4508 * reporting link up.
4509 */
4510 if (phy->ops.cfg_on_link_up)
4511 phy->ops.cfg_on_link_up(hw);
4513 netif_carrier_on(netdev);
4515 if (!test_bit(__E1000_DOWN, &adapter->state))
4516 mod_timer(&adapter->phy_info_timer,
4517 round_jiffies(jiffies + 2 * HZ));
4518 }
4519 } else {
4520 if (netif_carrier_ok(netdev)) {
4521 adapter->link_speed = 0;
4522 adapter->link_duplex = 0;
4523 /* Link status message must follow this format */
4524 printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
4525 adapter->netdev->name);
4526 netif_carrier_off(netdev);
4527 if (!test_bit(__E1000_DOWN, &adapter->state))
4528 mod_timer(&adapter->phy_info_timer,
4529 round_jiffies(jiffies + 2 * HZ));
4531 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
4532 schedule_work(&adapter->reset_task);
4533 else
4534 pm_schedule_suspend(netdev->dev.parent,
4535 LINK_TIMEOUT);
4536 }
4537 }
4539 link_up:
4540 spin_lock(&adapter->stats64_lock);
4541 e1000e_update_stats(adapter);
4543 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
4544 adapter->tpt_old = adapter->stats.tpt;
4545 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
4546 adapter->colc_old = adapter->stats.colc;
4548 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
4549 adapter->gorc_old = adapter->stats.gorc;
4550 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
4551 adapter->gotc_old = adapter->stats.gotc;
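/* The *_old snapshots above turn the cumulative hardware counters into
 * per-watchdog-interval (roughly 2 s) deltas; the gotc/gorc deltas feed
 * the "simple mode" ITR heuristic further down in this function.
 */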
4552 spin_unlock(&adapter->stats64_lock);
4554 e1000e_update_adaptive(&adapter->hw);
4556 if (!netif_carrier_ok(netdev) &&
4557 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
4558 /* We've lost link, so the controller stops DMA,
4559 * but we've got queued Tx work that's never going
4560 * to get done, so reset controller to flush Tx.
4561 * (Do the reset outside of interrupt context).
4562 */
4563 schedule_work(&adapter->reset_task);
4564 /* return immediately since reset is imminent */
4565 return;
4566 }
4568 /* Simple mode for Interrupt Throttle Rate (ITR) */
4569 if (adapter->itr_setting == 4) {
4570 /* Symmetric Tx/Rx gets a reduced ITR=2000;
4571 * Total asymmetrical Tx or Rx gets ITR=8000;
4572 * everyone else is between 2000-8000.
4573 */
4574 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4575 u32 dif = (adapter->gotc > adapter->gorc ?
4576 adapter->gotc - adapter->gorc :
4577 adapter->gorc - adapter->gotc) / 10000;
4578 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
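/* Worked example with illustrative numbers only: gotc = 300000 and
 * gorc = 100000 bytes in the last interval give goc = 40 and dif = 20,
 * so itr = 20 * 6000 / 40 + 2000 = 5000 interrupts/s -- between the
 * symmetric (2000) and fully one-sided (8000) extremes.
 */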
4580 e1000e_write_itr(adapter, itr);
4581 }
4583 /* Cause software interrupt to ensure Rx ring is cleaned */
4584 if (adapter->msix_entries)
4585 ew32(ICS, adapter->rx_ring->ims_val);
4586 else
4587 ew32(ICS, E1000_ICS_RXDMT0);
4589 /* flush pending descriptors to memory before detecting Tx hang */
4590 e1000e_flush_descriptors(adapter);
4592 /* Force detection of hung controller every watchdog period */
4593 adapter->detect_tx_hung = true;
4595 /* With 82571 controllers, LAA may be overwritten due to controller
4596 * reset from the other port. Set the appropriate LAA in RAR[0]
4597 */
4598 if (e1000e_get_laa_state_82571(hw))
4599 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4601 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4602 e1000e_check_82574_phy_workaround(adapter);
4604 /* Reset the timer */
4605 if (!test_bit(__E1000_DOWN, &adapter->state))
4606 mod_timer(&adapter->watchdog_timer,
4607 round_jiffies(jiffies + 2 * HZ));
4608 }
4610 #define E1000_TX_FLAGS_CSUM 0x00000001
4611 #define E1000_TX_FLAGS_VLAN 0x00000002
4612 #define E1000_TX_FLAGS_TSO 0x00000004
4613 #define E1000_TX_FLAGS_IPV4 0x00000008
4614 #define E1000_TX_FLAGS_NO_FCS 0x00000010
4615 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
4616 #define E1000_TX_FLAGS_VLAN_SHIFT 16
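/* Illustrative use of the VLAN fields above (values assumed): a tag of
 * 0x0063 stored via tx_flags |= (0x0063 << E1000_TX_FLAGS_VLAN_SHIFT)
 * lands in the upper 16 bits, and (tx_flags & E1000_TX_FLAGS_VLAN_MASK)
 * >> E1000_TX_FLAGS_VLAN_SHIFT recovers it when the descriptor is built.
 */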
4618 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
4619 {
4620 struct e1000_context_desc *context_desc;
4621 struct e1000_buffer *buffer_info;
4622 unsigned int i;
4623 u32 cmd_length = 0;
4624 u16 ipcse = 0, mss;
4625 u8 ipcss, ipcso, tucss, tucso, hdr_len;
4627 if (!skb_is_gso(skb))
4628 return 0;
4630 if (skb_header_cloned(skb)) {
4631 int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4633 if (err)
4634 return err;
4635 }
4637 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4638 mss = skb_shinfo(skb)->gso_size;
4639 if (skb->protocol == htons(ETH_P_IP)) {
4640 struct iphdr *iph = ip_hdr(skb);
4641 iph->tot_len = 0;
4642 iph->check = 0;
4643 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
4644 0, IPPROTO_TCP, 0);
4645 cmd_length = E1000_TXD_CMD_IP;
4646 ipcse = skb_transport_offset(skb) - 1;
4647 } else if (skb_is_gso_v6(skb)) {
4648 ipv6_hdr(skb)->payload_len = 0;
4649 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4650 &ipv6_hdr(skb)->daddr,
4651 0, IPPROTO_TCP, 0);
4652 }
4654 ipcss = skb_network_offset(skb);
4655 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
4656 tucss = skb_transport_offset(skb);
4657 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
4659 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
4660 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
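/* Illustrative arithmetic (assumed values, not read from hardware): a
 * 7300-byte TSO skb with 66 bytes of headers encodes a payload length of
 * 7300 - 66 = 7234 here; with mss = 1448 the hardware emits five
 * segments, the last carrying 7234 - 4 * 1448 = 1442 payload bytes.
 */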
4662 i = tx_ring->next_to_use;
4663 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4664 buffer_info = &tx_ring->buffer_info[i];
4666 context_desc->lower_setup.ip_fields.ipcss = ipcss;
4667 context_desc->lower_setup.ip_fields.ipcso = ipcso;
4668 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
4669 context_desc->upper_setup.tcp_fields.tucss = tucss;
4670 context_desc->upper_setup.tcp_fields.tucso = tucso;
4671 context_desc->upper_setup.tcp_fields.tucse = 0;
4672 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
4673 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
4674 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
4676 buffer_info->time_stamp = jiffies;
4677 buffer_info->next_to_watch = i;
4679 i++;
4680 if (i == tx_ring->count)
4681 i = 0;
4682 tx_ring->next_to_use = i;
4684 return 1;
4685 }
4687 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
4688 {
4689 struct e1000_adapter *adapter = tx_ring->adapter;
4690 struct e1000_context_desc *context_desc;
4691 struct e1000_buffer *buffer_info;
4692 unsigned int i;
4693 u8 css;
4694 u32 cmd_len = E1000_TXD_CMD_DEXT;
4695 __be16 protocol;
4697 if (skb->ip_summed != CHECKSUM_PARTIAL)
4698 return false;
4700 if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
4701 protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
4702 else
4703 protocol = skb->protocol;
4705 switch (protocol) {
4706 case cpu_to_be16(ETH_P_IP):
4707 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4708 cmd_len |= E1000_TXD_CMD_TCP;
4709 break;
4710 case cpu_to_be16(ETH_P_IPV6):
4711 /* XXX not handling all IPV6 headers */
4712 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4713 cmd_len |= E1000_TXD_CMD_TCP;
4714 break;
4715 default:
4716 if (unlikely(net_ratelimit()))
4717 e_warn("checksum_partial proto=%x!\n",
4718 be16_to_cpu(protocol));
4719 break;
4720 }
4722 css = skb_checksum_start_offset(skb);
4724 i = tx_ring->next_to_use;
4725 buffer_info = &tx_ring->buffer_info[i];
4726 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
4728 context_desc->lower_setup.ip_config = 0;
4729 context_desc->upper_setup.tcp_fields.tucss = css;
4730 context_desc->upper_setup.tcp_fields.tucso =
4731 css + skb->csum_offset;
4732 context_desc->upper_setup.tcp_fields.tucse = 0;
4733 context_desc->tcp_seg_setup.data = 0;
4734 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
4736 buffer_info->time_stamp = jiffies;
4737 buffer_info->next_to_watch = i;
4739 i++;
4740 if (i == tx_ring->count)
4741 i = 0;
4742 tx_ring->next_to_use = i;
4744 return true;
4745 }
4747 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
4748 unsigned int first, unsigned int max_per_txd,
4749 unsigned int nr_frags)
4750 {
4751 struct e1000_adapter *adapter = tx_ring->adapter;
4752 struct pci_dev *pdev = adapter->pdev;
4753 struct e1000_buffer *buffer_info;
4754 unsigned int len = skb_headlen(skb);
4755 unsigned int offset = 0, size, count = 0, i;
4756 unsigned int f, bytecount, segs;
4758 i = tx_ring->next_to_use;
4760 while (len) {
4761 buffer_info = &tx_ring->buffer_info[i];
4762 size = min(len, max_per_txd);
4764 buffer_info->length = size;
4765 buffer_info->time_stamp = jiffies;
4766 buffer_info->next_to_watch = i;
4767 buffer_info->dma = dma_map_single(&pdev->dev,
4768 skb->data + offset,
4769 size, DMA_TO_DEVICE);
4770 buffer_info->mapped_as_page = false;
4771 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4772 goto dma_error;
4774 len -= size;
4775 offset += size;
4776 count++;
4778 if (len) {
4779 i++;
4780 if (i == tx_ring->count)
4781 i = 0;
4782 }
4783 }
4785 for (f = 0; f < nr_frags; f++) {
4786 const struct skb_frag_struct *frag;
4788 frag = &skb_shinfo(skb)->frags[f];
4789 len = skb_frag_size(frag);
4790 offset = 0;
4792 while (len) {
4793 i++;
4794 if (i == tx_ring->count)
4795 i = 0;
4797 buffer_info = &tx_ring->buffer_info[i];
4798 size = min(len, max_per_txd);
4800 buffer_info->length = size;
4801 buffer_info->time_stamp = jiffies;
4802 buffer_info->next_to_watch = i;
4803 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
4804 offset, size, DMA_TO_DEVICE);
4805 buffer_info->mapped_as_page = true;
4806 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
4807 goto dma_error;
4809 len -= size;
4810 offset += size;
4811 count++;
4812 }
4813 }
4815 segs = skb_shinfo(skb)->gso_segs ? : 1;
4816 /* multiply data chunks by size of headers */
4817 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
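/* Illustrative example: skb->len = 3000, skb_headlen() = 100 and
 * gso_segs = 3 give (3 - 1) * 100 + 3000 = 3200 bytes, since the header
 * bytes are replicated into each additional segment on the wire.
 */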
4819 tx_ring->buffer_info[i].skb = skb;
4820 tx_ring->buffer_info[i].segs = segs;
4821 tx_ring->buffer_info[i].bytecount = bytecount;
4822 tx_ring->buffer_info[first].next_to_watch = i;
4824 return count;
4826 dma_error:
4827 dev_err(&pdev->dev, "Tx DMA map failed\n");
4828 buffer_info->dma = 0;
4829 if (count)
4830 count--;
4832 while (count--) {
4833 if (i == 0)
4834 i += tx_ring->count;
4835 i--;
4836 buffer_info = &tx_ring->buffer_info[i];
4837 e1000_put_txbuf(tx_ring, buffer_info);
4838 }
4840 return 0;
4841 }
4843 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
4844 {
4845 struct e1000_adapter *adapter = tx_ring->adapter;
4846 struct e1000_tx_desc *tx_desc = NULL;
4847 struct e1000_buffer *buffer_info;
4848 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
4849 unsigned int i;
4851 if (tx_flags & E1000_TX_FLAGS_TSO) {
4852 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
4853 E1000_TXD_CMD_TSE;
4854 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4856 if (tx_flags & E1000_TX_FLAGS_IPV4)
4857 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
4858 }
4860 if (tx_flags & E1000_TX_FLAGS_CSUM) {
4861 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
4862 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
4863 }
4865 if (tx_flags & E1000_TX_FLAGS_VLAN) {
4866 txd_lower |= E1000_TXD_CMD_VLE;
4867 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
4868 }
4870 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4871 txd_lower &= ~(E1000_TXD_CMD_IFCS);
4873 i = tx_ring->next_to_use;
4875 do {
4876 buffer_info = &tx_ring->buffer_info[i];
4877 tx_desc = E1000_TX_DESC(*tx_ring, i);
4878 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4879 tx_desc->lower.data =
4880 cpu_to_le32(txd_lower | buffer_info->length);
4881 tx_desc->upper.data = cpu_to_le32(txd_upper);
4883 i++;
4884 if (i == tx_ring->count)
4885 i = 0;
4886 } while (--count > 0);
4888 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4890 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
4891 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
4892 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
4894 /* Force memory writes to complete before letting h/w
4895 * know there are new descriptors to fetch. (Only
4896 * applicable for weak-ordered memory model archs,
4897 * such as IA-64).
4898 */
4899 wmb();
4901 tx_ring->next_to_use = i;
4903 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
4904 e1000e_update_tdt_wa(tx_ring, i);
4905 else
4906 writel(i, tx_ring->tail);
4908 /* we need this if more than one processor can write to our tail
4909 * at a time, it synchronizes IO on IA64/Altix systems
4910 */
4911 mmiowb();
4912 }
4914 #define MINIMUM_DHCP_PACKET_SIZE 282
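/* One plausible breakdown of the 282-byte minimum (the code only checks
 * the total): 14 (Ethernet) + 20 (IPv4) + 8 (UDP) + 236 (fixed BOOTP
 * fields) + 4 (DHCP magic cookie) = 282, the smallest frame that can
 * still carry DHCP options.
 */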
4915 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
4916 struct sk_buff *skb)
4917 {
4918 struct e1000_hw *hw = &adapter->hw;
4919 u16 length, offset;
4921 if (vlan_tx_tag_present(skb)) {
4922 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
4923 (adapter->hw.mng_cookie.status &
4924 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
4925 return 0;
4926 }
4928 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
4929 return 0;
4931 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
4932 return 0;
4934 {
4935 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
4936 const struct udphdr *udp;
4938 if (ip->protocol != IPPROTO_UDP)
4939 return 0;
4941 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
4942 if (ntohs(udp->dest) != 67)
4943 return 0;
4945 offset = (u8 *)udp + 8 - skb->data;
4946 length = skb->len - offset;
4947 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
4948 }
4950 return 0;
4951 }
4953 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
4954 {
4955 struct e1000_adapter *adapter = tx_ring->adapter;
4957 netif_stop_queue(adapter->netdev);
4958 /* Herbert's original patch had:
4959 * smp_mb__after_netif_stop_queue();
4960 * but since that doesn't exist yet, just open code it.
4961 */
4962 smp_mb();
4964 /* We need to check again in a case another CPU has just
4965 * made room available.
4966 */
4967 if (e1000_desc_unused(tx_ring) < size)
4968 return -EBUSY;
4970 /* A reprieve! */
4971 netif_start_queue(adapter->netdev);
4972 ++adapter->restart_queue;
4973 return 0;
4974 }
4976 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
4977 {
4978 BUG_ON(size > tx_ring->count);
4980 if (e1000_desc_unused(tx_ring) >= size)
4981 return 0;
4982 return __e1000_maybe_stop_tx(tx_ring, size);
4983 }
4985 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4986 struct net_device *netdev)
4987 {
4988 struct e1000_adapter *adapter = netdev_priv(netdev);
4989 struct e1000_ring *tx_ring = adapter->tx_ring;
4990 unsigned int first;
4991 unsigned int tx_flags = 0;
4992 unsigned int len = skb_headlen(skb);
4993 unsigned int nr_frags;
4994 unsigned int mss;
4995 int count = 0;
4996 int tso;
4997 unsigned int f;
4999 if (test_bit(__E1000_DOWN, &adapter->state)) {
5000 dev_kfree_skb_any(skb);
5001 return NETDEV_TX_OK;
5002 }
5004 if (skb->len <= 0) {
5005 dev_kfree_skb_any(skb);
5006 return NETDEV_TX_OK;
5007 }
5009 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5010 * pad skb in order to meet this minimum size requirement
5011 */
5012 if (unlikely(skb->len < 17)) {
5013 if (skb_pad(skb, 17 - skb->len))
5014 return NETDEV_TX_OK;
5015 skb->len = 17;
5016 skb_set_tail_pointer(skb, 17);
5017 }
5019 mss = skb_shinfo(skb)->gso_size;
5020 if (mss) {
5021 u8 hdr_len;
5023 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5024 * points to just header, pull a few bytes of payload from
5025 * frags into skb->data
5026 */
5027 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5028 /* we do this workaround for ES2LAN, but it is un-necessary,
5029 * avoiding it could save a lot of cycles
5030 */
5031 if (skb->data_len && (hdr_len == len)) {
5032 unsigned int pull_size;
5034 pull_size = min_t(unsigned int, 4, skb->data_len);
5035 if (!__pskb_pull_tail(skb, pull_size)) {
5036 e_err("__pskb_pull_tail failed.\n");
5037 dev_kfree_skb_any(skb);
5038 return NETDEV_TX_OK;
5039 }
5040 len = skb_headlen(skb);
5041 }
5042 }
5044 /* reserve a descriptor for the offload context */
5045 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5046 count++;
5047 count++;
5049 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5051 nr_frags = skb_shinfo(skb)->nr_frags;
5052 for (f = 0; f < nr_frags; f++)
5053 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5054 adapter->tx_fifo_limit);
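/* Illustrative count, assuming tx_fifo_limit = 4096: a 9000-byte linear
 * area adds DIV_ROUND_UP(9000, 4096) = 3 descriptors, each fragment adds
 * its own DIV_ROUND_UP() share, and the one or two descriptors reserved
 * earlier cover the offload context.
 */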
5056 if (adapter->hw.mac.tx_pkt_filtering)
5057 e1000_transfer_dhcp_info(adapter, skb);
5059 /* need: count + 2 desc gap to keep tail from touching
5060 * head, otherwise try next time
5061 */
5062 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5063 return NETDEV_TX_BUSY;
5065 if (vlan_tx_tag_present(skb)) {
5066 tx_flags |= E1000_TX_FLAGS_VLAN;
5067 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5070 first = tx_ring->next_to_use;
5072 tso = e1000_tso(tx_ring, skb);
5073 if (tso < 0) {
5074 dev_kfree_skb_any(skb);
5075 return NETDEV_TX_OK;
5076 }
5078 if (tso)
5079 tx_flags |= E1000_TX_FLAGS_TSO;
5080 else if (e1000_tx_csum(tx_ring, skb))
5081 tx_flags |= E1000_TX_FLAGS_CSUM;
5083 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5084 * 82571 hardware supports TSO capabilities for IPv6 as well...
5085 * no longer assume, we must check the protocol explicitly.
5086 */
5087 if (skb->protocol == htons(ETH_P_IP))
5088 tx_flags |= E1000_TX_FLAGS_IPV4;
5090 if (unlikely(skb->no_fcs))
5091 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5093 /* if count is 0 then mapping error has occurred */
5094 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5095 nr_frags);
5096 if (count) {
5097 skb_tx_timestamp(skb);
5099 netdev_sent_queue(netdev, skb->len);
5100 e1000_tx_queue(tx_ring, tx_flags, count);
5101 /* Make sure there is space in the ring for the next send. */
5102 e1000_maybe_stop_tx(tx_ring,
5103 (MAX_SKB_FRAGS *
5104 DIV_ROUND_UP(PAGE_SIZE,
5105 adapter->tx_fifo_limit) + 2));
5106 } else {
5107 dev_kfree_skb_any(skb);
5108 tx_ring->buffer_info[first].time_stamp = 0;
5109 tx_ring->next_to_use = first;
5110 }
5112 return NETDEV_TX_OK;
5113 }
5115 /**
5116 * e1000_tx_timeout - Respond to a Tx Hang
5117 * @netdev: network interface device structure
5118 **/
5119 static void e1000_tx_timeout(struct net_device *netdev)
5120 {
5121 struct e1000_adapter *adapter = netdev_priv(netdev);
5123 /* Do the reset outside of interrupt context */
5124 adapter->tx_timeout_count++;
5125 schedule_work(&adapter->reset_task);
5126 }
5128 static void e1000_reset_task(struct work_struct *work)
5129 {
5130 struct e1000_adapter *adapter;
5131 adapter = container_of(work, struct e1000_adapter, reset_task);
5133 /* don't run the task if already down */
5134 if (test_bit(__E1000_DOWN, &adapter->state))
5135 return;
5137 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
5138 (adapter->flags & FLAG_RX_RESTART_NOW))) {
5139 e1000e_dump(adapter);
5140 e_err("Reset adapter\n");
5141 }
5142 e1000e_reinit_locked(adapter);
5143 }
5145 /**
5146 * e1000_get_stats64 - Get System Network Statistics
5147 * @netdev: network interface device structure
5148 * @stats: rtnl_link_stats64 pointer
5149 *
5150 * Returns the address of the device statistics structure.
5151 **/
5152 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5153 struct rtnl_link_stats64 *stats)
5154 {
5155 struct e1000_adapter *adapter = netdev_priv(netdev);
5157 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5158 spin_lock(&adapter->stats64_lock);
5159 e1000e_update_stats(adapter);
5160 /* Fill out the OS statistics structure */
5161 stats->rx_bytes = adapter->stats.gorc;
5162 stats->rx_packets = adapter->stats.gprc;
5163 stats->tx_bytes = adapter->stats.gotc;
5164 stats->tx_packets = adapter->stats.gptc;
5165 stats->multicast = adapter->stats.mprc;
5166 stats->collisions = adapter->stats.colc;
5168 /* Rx Errors */
5170 /* RLEC on some newer hardware can be incorrect so build
5171 * our own version based on RUC and ROC
5172 */
5173 stats->rx_errors = adapter->stats.rxerrc +
5174 adapter->stats.crcerrs + adapter->stats.algnerrc +
5175 adapter->stats.ruc + adapter->stats.roc +
5176 adapter->stats.cexterr;
5177 stats->rx_length_errors = adapter->stats.ruc +
5178 adapter->stats.roc;
5179 stats->rx_crc_errors = adapter->stats.crcerrs;
5180 stats->rx_frame_errors = adapter->stats.algnerrc;
5181 stats->rx_missed_errors = adapter->stats.mpc;
5183 /* Tx Errors */
5184 stats->tx_errors = adapter->stats.ecol +
5185 adapter->stats.latecol;
5186 stats->tx_aborted_errors = adapter->stats.ecol;
5187 stats->tx_window_errors = adapter->stats.latecol;
5188 stats->tx_carrier_errors = adapter->stats.tncrs;
5190 /* Tx Dropped needs to be maintained elsewhere */
5192 spin_unlock(&adapter->stats64_lock);
5193 return stats;
5194 }
5196 /**
5197 * e1000_change_mtu - Change the Maximum Transfer Unit
5198 * @netdev: network interface device structure
5199 * @new_mtu: new value for maximum frame size
5201 * Returns 0 on success, negative on failure
5202 **/
5203 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5204 {
5205 struct e1000_adapter *adapter = netdev_priv(netdev);
5206 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
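/* Worked example: the default 1500-byte MTU gives max_frame = 1500 + 14
 * (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1518, which is exactly ETH_FRAME_LEN +
 * ETH_FCS_LEN and therefore passes the jumbo-frame check below on every
 * supported part.
 */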
5208 /* Jumbo frame support */
5209 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5210 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5211 e_err("Jumbo Frames not supported.\n");
5212 return -EINVAL;
5213 }
5215 /* Supported frame sizes */
5216 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5217 (max_frame > adapter->max_hw_frame_size)) {
5218 e_err("Unsupported MTU setting\n");
5219 return -EINVAL;
5220 }
5222 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5223 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5224 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5225 (new_mtu > ETH_DATA_LEN)) {
5226 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5227 return -EINVAL;
5228 }
5230 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5231 usleep_range(1000, 2000);
5232 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5233 adapter->max_frame_size = max_frame;
5234 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5235 netdev->mtu = new_mtu;
5236 if (netif_running(netdev))
5237 e1000e_down(adapter);
5239 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5240 * means we reserve 2 more, this pushes us to allocate from the next
5241 * larger slab size.
5242 * i.e. RXBUFFER_2048 --> size-4096 slab
5243 * However with the new *_jumbo_rx* routines, jumbo receives will use
5244 * fragmented skbs
5245 */
5247 if (max_frame <= 2048)
5248 adapter->rx_buffer_len = 2048;
5249 else
5250 adapter->rx_buffer_len = 4096;
5252 /* adjust allocation if LPE protects us, and we aren't using SBP */
5253 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5254 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5255 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5256 + ETH_FCS_LEN;
5258 if (netif_running(netdev))
5259 e1000e_up(adapter);
5260 else
5261 e1000e_reset(adapter);
5263 clear_bit(__E1000_RESETTING, &adapter->state);
5265 return 0;
5266 }
5268 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5269 int cmd)
5270 {
5271 struct e1000_adapter *adapter = netdev_priv(netdev);
5272 struct mii_ioctl_data *data = if_mii(ifr);
5274 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5275 return -EOPNOTSUPP;
5277 switch (cmd) {
5278 case SIOCGMIIPHY:
5279 data->phy_id = adapter->hw.phy.addr;
5280 break;
5281 case SIOCGMIIREG:
5282 e1000_phy_read_status(adapter);
5284 switch (data->reg_num & 0x1F) {
5285 case MII_BMCR:
5286 data->val_out = adapter->phy_regs.bmcr;
5287 break;
5288 case MII_BMSR:
5289 data->val_out = adapter->phy_regs.bmsr;
5290 break;
5291 case MII_PHYSID1:
5292 data->val_out = (adapter->hw.phy.id >> 16);
5293 break;
5294 case MII_PHYSID2:
5295 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5296 break;
5297 case MII_ADVERTISE:
5298 data->val_out = adapter->phy_regs.advertise;
5299 break;
5300 case MII_LPA:
5301 data->val_out = adapter->phy_regs.lpa;
5302 break;
5303 case MII_EXPANSION:
5304 data->val_out = adapter->phy_regs.expansion;
5305 break;
5306 case MII_CTRL1000:
5307 data->val_out = adapter->phy_regs.ctrl1000;
5308 break;
5309 case MII_STAT1000:
5310 data->val_out = adapter->phy_regs.stat1000;
5311 break;
5312 case MII_ESTATUS:
5313 data->val_out = adapter->phy_regs.estatus;
5314 break;
5315 default:
5316 return -EIO;
5317 }
5318 break;
5319 case SIOCSMIIREG:
5320 default:
5321 return -EOPNOTSUPP;
5322 }
5323 return 0;
5324 }
5326 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5327 {
5328 switch (cmd) {
5329 case SIOCGMIIPHY:
5330 case SIOCGMIIREG:
5331 case SIOCSMIIREG:
5332 return e1000_mii_ioctl(netdev, ifr, cmd);
5333 default:
5334 return -EOPNOTSUPP;
5335 }
5336 }
5338 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5339 {
5340 struct e1000_hw *hw = &adapter->hw;
5341 u32 i, mac_reg;
5342 u16 phy_reg, wuc_enable;
5343 int retval = 0;
5345 /* copy MAC RARs to PHY RARs */
5346 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5348 retval = hw->phy.ops.acquire(hw);
5349 if (retval) {
5350 e_err("Could not acquire PHY\n");
5351 return retval;
5352 }
5354 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5355 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5356 if (retval)
5357 goto release;
5359 /* copy MAC MTA to PHY MTA - only needed for pchlan */
5360 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5361 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5362 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5363 (u16)(mac_reg & 0xFFFF));
5364 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5365 (u16)((mac_reg >> 16) & 0xFFFF));
5366 }
5368 /* configure PHY Rx Control register */
5369 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5370 mac_reg = er32(RCTL);
5371 if (mac_reg & E1000_RCTL_UPE)
5372 phy_reg |= BM_RCTL_UPE;
5373 if (mac_reg & E1000_RCTL_MPE)
5374 phy_reg |= BM_RCTL_MPE;
5375 phy_reg &= ~(BM_RCTL_MO_MASK);
5376 if (mac_reg & E1000_RCTL_MO_3)
5377 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5378 << BM_RCTL_MO_SHIFT);
5379 if (mac_reg & E1000_RCTL_BAM)
5380 phy_reg |= BM_RCTL_BAM;
5381 if (mac_reg & E1000_RCTL_PMCF)
5382 phy_reg |= BM_RCTL_PMCF;
5383 mac_reg = er32(CTRL);
5384 if (mac_reg & E1000_CTRL_RFCE)
5385 phy_reg |= BM_RCTL_RFCE;
5386 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
5388 /* enable PHY wakeup in MAC register */
5389 ew32(WUFC, wufc);
5390 ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
5392 /* configure and enable PHY wakeup in PHY registers */
5393 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
5394 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
5396 /* activate PHY wakeup */
5397 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
5398 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5399 if (retval)
5400 e_err("Could not set PHY Host Wakeup bit\n");
5401 release:
5402 hw->phy.ops.release(hw);
5404 return retval;
5405 }
5407 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5408 bool runtime)
5409 {
5410 struct net_device *netdev = pci_get_drvdata(pdev);
5411 struct e1000_adapter *adapter = netdev_priv(netdev);
5412 struct e1000_hw *hw = &adapter->hw;
5413 u32 ctrl, ctrl_ext, rctl, status;
5414 /* Runtime suspend should only enable wakeup for link changes */
5415 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
5416 int retval = 0;
5418 netif_device_detach(netdev);
5420 if (netif_running(netdev)) {
5421 int count = E1000_CHECK_RESET_COUNT;
5423 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
5424 usleep_range(10000, 20000);
5426 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
5427 e1000e_down(adapter);
5428 e1000_free_irq(adapter);
5429 }
5430 e1000e_reset_interrupt_capability(adapter);
5432 retval = pci_save_state(pdev);
5433 if (retval)
5434 return retval;
5436 status = er32(STATUS);
5437 if (status & E1000_STATUS_LU)
5438 wufc &= ~E1000_WUFC_LNKC;
5440 if (wufc) {
5441 e1000_setup_rctl(adapter);
5442 e1000e_set_rx_mode(netdev);
5444 /* turn on all-multi mode if wake on multicast is enabled */
5445 if (wufc & E1000_WUFC_MC) {
5446 rctl = er32(RCTL);
5447 rctl |= E1000_RCTL_MPE;
5448 ew32(RCTL, rctl);
5449 }
5451 ctrl = er32(CTRL);
5452 /* advertise wake from D3Cold */
5453 #define E1000_CTRL_ADVD3WUC 0x00100000
5454 /* phy power management enable */
5455 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5456 ctrl |= E1000_CTRL_ADVD3WUC;
5457 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5458 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
5459 ew32(CTRL, ctrl);
5461 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
5462 adapter->hw.phy.media_type ==
5463 e1000_media_type_internal_serdes) {
5464 /* keep the laser running in D3 */
5465 ctrl_ext = er32(CTRL_EXT);
5466 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
5467 ew32(CTRL_EXT, ctrl_ext);
5468 }
5470 if (adapter->flags & FLAG_IS_ICH)
5471 e1000_suspend_workarounds_ich8lan(&adapter->hw);
5473 /* Allow time for pending master requests to run */
5474 e1000e_disable_pcie_master(&adapter->hw);
5476 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5477 /* enable wakeup by the PHY */
5478 retval = e1000_init_phy_wakeup(adapter, wufc);
5479 if (retval)
5480 return retval;
5481 } else {
5482 /* enable wakeup by the MAC */
5483 ew32(WUFC, wufc);
5484 ew32(WUC, E1000_WUC_PME_EN);
5485 }
5486 } else {
5487 ew32(WUC, 0);
5488 ew32(WUFC, 0);
5489 }
5491 *enable_wake = !!wufc;
5493 /* make sure adapter isn't asleep if manageability is enabled */
5494 if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
5495 (hw->mac.ops.check_mng_mode(hw)))
5496 *enable_wake = true;
5498 if (adapter->hw.phy.type == e1000_phy_igp_3)
5499 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
5501 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5502 * would have already happened in close and is redundant.
5503 */
5504 e1000e_release_hw_control(adapter);
5506 pci_disable_device(pdev);
5508 return 0;
5509 }
5511 static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
5512 {
5513 if (sleep && wake) {
5514 pci_prepare_to_sleep(pdev);
5515 return;
5516 }
5518 pci_wake_from_d3(pdev, wake);
5519 pci_set_power_state(pdev, PCI_D3hot);
5520 }
5522 static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
5523 bool wake)
5524 {
5525 struct net_device *netdev = pci_get_drvdata(pdev);
5526 struct e1000_adapter *adapter = netdev_priv(netdev);
5528 /* The pci-e switch on some quad port adapters will report a
5529 * correctable error when the MAC transitions from D0 to D3. To
5530 * prevent this we need to mask off the correctable errors on the
5531 * downstream port of the pci-e switch.
5532 */
5533 if (adapter->flags & FLAG_IS_QUAD_PORT) {
5534 struct pci_dev *us_dev = pdev->bus->self;
5535 u16 devctl;
5537 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
5538 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
5539 (devctl & ~PCI_EXP_DEVCTL_CERE));
5541 e1000_power_off(pdev, sleep, wake);
5543 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
5544 } else {
5545 e1000_power_off(pdev, sleep, wake);
5546 }
5547 }
5549 #ifdef CONFIG_PCIEASPM
5550 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5551 {
5552 pci_disable_link_state_locked(pdev, state);
5553 }
5554 #else
5555 static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5556 {
5557 /* Both device and parent should have the same ASPM setting.
5558 * Disable ASPM in downstream component first and then upstream.
5559 */
5560 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
5562 if (pdev->bus->self)
5563 pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
5564 state);
5565 }
5566 #endif /* CONFIG_PCIEASPM */
5567 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5568 {
5569 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
5570 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5571 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
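/* Example of the resulting message when both bits are set (device
 * address illustrative):
 * e1000e 0000:00:19.0: Disabling ASPM L0s L1
 */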
5573 __e1000e_disable_aspm(pdev, state);
5574 }
5577 static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5578 {
5579 return !!adapter->tx_ring->buffer_info;
5580 }
5582 static int __e1000_resume(struct pci_dev *pdev)
5583 {
5584 struct net_device *netdev = pci_get_drvdata(pdev);
5585 struct e1000_adapter *adapter = netdev_priv(netdev);
5586 struct e1000_hw *hw = &adapter->hw;
5587 u16 aspm_disable_flag = 0;
5588 u32 err;
5590 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5591 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5592 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5593 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5594 if (aspm_disable_flag)
5595 e1000e_disable_aspm(pdev, aspm_disable_flag);
5597 pci_set_power_state(pdev, PCI_D0);
5598 pci_restore_state(pdev);
5599 pci_save_state(pdev);
5601 e1000e_set_interrupt_capability(adapter);
5602 if (netif_running(netdev)) {
5603 err = e1000_request_irq(adapter);
5604 if (err)
5605 return err;
5606 }
5608 if (hw->mac.type >= e1000_pch2lan)
5609 e1000_resume_workarounds_pchlan(&adapter->hw);
5611 e1000e_power_up_phy(adapter);
5613 /* report the system wakeup cause from S3/S4 */
5614 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
5615 u16 phy_data;
5617 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
5618 if (phy_data) {
5619 e_info("PHY Wakeup cause - %s\n",
5620 phy_data & E1000_WUS_EX ? "Unicast Packet" :
5621 phy_data & E1000_WUS_MC ? "Multicast Packet" :
5622 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
5623 phy_data & E1000_WUS_MAG ? "Magic Packet" :
5624 phy_data & E1000_WUS_LNKC ?
5625 "Link Status Change" : "other");
5626 }
5627 e1e_wphy(&adapter->hw, BM_WUS, ~0);
5628 } else {
5629 u32 wus = er32(WUS);
5630 if (wus) {
5631 e_info("MAC Wakeup cause - %s\n",
5632 wus & E1000_WUS_EX ? "Unicast Packet" :
5633 wus & E1000_WUS_MC ? "Multicast Packet" :
5634 wus & E1000_WUS_BC ? "Broadcast Packet" :
5635 wus & E1000_WUS_MAG ? "Magic Packet" :
5636 wus & E1000_WUS_LNKC ? "Link Status Change" :
5637 "other");
5638 ew32(WUS, ~0);
5639 }
5640 }
5642 e1000e_reset(adapter);
5644 e1000_init_manageability_pt(adapter);
5646 if (netif_running(netdev))
5647 e1000e_up(adapter);
5649 netif_device_attach(netdev);
5651 /* If the controller has AMT, do not set DRV_LOAD until the interface
5652 * is up. For all other cases, let the f/w know that the h/w is now
5653 * under the control of the driver.
5654 */
5655 if (!(adapter->flags & FLAG_HAS_AMT))
5656 e1000e_get_hw_control(adapter);
5657 return 0;
5658 }
5660 #ifdef CONFIG_PM
5661 #ifdef CONFIG_PM_SLEEP
5662 static int e1000_suspend(struct device *dev)
5663 {
5664 struct pci_dev *pdev = to_pci_dev(dev);
5665 int retval;
5666 bool wake;
5668 retval = __e1000_shutdown(pdev, &wake, false);
5669 if (!retval)
5670 e1000_complete_shutdown(pdev, true, wake);
5672 return retval;
5673 }
5675 static int e1000_resume(struct device *dev)
5676 {
5677 struct pci_dev *pdev = to_pci_dev(dev);
5678 struct net_device *netdev = pci_get_drvdata(pdev);
5679 struct e1000_adapter *adapter = netdev_priv(netdev);
5681 if (e1000e_pm_ready(adapter))
5682 adapter->idle_check = true;
5684 return __e1000_resume(pdev);
5685 }
5686 #endif /* CONFIG_PM_SLEEP */
5688 #ifdef CONFIG_PM_RUNTIME
5689 static int e1000_runtime_suspend(struct device *dev)
5690 {
5691 struct pci_dev *pdev = to_pci_dev(dev);
5692 struct net_device *netdev = pci_get_drvdata(pdev);
5693 struct e1000_adapter *adapter = netdev_priv(netdev);
5695 if (e1000e_pm_ready(adapter)) {
5696 bool wake;
5698 __e1000_shutdown(pdev, &wake, true);
5699 }
5701 return 0;
5702 }
5704 static int e1000_idle(struct device *dev)
5705 {
5706 struct pci_dev *pdev = to_pci_dev(dev);
5707 struct net_device *netdev = pci_get_drvdata(pdev);
5708 struct e1000_adapter *adapter = netdev_priv(netdev);
5710 if (!e1000e_pm_ready(adapter))
5711 return 0;
5713 if (adapter->idle_check) {
5714 adapter->idle_check = false;
5715 if (!e1000e_has_link(adapter))
5716 pm_schedule_suspend(dev, MSEC_PER_SEC);
5717 }
5719 return -EBUSY;
5720 }
5722 static int e1000_runtime_resume(struct device *dev)
5723 {
5724 struct pci_dev *pdev = to_pci_dev(dev);
5725 struct net_device *netdev = pci_get_drvdata(pdev);
5726 struct e1000_adapter *adapter = netdev_priv(netdev);
5728 if (!e1000e_pm_ready(adapter))
5729 return 0;
5731 adapter->idle_check = !dev->power.runtime_auto;
5732 return __e1000_resume(pdev);
5733 }
5734 #endif /* CONFIG_PM_RUNTIME */
5735 #endif /* CONFIG_PM */
5737 static void e1000_shutdown(struct pci_dev *pdev)
5738 {
5739 bool wake = false;
5741 __e1000_shutdown(pdev, &wake, false);
5743 if (system_state == SYSTEM_POWER_OFF)
5744 e1000_complete_shutdown(pdev, false, wake);
5745 }
5747 #ifdef CONFIG_NET_POLL_CONTROLLER
5749 static irqreturn_t e1000_intr_msix(int irq, void *data)
5750 {
5751 struct net_device *netdev = data;
5752 struct e1000_adapter *adapter = netdev_priv(netdev);
5754 if (adapter->msix_entries) {
5755 int vector, msix_irq;
5757 vector = 0;
5758 msix_irq = adapter->msix_entries[vector].vector;
5759 disable_irq(msix_irq);
5760 e1000_intr_msix_rx(msix_irq, netdev);
5761 enable_irq(msix_irq);
5763 vector++;
5764 msix_irq = adapter->msix_entries[vector].vector;
5765 disable_irq(msix_irq);
5766 e1000_intr_msix_tx(msix_irq, netdev);
5767 enable_irq(msix_irq);
5769 vector++;
5770 msix_irq = adapter->msix_entries[vector].vector;
5771 disable_irq(msix_irq);
5772 e1000_msix_other(msix_irq, netdev);
5773 enable_irq(msix_irq);
5774 }
5776 return IRQ_HANDLED;
5777 }
5779 /**
5780 * e1000_netpoll
5781 * @netdev: network interface device structure
5783 * Polling 'interrupt' - used by things like netconsole to send skbs
5784 * without having to re-enable interrupts. It's not called while
5785 * the interrupt routine is executing.
5786 */
5787 static void e1000_netpoll(struct net_device *netdev)
5788 {
5789 struct e1000_adapter *adapter = netdev_priv(netdev);
5791 switch (adapter->int_mode) {
5792 case E1000E_INT_MODE_MSIX:
5793 e1000_intr_msix(adapter->pdev->irq, netdev);
5794 break;
5795 case E1000E_INT_MODE_MSI:
5796 disable_irq(adapter->pdev->irq);
5797 e1000_intr_msi(adapter->pdev->irq, netdev);
5798 enable_irq(adapter->pdev->irq);
5799 break;
5800 default: /* E1000E_INT_MODE_LEGACY */
5801 disable_irq(adapter->pdev->irq);
5802 e1000_intr(adapter->pdev->irq, netdev);
5803 enable_irq(adapter->pdev->irq);
5804 break;
5805 }
5806 }
5807 #endif
5809 /**
5810 * e1000_io_error_detected - called when PCI error is detected
5811 * @pdev: Pointer to PCI device
5812 * @state: The current pci connection state
5813 *
5814 * This function is called after a PCI bus error affecting
5815 * this device has been detected.
5816 */
5817 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5818 pci_channel_state_t state)
5819 {
5820 struct net_device *netdev = pci_get_drvdata(pdev);
5821 struct e1000_adapter *adapter = netdev_priv(netdev);
5823 netif_device_detach(netdev);
5825 if (state == pci_channel_io_perm_failure)
5826 return PCI_ERS_RESULT_DISCONNECT;
5828 if (netif_running(netdev))
5829 e1000e_down(adapter);
5830 pci_disable_device(pdev);
5832 /* Request a slot reset. */
5833 return PCI_ERS_RESULT_NEED_RESET;
5834 }
5836 /**
5837 * e1000_io_slot_reset - called after the pci bus has been reset.
5838 * @pdev: Pointer to PCI device
5839 *
5840 * Restart the card from scratch, as if from a cold-boot. Implementation
5841 * resembles the first-half of the e1000_resume routine.
5842 */
5843 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5844 {
5845 struct net_device *netdev = pci_get_drvdata(pdev);
5846 struct e1000_adapter *adapter = netdev_priv(netdev);
5847 struct e1000_hw *hw = &adapter->hw;
5848 u16 aspm_disable_flag = 0;
5849 int err;
5850 pci_ers_result_t result;
5852 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
5853 aspm_disable_flag = PCIE_LINK_STATE_L0S;
5854 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5855 aspm_disable_flag |= PCIE_LINK_STATE_L1;
5856 if (aspm_disable_flag)
5857 e1000e_disable_aspm(pdev, aspm_disable_flag);
5859 err = pci_enable_device_mem(pdev);
5860 if (err) {
5861 dev_err(&pdev->dev,
5862 "Cannot re-enable PCI device after reset.\n");
5863 result = PCI_ERS_RESULT_DISCONNECT;
5864 } else {
5865 pci_set_master(pdev);
5866 pdev->state_saved = true;
5867 pci_restore_state(pdev);
5869 pci_enable_wake(pdev, PCI_D3hot, 0);
5870 pci_enable_wake(pdev, PCI_D3cold, 0);
5872 e1000e_reset(adapter);
5873 ew32(WUS, ~0);
5874 result = PCI_ERS_RESULT_RECOVERED;
5875 }
5877 pci_cleanup_aer_uncorrect_error_status(pdev);
5879 return result;
5880 }
5882 /**
5883 * e1000_io_resume - called when traffic can start flowing again.
5884 * @pdev: Pointer to PCI device
5885 *
5886 * This callback is called when the error recovery driver tells us that
5887 * it's OK to resume normal operation. Implementation resembles the
5888 * second-half of the e1000_resume routine.
5889 */
5890 static void e1000_io_resume(struct pci_dev *pdev)
5891 {
5892 struct net_device *netdev = pci_get_drvdata(pdev);
5893 struct e1000_adapter *adapter = netdev_priv(netdev);
5895 e1000_init_manageability_pt(adapter);
5897 if (netif_running(netdev)) {
5898 if (e1000e_up(adapter)) {
5899 dev_err(&pdev->dev,
5900 "can't bring device back up after reset\n");
5901 return;
5902 }
5903 }
5905 netif_device_attach(netdev);
5907 /* If the controller has AMT, do not set DRV_LOAD until the interface
5908 * is up. For all other cases, let the f/w know that the h/w is now
5909 * under the control of the driver.
5910 */
5911 if (!(adapter->flags & FLAG_HAS_AMT))
5912 e1000e_get_hw_control(adapter);
5913 }
5916 static void e1000_print_device_info(struct e1000_adapter *adapter)
5917 {
5918 struct e1000_hw *hw = &adapter->hw;
5919 struct net_device *netdev = adapter->netdev;
5920 u32 ret_val;
5921 u8 pba_str[E1000_PBANUM_LENGTH];
5923 /* print bus type/speed/width info */
5924 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
5925 /* bus width */
5926 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
5927 "Width x1"),
5928 /* MAC address */
5929 netdev->dev_addr);
5930 e_info("Intel(R) PRO/%s Network Connection\n",
5931 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5932 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5933 E1000_PBANUM_LENGTH);
5934 if (ret_val)
5935 strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
5936 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5937 hw->mac.type, hw->phy.type, pba_str);
5938 }
5940 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
5941 {
5942 struct e1000_hw *hw = &adapter->hw;
5943 int ret_val;
5944 u16 buf = 0;
5946 if (hw->mac.type != e1000_82573)
5947 return;
5949 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
5950 le16_to_cpus(&buf);
5951 if (!ret_val && (!(buf & (1 << 0)))) {
5952 /* Deep Smart Power Down (DSPD) */
5953 dev_warn(&adapter->pdev->dev,
5954 "Warning: detected DSPD enabled in EEPROM\n");
5958 static int e1000_set_features(struct net_device *netdev,
5959 netdev_features_t features)
5961 struct e1000_adapter *adapter = netdev_priv(netdev);
5962 netdev_features_t changed = features ^ netdev->features;
5964 if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
5965 adapter->flags |= FLAG_TSO_FORCE;
5967 if (!(changed & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX |
5968 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
5969 NETIF_F_RXALL)))
5970 return 0;
5972 if (changed & NETIF_F_RXFCS) {
5973 if (features & NETIF_F_RXFCS) {
5974 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
5975 } else {
5976 /* We need to take it back to defaults, which might mean
5977 * stripping is still disabled at the adapter level.
5978 */
5979 if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
5980 adapter->flags2 |= FLAG2_CRC_STRIPPING;
5981 else
5982 adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
5983 }
5984 }
5986 netdev->features = features;
5988 if (netif_running(netdev))
5989 e1000e_reinit_locked(adapter);
5990 else
5991 e1000e_reset(adapter);
5993 return 0;
5994 }
5996 static const struct net_device_ops e1000e_netdev_ops = {
5997 .ndo_open = e1000_open,
5998 .ndo_stop = e1000_close,
5999 .ndo_start_xmit = e1000_xmit_frame,
6000 .ndo_get_stats64 = e1000e_get_stats64,
6001 .ndo_set_rx_mode = e1000e_set_rx_mode,
6002 .ndo_set_mac_address = e1000_set_mac,
6003 .ndo_change_mtu = e1000_change_mtu,
6004 .ndo_do_ioctl = e1000_ioctl,
6005 .ndo_tx_timeout = e1000_tx_timeout,
6006 .ndo_validate_addr = eth_validate_addr,
6008 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6009 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6010 #ifdef CONFIG_NET_POLL_CONTROLLER
6011 .ndo_poll_controller = e1000_netpoll,
6012 #endif
6013 .ndo_set_features = e1000_set_features,
6014 };
6016 /**
6017 * e1000_probe - Device Initialization Routine
6018 * @pdev: PCI device information struct
6019 * @ent: entry in e1000_pci_tbl
6021 * Returns 0 on success, negative on failure
6023 * e1000_probe initializes an adapter identified by a pci_dev structure.
6024 * The OS initialization, configuring of the adapter private structure,
6025 * and a hardware reset occur.
6026 **/
6027 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6028 {
6029 struct net_device *netdev;
6030 struct e1000_adapter *adapter;
6031 struct e1000_hw *hw;
6032 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6033 resource_size_t mmio_start, mmio_len;
6034 resource_size_t flash_start, flash_len;
6035 static int cards_found;
6036 u16 aspm_disable_flag = 0;
6037 int i, err, pci_using_dac;
6038 u16 eeprom_data = 0;
6039 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6041 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6042 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6043 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6044 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6045 if (aspm_disable_flag)
6046 e1000e_disable_aspm(pdev, aspm_disable_flag);
6048 err = pci_enable_device_mem(pdev);
6049 if (err)
6050 return err;
6052 pci_using_dac = 0;
6053 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
6054 if (!err) {
6055 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
6056 if (!err)
6057 pci_using_dac = 1;
6058 } else {
6059 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6060 if (err) {
6061 err = dma_set_coherent_mask(&pdev->dev,
6062 DMA_BIT_MASK(32));
6063 if (err) {
6064 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
6065 goto err_dma;
6066 }
6067 }
6068 }
6070 err = pci_request_selected_regions_exclusive(pdev,
6071 pci_select_bars(pdev, IORESOURCE_MEM),
6072 e1000e_driver_name);
6073 if (err)
6074 goto err_pci_reg;
6076 /* AER (Advanced Error Reporting) hooks */
6077 pci_enable_pcie_error_reporting(pdev);
6079 pci_set_master(pdev);
6080 /* PCI config space info */
6081 err = pci_save_state(pdev);
6082 if (err)
6083 goto err_alloc_etherdev;
6085 err = -ENOMEM;
6086 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
6087 if (!netdev)
6088 goto err_alloc_etherdev;
6090 SET_NETDEV_DEV(netdev, &pdev->dev);
6092 netdev->irq = pdev->irq;
6094 pci_set_drvdata(pdev, netdev);
6095 adapter = netdev_priv(netdev);
6096 hw = &adapter->hw;
6097 adapter->netdev = netdev;
6098 adapter->pdev = pdev;
6099 adapter->ei = ei;
6100 adapter->pba = ei->pba;
6101 adapter->flags = ei->flags;
6102 adapter->flags2 = ei->flags2;
6103 adapter->hw.adapter = adapter;
6104 adapter->hw.mac.type = ei->mac;
6105 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6106 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6108 mmio_start = pci_resource_start(pdev, 0);
6109 mmio_len = pci_resource_len(pdev, 0);
6111 err = -EIO;
6112 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
6113 if (!adapter->hw.hw_addr)
6114 goto err_ioremap;
6116 if ((adapter->flags & FLAG_HAS_FLASH) &&
6117 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6118 flash_start = pci_resource_start(pdev, 1);
6119 flash_len = pci_resource_len(pdev, 1);
6120 adapter->hw.flash_address = ioremap(flash_start, flash_len);
6121 if (!adapter->hw.flash_address)
6122 goto err_flashmap;
6123 }
6125 /* construct the net_device struct */
6126 netdev->netdev_ops = &e1000e_netdev_ops;
6127 e1000e_set_ethtool_ops(netdev);
6128 netdev->watchdog_timeo = 5 * HZ;
6129 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6130 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6132 netdev->mem_start = mmio_start;
6133 netdev->mem_end = mmio_start + mmio_len;
6135 adapter->bd_number = cards_found++;
6137 e1000e_check_options(adapter);
6139 /* setup adapter struct */
6140 err = e1000_sw_init(adapter);
6141 if (err)
6142 goto err_sw_init;
6144 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6145 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6146 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6148 err = ei->get_variants(adapter);
6149 if (err)
6150 goto err_hw_init;
6152 if ((adapter->flags & FLAG_IS_ICH) &&
6153 (adapter->flags & FLAG_READ_ONLY_NVM))
6154 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6156 hw->mac.ops.get_bus_info(&adapter->hw);
6158 adapter->hw.phy.autoneg_wait_to_complete = 0;
6160 /* Copper options */
6161 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6162 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6163 adapter->hw.phy.disable_polarity_correction = 0;
6164 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6165 }
6167 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6168 dev_info(&pdev->dev,
6169 "PHY reset is blocked due to SOL/IDER session.\n");
6171 /* Set initial default active device features */
6172 netdev->features = (NETIF_F_SG |
6173 NETIF_F_HW_VLAN_RX |
6174 NETIF_F_HW_VLAN_TX |
6175 NETIF_F_TSO |
6176 NETIF_F_TSO6 |
6177 NETIF_F_RXHASH |
6178 NETIF_F_RXCSUM |
6179 NETIF_F_HW_CSUM);
6181 /* Set user-changeable features (subset of all device features) */
6182 netdev->hw_features = netdev->features;
6183 netdev->hw_features |= NETIF_F_RXFCS;
6184 netdev->priv_flags |= IFF_SUPP_NOFCS;
6185 netdev->hw_features |= NETIF_F_RXALL;
6187 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6188 netdev->features |= NETIF_F_HW_VLAN_FILTER;
6190 netdev->vlan_features |= (NETIF_F_SG |
6191 NETIF_F_TSO |
6192 NETIF_F_TSO6 |
6193 NETIF_F_HW_CSUM);
6195 netdev->priv_flags |= IFF_UNICAST_FLT;
6197 if (pci_using_dac) {
6198 netdev->features |= NETIF_F_HIGHDMA;
6199 netdev->vlan_features |= NETIF_F_HIGHDMA;
6200 }
6202 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6203 adapter->flags |= FLAG_MNG_PT_ENABLED;
6205 /* before reading the NVM, reset the controller to
6206 * put the device in a known good starting state
6207 */
6208 adapter->hw.mac.ops.reset_hw(&adapter->hw);
6210 /* systems with ASPM and others may see the checksum fail on the first
6211 * attempt. Let's give it a few tries
6212 */
6213 for (i = 0;; i++) {
6214 if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
6215 break;
6216 if (i == 2) {
6217 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6218 err = -EIO;
6219 goto err_eeprom;
6220 }
6221 }
6223 e1000_eeprom_checks(adapter);
6225 /* copy the MAC address */
6226 if (e1000e_read_mac_addr(&adapter->hw))
6227 dev_err(&pdev->dev,
6228 "NVM Read Error while reading MAC address\n");
6230 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6231 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
6233 if (!is_valid_ether_addr(netdev->perm_addr)) {
6234 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
6235 netdev->perm_addr);
6236 err = -EIO;
6237 goto err_eeprom;
6238 }
6240 init_timer(&adapter->watchdog_timer);
6241 adapter->watchdog_timer.function = e1000_watchdog;
6242 adapter->watchdog_timer.data = (unsigned long) adapter;
6244 init_timer(&adapter->phy_info_timer);
6245 adapter->phy_info_timer.function = e1000_update_phy_info;
6246 adapter->phy_info_timer.data = (unsigned long) adapter;
6248 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6249 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6250 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6251 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6252 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6254 /* Initialize link parameters. User can change them with ethtool */
6255 adapter->hw.mac.autoneg = 1;
6256 adapter->fc_autoneg = true;
6257 adapter->hw.fc.requested_mode = e1000_fc_default;
6258 adapter->hw.fc.current_mode = e1000_fc_default;
6259 adapter->hw.phy.autoneg_advertised = 0x2f;
6261 /* ring size defaults */
6262 adapter->rx_ring->count = E1000_DEFAULT_RXD;
6263 adapter->tx_ring->count = E1000_DEFAULT_TXD;
6265 /* Initial Wake on LAN setting - If APM wake is enabled in
6266 * the EEPROM, enable the ACPI Magic Packet filter
6268 if (adapter->flags & FLAG_APME_IN_WUC) {
6269 /* APME bit in EEPROM is mapped to WUC.APME */
6270 eeprom_data = er32(WUC);
6271 eeprom_apme_mask = E1000_WUC_APME;
6272 if ((hw->mac.type > e1000_ich10lan) &&
6273 (eeprom_data & E1000_WUC_PHY_WAKE))
6274 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6275 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6276 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6277 (adapter->hw.bus.func == 1))
6278 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
6279 1, &eeprom_data);
6280 else
6281 e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
6282 1, &eeprom_data);
6283 }
6285 /* fetch WoL from EEPROM */
6286 if (eeprom_data & eeprom_apme_mask)
6287 adapter->eeprom_wol |= E1000_WUFC_MAG;
6289 /* now that we have the eeprom settings, apply the special cases
6290 * where the eeprom may be wrong or the board simply won't support
6291 * wake on lan on a particular port
6292 */
6293 if (!(adapter->flags & FLAG_HAS_WOL))
6294 adapter->eeprom_wol = 0;
6296 /* initialize the wol settings based on the eeprom settings */
6297 adapter->wol = adapter->eeprom_wol;
6298 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
6300 /* save off EEPROM version number */
6301 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
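/* NVM word 0x05 is read here as the EEPROM image version; keeping it in
 * adapter->eeprom_vers lets the driver report it later (e.g. via
 * ethtool), an assumption based on the field name and the comment above.
 */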
6303 /* reset the hardware with the new settings */
6304 e1000e_reset(adapter);
6306 /* If the controller has AMT, do not set DRV_LOAD until the interface
6307 * is up. For all other cases, let the f/w know that the h/w is now
6308 * under the control of the driver.
6309 */
6310 if (!(adapter->flags & FLAG_HAS_AMT))
6311 e1000e_get_hw_control(adapter);
6313 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
6314 err = register_netdev(netdev);
6315 if (err)
6316 goto err_register;
6318 /* carrier off reporting is important to ethtool even BEFORE open */
6319 netif_carrier_off(netdev);
6321 e1000_print_device_info(adapter);
6323 if (pci_dev_run_wake(pdev))
6324 pm_runtime_put_noidle(&pdev->dev);
6326 return 0;
6328 err_register:
6329 if (!(adapter->flags & FLAG_HAS_AMT))
6330 e1000e_release_hw_control(adapter);
6331 err_eeprom:
6332 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6333 e1000_phy_hw_reset(&adapter->hw);
6334 err_hw_init:
6335 kfree(adapter->tx_ring);
6336 kfree(adapter->rx_ring);
6337 err_sw_init:
6338 if (adapter->hw.flash_address)
6339 iounmap(adapter->hw.flash_address);
6340 e1000e_reset_interrupt_capability(adapter);
6341 err_flashmap:
6342 iounmap(adapter->hw.hw_addr);
6343 err_ioremap:
6344 free_netdev(netdev);
6345 err_alloc_etherdev:
6346 pci_release_selected_regions(pdev,
6347 pci_select_bars(pdev, IORESOURCE_MEM));
6348 err_pci_reg:
6349 err_dma:
6350 pci_disable_device(pdev);
6351 return err;
6352 }
6354 /**
6355 * e1000_remove - Device Removal Routine
6356 * @pdev: PCI device information struct
6358 * e1000_remove is called by the PCI subsystem to alert the driver
6359 * that it should release a PCI device. This could be caused by a
6360 * Hot-Plug event, or because the driver is going to be removed from
6361 * memory.
6362 **/
6363 static void e1000_remove(struct pci_dev *pdev)
6364 {
6365 struct net_device *netdev = pci_get_drvdata(pdev);
6366 struct e1000_adapter *adapter = netdev_priv(netdev);
6367 bool down = test_bit(__E1000_DOWN, &adapter->state);
6369 /* The timers may be rescheduled, so explicitly disable them
6370 * from being rescheduled.
6371 */
6372 if (!down)
6373 set_bit(__E1000_DOWN, &adapter->state);
6374 del_timer_sync(&adapter->watchdog_timer);
6375 del_timer_sync(&adapter->phy_info_timer);
6377 cancel_work_sync(&adapter->reset_task);
6378 cancel_work_sync(&adapter->watchdog_task);
6379 cancel_work_sync(&adapter->downshift_task);
6380 cancel_work_sync(&adapter->update_phy_task);
6381 cancel_work_sync(&adapter->print_hang_task);
6383 if (!(netdev->flags & IFF_UP))
6384 e1000_power_down_phy(adapter);
6386 /* Don't lie to e1000_close() down the road. */
6387 if (!down)
6388 clear_bit(__E1000_DOWN, &adapter->state);
6389 unregister_netdev(netdev);
6391 if (pci_dev_run_wake(pdev))
6392 pm_runtime_get_noresume(&pdev->dev);
6394 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6395 * would have already happened in close and is redundant.
6396 */
6397 e1000e_release_hw_control(adapter);
	e1000e_reset_interrupt_capability(adapter);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	iounmap(adapter->hw.hw_addr);
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	free_netdev(netdev);

	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
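/* Teardown note: e1000_remove() releases resources in roughly the reverse
 * order that e1000_probe() acquired them (netdev, rings, MMIO mappings, PCI
 * regions, then the PCI device itself), mirroring the probe error path.
 */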
/* PCI Error Recovery (ERS) */
static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
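/* Illustrative recovery flow, assuming a recoverable PCIe error: the AER
 * core first calls .error_detected (the driver quiesces I/O and typically
 * returns PCI_ERS_RESULT_NEED_RESET), then .slot_reset once the link has
 * been reset (the driver re-enables the device and returns
 * PCI_ERS_RESULT_RECOVERED on success), and finally .resume to restart
 * normal operation.
 */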
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
	  board_80003es2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
	  board_80003es2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },

	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },

	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
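/* MODULE_DEVICE_TABLE() exports the ID list above as modalias data, so
 * udev/modprobe can autoload e1000e when a matching device appears.  For
 * example, a (hypothetical) hot-plugged 82574L would expose a modalias
 * beginning with "pci:v00008086d000010D3" that matches the
 * E1000_DEV_ID_82574L entry.
 */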
#ifdef CONFIG_PM
static const struct dev_pm_ops e1000_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
	SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
			   e1000_runtime_resume, e1000_idle)
};
#endif
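/* SET_SYSTEM_SLEEP_PM_OPS() wires e1000_suspend/e1000_resume into the
 * system-wide suspend/hibernate callbacks, while SET_RUNTIME_PM_OPS() wires
 * the runtime_* handlers used for per-device runtime power management; each
 * macro expands to nothing when its corresponding CONFIG_PM option is off.
 */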
/* PCI Device API Driver */
static struct pci_driver e1000_driver = {
	.name     = e1000e_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = e1000_remove,
#ifdef CONFIG_PM
	.driver   = {
		.pm = &e1000_pm_ops,
	},
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
		e1000e_driver_version);
	pr_info("Copyright(c) 1999 - 2012 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
module_init(e1000_init_module);
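/* module_init() makes e1000_init_module() the module's entry point: at load
 * time the driver only registers with the PCI core; e1000_probe() then runs
 * once per matching device found in e1000_pci_tbl.
 */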
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
module_exit(e1000_exit_module);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* netdev.c */