/*******************************************************************************

  Intel PRO/1000 Linux driver
  Copyright(c) 1999 - 2006 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include "e1000.h"
#include <net/ip6_checksum.h>
#include <linux/io.h>
#include <linux/prefetch.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
const char e1000_driver_version[] = DRV_VERSION;
static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
/* e1000_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * Macro expands to...
 *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
 */
static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
	INTEL_E1000_ETHERNET_DEVICE(0x1000),
	INTEL_E1000_ETHERNET_DEVICE(0x1001),
	INTEL_E1000_ETHERNET_DEVICE(0x1004),
	INTEL_E1000_ETHERNET_DEVICE(0x1008),
	INTEL_E1000_ETHERNET_DEVICE(0x1009),
	INTEL_E1000_ETHERNET_DEVICE(0x100C),
	INTEL_E1000_ETHERNET_DEVICE(0x100D),
	INTEL_E1000_ETHERNET_DEVICE(0x100E),
	INTEL_E1000_ETHERNET_DEVICE(0x100F),
	INTEL_E1000_ETHERNET_DEVICE(0x1010),
	INTEL_E1000_ETHERNET_DEVICE(0x1011),
	INTEL_E1000_ETHERNET_DEVICE(0x1012),
	INTEL_E1000_ETHERNET_DEVICE(0x1013),
	INTEL_E1000_ETHERNET_DEVICE(0x1014),
	INTEL_E1000_ETHERNET_DEVICE(0x1015),
	INTEL_E1000_ETHERNET_DEVICE(0x1016),
	INTEL_E1000_ETHERNET_DEVICE(0x1017),
	INTEL_E1000_ETHERNET_DEVICE(0x1018),
	INTEL_E1000_ETHERNET_DEVICE(0x1019),
	INTEL_E1000_ETHERNET_DEVICE(0x101A),
	INTEL_E1000_ETHERNET_DEVICE(0x101D),
	INTEL_E1000_ETHERNET_DEVICE(0x101E),
	INTEL_E1000_ETHERNET_DEVICE(0x1026),
	INTEL_E1000_ETHERNET_DEVICE(0x1027),
	INTEL_E1000_ETHERNET_DEVICE(0x1028),
	INTEL_E1000_ETHERNET_DEVICE(0x1075),
	INTEL_E1000_ETHERNET_DEVICE(0x1076),
	INTEL_E1000_ETHERNET_DEVICE(0x1077),
	INTEL_E1000_ETHERNET_DEVICE(0x1078),
	INTEL_E1000_ETHERNET_DEVICE(0x1079),
	INTEL_E1000_ETHERNET_DEVICE(0x107A),
	INTEL_E1000_ETHERNET_DEVICE(0x107B),
	INTEL_E1000_ETHERNET_DEVICE(0x107C),
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr);
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr);
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring);
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring);
void e1000_update_stats(struct e1000_adapter *adapter);

static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(struct work_struct *work);
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev);
static struct net_device_stats *e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring);
static int e1000_clean(struct napi_struct *napi, int budget);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do);
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count);
static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
					 struct e1000_rx_ring *rx_ring,
					 int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_reset_task(struct work_struct *work);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb);

static bool e1000_vlan_used(struct e1000_adapter *adapter);
static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features);
static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on);
static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
static int e1000_resume(struct pci_dev *pdev);
#endif
static void e1000_shutdown(struct pci_dev *pdev);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll(struct net_device *netdev);
#endif
#define COPYBREAK_DEFAULT 256
static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
static void e1000_io_resume(struct pci_dev *pdev);

static const struct pci_error_handlers e1000_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};
static struct pci_driver e1000_driver = {
	.name     = e1000_driver_name,
	.id_table = e1000_pci_tbl,
	.probe    = e1000_probe,
	.remove   = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = e1000_suspend,
	.resume   = e1000_resume,
#endif
	.shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
 * e1000_get_hw_dev - return device
 * used by hardware layer to print debugging information
 **/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	return adapter->netdev;
}
/**
 * e1000_init_module - Driver Registration Routine
 *
 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init e1000_init_module(void)
{
	int ret;
	pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);

	pr_info("%s\n", e1000_copyright);

	ret = pci_register_driver(&e1000_driver);
	if (copybreak != COPYBREAK_DEFAULT) {
		if (copybreak == 0)
			pr_info("copybreak disabled\n");
		else
			pr_info("copybreak enabled for "
				"packets <= %u bytes\n", copybreak);
	}
	return ret;
}

module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
static int e1000_request_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	irq_handler_t handler = e1000_intr;
	int irq_flags = IRQF_SHARED;
	int err;

	err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
			  netdev);
	if (err) {
		e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
	}

	return err;
}

static void e1000_free_irq(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();
	synchronize_irq(adapter->pdev->irq);
}

/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	ew32(IMS, IMS_ENABLE_MASK);
	E1000_WRITE_FLUSH();
}
static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u16 vid = hw->mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (!e1000_vlan_used(adapter))
		return;

	if (!test_bit(vid, adapter->active_vlans)) {
		if (hw->mng_cookie.status &
		    E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
			e1000_vlan_rx_add_vid(netdev, vid);
			adapter->mng_vlan_id = vid;
		} else {
			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
		}
		if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
		    (vid != old_vid) &&
		    !test_bit(old_vid, adapter->active_vlans))
			e1000_vlan_rx_kill_vid(netdev, old_vid);
	} else {
		adapter->mng_vlan_id = vid;
	}
}
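
/* Manageability pass-through (en_mng_pt) means platform firmware (e.g.
 * a BMC sharing this port) also sees traffic.  While the driver owns
 * the device it takes ARP handling away from the firmware and returns
 * it on release, as the two helpers below do.
 */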
static void e1000_init_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		ew32(MANC, manc);
	}
}

static void e1000_release_manageability(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->en_mng_pt) {
		u32 manc = er32(MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		ew32(MANC, manc);
	}
}
/**
 * e1000_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void e1000_configure(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	e1000_set_rx_mode(netdev);

	e1000_restore_vlan(adapter);
	e1000_init_manageability(adapter);

	e1000_configure_tx(adapter);
	e1000_setup_rctl(adapter);
	e1000_configure_rx(adapter);
	/* call E1000_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
		adapter->alloc_rx_buf(adapter, ring,
				      E1000_DESC_UNUSED(ring));
	}
}
int e1000_up(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* hardware has been reset, we need to reload some things */
	e1000_configure(adapter);

	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_wake_queue(adapter->netdev);

	/* fire a link change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);
	return 0;
}
/**
 * e1000_power_up_phy - restore link in case the phy was powered down
 * @adapter: address of board private structure
 *
 * The phy may be powered down to save power and turn off link when the
 * driver is unloaded and wake on lan is not enabled (among others)
 * *** this routine MUST be followed by a call to e1000_reset ***
 **/
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 mii_reg = 0;

	/* Just clear the power down bit to wake the phy back up */
	if (hw->media_type == e1000_media_type_copper) {
		/* according to the manual, the phy will retain its
		 * settings across a power-down/up cycle */
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg &= ~MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
	}
}
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Power down the PHY so no link is implied when interface is down *
	 * The PHY cannot be powered down if any of the following is true *
	 * (a) WoL is enabled
	 * (b) AMT is active
	 * (c) SoL/IDER session is active */
	if (!adapter->wol && hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		u16 mii_reg = 0;

		switch (hw->mac_type) {
		case e1000_82540:
		case e1000_82545:
		case e1000_82545_rev_3:
		case e1000_82546:
		case e1000_ce4100:
		case e1000_82546_rev_3:
		case e1000_82541:
		case e1000_82541_rev_2:
		case e1000_82547:
		case e1000_82547_rev_2:
			if (er32(MANC) & E1000_MANC_SMBUS_EN)
				goto out;
			break;
		default:
			goto out;
		}
		e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
		mii_reg |= MII_CR_POWER_DOWN;
		e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
		msleep(1);
	}
out:
	return;
}
static void e1000_down_and_stop(struct e1000_adapter *adapter)
{
	set_bit(__E1000_DOWN, &adapter->flags);

	/* Only kill reset task if adapter is not resetting */
	if (!test_bit(__E1000_RESETTING, &adapter->flags))
		cancel_work_sync(&adapter->reset_task);

	cancel_delayed_work_sync(&adapter->watchdog_task);
	cancel_delayed_work_sync(&adapter->phy_info_task);
	cancel_delayed_work_sync(&adapter->fifo_stall_task);
}
void e1000_down(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl, tctl;

	/* disable receives in the hardware */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_disable(netdev);

	/* disable transmits in the hardware */
	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_EN;
	ew32(TCTL, tctl);
	/* flush both disables and wait for them to finish */
	E1000_WRITE_FLUSH();
	msleep(10);

	napi_disable(&adapter->napi);

	e1000_irq_disable(adapter);

	/*
	 * Setting DOWN must be after irq_disable to prevent
	 * a screaming interrupt.  Setting DOWN also prevents
	 * tasks from rescheduling.
	 */
	e1000_down_and_stop(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;
	netif_carrier_off(netdev);

	e1000_reset(adapter);
	e1000_clean_all_tx_rings(adapter);
	e1000_clean_all_rx_rings(adapter);
}
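
/* The __E1000_RESETTING bit acts as a simple hand-rolled lock around
 * the down/up sequence below: contenders sleep in 1 ms steps until the
 * current holder finishes, so only one reset runs at a time.
 */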
static void e1000_reinit_safe(struct e1000_adapter *adapter)
{
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	mutex_lock(&adapter->mutex);
	e1000_down(adapter);
	e1000_up(adapter);
	mutex_unlock(&adapter->mutex);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}

void e1000_reinit_locked(struct e1000_adapter *adapter)
{
	/* if rtnl_lock is not held the call path is bogus */
	ASSERT_RTNL();
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	e1000_down(adapter);
	e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
}
void e1000_reset(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	bool legacy_pba_adjust = false;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
	case e1000_82544:
	case e1000_82540:
	case e1000_82541:
	case e1000_82541_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_48K;
		break;
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_ce4100:
	case e1000_82546_rev_3:
		pba = E1000_PBA_48K;
		break;
	case e1000_82547:
	case e1000_82547_rev_2:
		legacy_pba_adjust = true;
		pba = E1000_PBA_30K;
		break;
	case e1000_undefined:
	case e1000_num_macs:
		break;
	}

	if (legacy_pba_adjust) {
		if (hw->max_frame_size > E1000_RXBUFFER_8192)
			pba -= 8; /* allocate more FIFO for Tx */

		if (hw->mac_type == e1000_82547) {
			adapter->tx_fifo_head = 0;
			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
			adapter->tx_fifo_size =
				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
			atomic_set(&adapter->tx_fifo_stall, 0);
		}
	} else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
		/* adjust PBA for jumbo frames */
		ew32(PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = er32(PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the Tx fifo also stores 16 bytes of information about the Tx
		 * but don't include ethernet FCS because hardware appends it
		 */
		min_tx_space = (hw->max_frame_size +
				sizeof(struct e1000_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = hw->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* PCI/PCIx hardware has PBA alignment constraints */
			switch (hw->mac_type) {
			case e1000_82545 ... e1000_82546_rev_3:
				pba &= ~(E1000_PBA_8K - 1);
				break;
			default:
				break;
			}

			/* if short on rx space, rx wins and must trump tx
			 * adjustment or use Early Receive if available */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
	}

	ew32(PBA, pba);

	/*
	 * flow control settings:
	 * The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, and
	 * - the full Rx FIFO size minus the early receive size (for parts
	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
	 * - the full Rx FIFO size minus one full frame
	 */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - hw->max_frame_size));
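	/* e.g. a 48 KB Rx FIFO (pba = 48) with a 1522-byte max frame:
	 * hwm = min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630)
	 *     = 44236 bytes, before the 8-byte granularity mask below.
	 */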
	hw->fc_high_water = hwm & 0xFFF8;	/* 8-byte granularity */
	hw->fc_low_water = hw->fc_high_water - 8;
	hw->fc_pause_time = E1000_FC_PAUSE_TIME;
	hw->fc_send_xon = 1;
	hw->fc = hw->original_fc;

	/* Allow time for pending master requests to run */
	e1000_reset_hw(hw);
	if (hw->mac_type >= e1000_82544)
		ew32(WUC, 0);

	if (e1000_init_hw(hw))
		e_dev_err("Hardware Error\n");
	e1000_update_mng_vlan(adapter);

	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
	if (hw->mac_type >= e1000_82544 &&
	    hw->autoneg == 1 &&
	    hw->autoneg_advertised == ADVERTISE_1000_FULL) {
		u32 ctrl = er32(CTRL);
		/* clear phy power management bit if we are in gig only mode,
		 * which if enabled will attempt negotiation to 100Mb, which
		 * can cause a loss of link at power off or driver unload */
		ctrl &= ~E1000_CTRL_SWDPIN3;
		ew32(CTRL, ctrl);
	}

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	ew32(VET, ETHERNET_IEEE_VLAN_TYPE);

	e1000_reset_adaptive(hw);
	e1000_phy_get_info(hw, &adapter->phy_info);

	e1000_release_manageability(adapter);
}
/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = netdev->ethtool_ops;
	u8 *data;
	int i;
	u16 csum_old, csum_new = 0;

	eeprom.len = ops->get_eeprom_len(netdev);
	eeprom.offset = 0;

	data = kmalloc(eeprom.len, GFP_KERNEL);
	if (!data)
		return;

	ops->get_eeprom(netdev, &eeprom, data);
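
	/* NVM convention: the 16-bit words from offset 0 through the
	 * checksum word must sum to EEPROM_SUM (0xBABA), so the expected
	 * checksum is EEPROM_SUM minus the sum of the preceding words.
	 */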
	csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
		   (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
	for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
		csum_new += data[i] + (data[i + 1] << 8);
	csum_new = EEPROM_SUM - csum_new;

	pr_err("/*********************/\n");
	pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
	pr_err("Calculated              : 0x%04x\n", csum_new);

	pr_err("Offset    Values\n");
	pr_err("========  ======\n");
	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);

	pr_err("Include this output when contacting your support provider.\n");
	pr_err("This is not a software error! Something bad happened to\n");
	pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
	pr_err("result in further problems, possibly loss of data,\n");
	pr_err("corruption or system hangs!\n");
	pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
	pr_err("which is invalid and requires you to set the proper MAC\n");
	pr_err("address manually before continuing to enable this network\n");
	pr_err("device. Please inspect the EEPROM dump and report the\n");
	pr_err("issue to your hardware vendor or Intel Customer Support.\n");
	pr_err("/*********************/\n");

	kfree(data);
}
/**
 * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
 * @pdev: PCI device information struct
 *
 * Return true if an adapter needs ioport resources
 **/
static int e1000_is_need_ioport(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case E1000_DEV_ID_82540EM:
	case E1000_DEV_ID_82540EM_LOM:
	case E1000_DEV_ID_82540EP:
	case E1000_DEV_ID_82540EP_LOM:
	case E1000_DEV_ID_82540EP_LP:
	case E1000_DEV_ID_82541EI:
	case E1000_DEV_ID_82541EI_MOBILE:
	case E1000_DEV_ID_82541ER:
	case E1000_DEV_ID_82541ER_LOM:
	case E1000_DEV_ID_82541GI:
	case E1000_DEV_ID_82541GI_LF:
	case E1000_DEV_ID_82541GI_MOBILE:
	case E1000_DEV_ID_82544EI_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82544GC_COPPER:
	case E1000_DEV_ID_82544GC_LOM:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82546EB_COPPER:
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
		return true;
	default:
		return false;
	}
}
static netdev_features_t e1000_fix_features(struct net_device *netdev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}
static int e1000_set_features(struct net_device *netdev,
	netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & NETIF_F_HW_VLAN_RX)
		e1000_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
		return 0;

	netdev->features = features;
	adapter->rx_csum = !!(features & NETIF_F_RXCSUM);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	else
		e1000_reset(adapter);

	return 0;
}
static const struct net_device_ops e1000_netdev_ops = {
	.ndo_open		= e1000_open,
	.ndo_stop		= e1000_close,
	.ndo_start_xmit		= e1000_xmit_frame,
	.ndo_get_stats		= e1000_get_stats,
	.ndo_set_rx_mode	= e1000_set_rx_mode,
	.ndo_set_mac_address	= e1000_set_mac,
	.ndo_tx_timeout		= e1000_tx_timeout,
	.ndo_change_mtu		= e1000_change_mtu,
	.ndo_do_ioctl		= e1000_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= e1000_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= e1000_netpoll,
#endif
	.ndo_fix_features	= e1000_fix_features,
	.ndo_set_features	= e1000_set_features,
};
/**
 * e1000_init_hw_struct - initialize members of hw struct
 * @adapter: board private struct
 * @hw: structure used by e1000_hw.c
 *
 * Factors out initialization of the e1000_hw struct to its own function
 * that can be called very early at init (just after struct allocation).
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 * Returns negative error codes if MAC type setup fails.
 */
static int e1000_init_hw_struct(struct e1000_adapter *adapter,
				struct e1000_hw *hw)
{
	struct pci_dev *pdev = adapter->pdev;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_id = pdev->subsystem_device;
	hw->revision_id = pdev->revision;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);

	hw->max_frame_size = adapter->netdev->mtu +
			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;

	/* identify the MAC */
	if (e1000_set_mac_type(hw)) {
		e_err(probe, "Unknown MAC Type\n");
		return -EIO;
	}

	switch (hw->mac_type) {
	default:
		break;
	case e1000_82541:
	case e1000_82547:
	case e1000_82541_rev_2:
	case e1000_82547_rev_2:
		hw->phy_init_script = 1;
		break;
	}

	e1000_set_media_type(hw);
	e1000_get_bus_info(hw);

	hw->wait_autoneg_complete = false;
	hw->tbi_compatibility_en = true;
	hw->adaptive_ifs = true;

	/* Copper options */

	if (hw->media_type == e1000_media_type_copper) {
		hw->mdix = AUTO_ALL_MODES;
		hw->disable_polarity_correction = false;
		hw->master_slave = E1000_MASTER_SLAVE;
	}

	return 0;
}
/**
 * e1000_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * e1000_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit e1000_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct e1000_adapter *adapter;
	struct e1000_hw *hw;

	static int cards_found = 0;
	static int global_quad_port_a = 0; /* global ksp3 port a indication */
	int i, err, pci_using_dac;
	u16 eeprom_data = 0;
	u16 tmp = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	int bars, need_ioport;

	/* do not allocate ioport bars when not needed */
	need_ioport = e1000_is_need_ioport(pdev);
	if (need_ioport) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
		err = pci_enable_device(pdev);
	} else {
		bars = pci_select_bars(pdev, IORESOURCE_MEM);
		err = pci_enable_device_mem(pdev);
	}
	if (err)
		return err;

	err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
	if (err)
		goto err_pci_reg;

	pci_set_master(pdev);
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
	adapter->bars = bars;
	adapter->need_ioport = need_ioport;
	hw = &adapter->hw;
	hw->back = adapter;

	err = -EIO;
	hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
	if (!hw->hw_addr)
		goto err_ioremap;

	if (adapter->need_ioport) {
		for (i = BAR_1; i <= BAR_5; i++) {
			if (pci_resource_len(pdev, i) == 0)
				continue;
			if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
				hw->io_base = pci_resource_start(pdev, i);
				break;
			}
		}
	}

	/* make ready for any if (hw->...) below */
	err = e1000_init_hw_struct(adapter, hw);
	if (err)
		goto err_sw_init;

	/*
	 * there is a workaround being applied below that limits
	 * 64-bit DMA addresses to 64-bit hardware.  There are some
	 * 32-bit adapters that Tx hang when given 64-bit DMA addresses
	 */
	pci_using_dac = 0;
	if ((hw->bus_type == e1000_bus_type_pcix) &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		/*
		 * according to DMA-API-HOWTO, coherent calls will always
		 * succeed if the set call did
		 */
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		pci_using_dac = 1;
	} else {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			pr_err("No usable DMA config, aborting\n");
			goto err_dma;
		}
		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	}
	netdev->netdev_ops = &e1000_netdev_ops;
	e1000_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;
	netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;

	err = -EIO;
	if (hw->mac_type == e1000_ce4100) {
		hw->ce4100_gbe_mdio_base_virt =
			ioremap(pci_resource_start(pdev, BAR_1),
				pci_resource_len(pdev, BAR_1));

		if (!hw->ce4100_gbe_mdio_base_virt)
			goto err_mdio_ioremap;
	}

	if (hw->mac_type >= e1000_82543) {
		netdev->hw_features = NETIF_F_SG |
				      NETIF_F_HW_CSUM |
				      NETIF_F_HW_VLAN_RX;
		netdev->features = NETIF_F_HW_VLAN_TX |
				   NETIF_F_HW_VLAN_FILTER;
	}

	if ((hw->mac_type >= e1000_82544) &&
	    (hw->mac_type != e1000_82547))
		netdev->hw_features |= NETIF_F_TSO;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->features |= netdev->hw_features;
	netdev->hw_features |= (NETIF_F_RXCSUM |
				NETIF_F_RXALL |
				NETIF_F_RXFCS);

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	netdev->vlan_features |= (NETIF_F_TSO |
				  NETIF_F_HW_CSUM |
				  NETIF_F_SG);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);

	/* initialize eeprom parameters */
	if (e1000_init_eeprom_params(hw)) {
		e_err(probe, "EEPROM initialization failed\n");
		goto err_eeprom;
	}

	/* before reading the EEPROM, reset the controller to
	 * put the device in a known good starting state */

	e1000_reset_hw(hw);

	/* make sure the EEPROM is good */
	if (e1000_validate_eeprom_checksum(hw) < 0) {
		e_err(probe, "The EEPROM Checksum Is Not Valid\n");
		e1000_dump_eeprom(adapter);
		/*
		 * set MAC address to all zeroes to invalidate and temporarily
		 * disable this device for the user. This blocks regular
		 * traffic while still permitting ethtool ioctls from reaching
		 * the hardware as well as allowing the user to run the
		 * interface after manually setting a hw addr using
		 * `ip set address`
		 */
		memset(hw->mac_addr, 0, netdev->addr_len);
	} else {
		/* copy the MAC address out of the EEPROM */
		if (e1000_read_mac_addr(hw))
			e_err(probe, "EEPROM Read Error\n");
	}
	/* don't block initialization here due to bad MAC address */
	memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr))
		e_err(probe, "Invalid MAC Address\n");
	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
			  e1000_82547_tx_fifo_stall_task);
	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
	INIT_WORK(&adapter->reset_task, e1000_reset_task);

	e1000_check_options(adapter);
	/* Initial Wake on LAN setting
	 * If APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
	case e1000_82543:
		break;
	case e1000_82544:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
		eeprom_apme_mask = E1000_EEPROM_82544_APM;
		break;
	case e1000_82546:
	case e1000_82546_rev_3:
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			e1000_read_eeprom(hw,
				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
			break;
		}
		/* Fall Through */
	default:
		e1000_read_eeprom(hw,
			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
		break;
	}
	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;
	/* now that we have the eeprom settings, apply the special cases
	 * where the eeprom may be wrong or the board simply won't support
	 * wake on lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82546GB_PCIE:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (er32(STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->eeprom_wol = 0;
		else
			adapter->quad_port_a = true;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
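	/* register the wake capability with the PM core; this is what the
	 * sysfs power/wakeup attribute reports and controls */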
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
	/* Auto detect PHY address */
	if (hw->mac_type == e1000_ce4100) {
		for (i = 0; i < 32; i++) {
			hw->phy_addr = i;
			e1000_read_phy_reg(hw, PHY_ID2, &tmp);
			if (tmp == 0 || tmp == 0xFF) {
				if (i == 31)
					goto err_eeprom;
				continue;
			} else
				break;
		}
	}

	/* reset the hardware with the new settings */
	e1000_reset(adapter);
	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	e1000_vlan_filter_on_off(adapter, false);

	/* print bus type/speed/width info */
	e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
	       ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
	       ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
		(hw->bus_speed == e1000_bus_speed_120) ? 120 :
		(hw->bus_speed == e1000_bus_speed_100) ? 100 :
		(hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
	       ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
	       netdev->dev_addr);

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	e_info(probe, "Intel(R) PRO/1000 Network Connection\n");

	cards_found++;
	return 0;
err_register:
err_eeprom:
	e1000_phy_hw_reset(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_dma:
err_sw_init:
err_mdio_ioremap:
	iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, bars);
err_pci_reg:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit e1000_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	e1000_down_and_stop(adapter);
	e1000_release_manageability(adapter);

	unregister_netdev(netdev);

	e1000_phy_hw_reset(hw);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);

	if (hw->mac_type == e1000_ce4100)
		iounmap(hw->ce4100_gbe_mdio_base_virt);
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, adapter->bars);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
/**
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
 * @adapter: board private structure to initialize
 *
 * e1000_sw_init initializes the Adapter private data structure.
 * e1000_init_hw_struct MUST be called before this function
 **/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;

	if (e1000_alloc_queues(adapter)) {
		e_err(probe, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	e1000_irq_disable(adapter);

	spin_lock_init(&adapter->stats_lock);
	mutex_init(&adapter->mutex);

	set_bit(__E1000_DOWN, &adapter->flags);

	return 0;
}
/**
 * e1000_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
{
	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	return E1000_SUCCESS;
}
/**
 * e1000_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog task is started,
 * and the stack is notified that the interface is ready.
 **/
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* disallow open during test */
	if (test_bit(__E1000_TESTING, &adapter->flags))
		return -EBUSY;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = e1000_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = e1000_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	e1000_power_up_phy(adapter);

	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
		e1000_update_mng_vlan(adapter);
	}

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	e1000_configure(adapter);

	err = e1000_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as e1000_up() */
	clear_bit(__E1000_DOWN, &adapter->flags);

	napi_enable(&adapter->napi);

	e1000_irq_enable(adapter);

	netif_start_queue(netdev);

	/* fire a link status change interrupt to start the watchdog */
	ew32(ICS, E1000_ICS_LSC);

	return E1000_SUCCESS;

err_req_irq:
	e1000_power_down_phy(adapter);
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	e1000_free_all_tx_resources(adapter);
err_setup_tx:
	e1000_reset(adapter);

	return err;
}
/**
 * e1000_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
	e1000_down(adapter);
	e1000_power_down_phy(adapter);
	e1000_free_irq(adapter);

	e1000_free_all_tx_resources(adapter);
	e1000_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
	}

	return 0;
}
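
/* Two addresses fall in the same 64 KB region exactly when their bits
 * above bit 15 agree, so ((begin ^ (end - 1)) >> 16) is zero iff a
 * buffer does not straddle a 64 KB boundary; that is the test used in
 * the helper below.
 */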
/**
 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
 * @adapter: address of board private structure
 * @start: address of beginning of memory
 * @len: length of memory
 **/
static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
				  unsigned long len)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned long begin = (unsigned long)start;
	unsigned long end = begin + len;

	/* First rev 82545 and 82546 need to not allow any memory
	 * write location to cross 64k boundary due to errata 23 */
	if (hw->mac_type == e1000_82545 ||
	    hw->mac_type == e1000_ce4100 ||
	    hw->mac_type == e1000_82546) {
		return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
	}

	return true;
}
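
/* For errata 23, the ring setup below retries a misaligned DMA
 * allocation while still holding the old one; since the old block is
 * not yet freed, the allocator cannot hand back the same misaligned
 * block again, guaranteeing forward progress.
 */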
/**
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @txdr:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *txdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct e1000_buffer) * txdr->count;
	txdr->buffer_info = vzalloc(size);
	if (!txdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* round up to nearest 4K */

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);

	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
setup_tx_desc_die:
		vfree(txdr->buffer_info);
		e_err(probe, "Unable to allocate memory for the Tx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
		void *olddesc = txdr->desc;
		dma_addr_t olddma = txdr->dma;
		e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
		      txdr->size, txdr->desc);
		/* Try again, without freeing the previous */
		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
						&txdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!txdr->desc) {
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			goto setup_tx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
					  txdr->dma);
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory "
			      "for the transmit descriptor ring\n");
			vfree(txdr->buffer_info);
			return -ENOMEM;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
					  olddma);
		}
	}
	memset(txdr->desc, 0, txdr->size);

	txdr->next_to_use = 0;
	txdr->next_to_clean = 0;

	return 0;
}
/**
 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Tx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_tx_resources(adapter,
							&adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tdlen, tctl, tipg;
	u32 ipgr1, ipgr2;

	/* Setup the HW Tx Head and Tail descriptor pointers */

	switch (adapter->num_tx_queues) {
	case 1:
	default:
		tdba = adapter->tx_ring[0].dma;
		tdlen = adapter->tx_ring[0].count *
			sizeof(struct e1000_tx_desc);
		ew32(TDLEN, tdlen);
		ew32(TDBAH, (tdba >> 32));
		ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
		ew32(TDT, 0);
		ew32(TDH, 0);
		adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDH : E1000_82542_TDH);
		adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_TDT : E1000_82542_TDT);
		break;
	}

	/* Set the default values for the Tx Inter Packet Gap timer */
	if ((hw->media_type == e1000_media_type_fiber ||
	     hw->media_type == e1000_media_type_internal_serdes))
		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
	else
		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;

	switch (hw->mac_type) {
	case e1000_82542_rev2_0:
	case e1000_82542_rev2_1:
		tipg = DEFAULT_82542_TIPG_IPGT;
		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
		break;
	default:
		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
		break;
	}
	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
	ew32(TIPG, tipg);

	/* Set the Tx Interrupt Delay register */

	ew32(TIDV, adapter->tx_int_delay);
	if (hw->mac_type >= e1000_82540)
		ew32(TADV, adapter->tx_abs_int_delay);

	/* Program the Transmit Control Register */

	tctl = er32(TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	e1000_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;

	/* only set IDE if we are delaying interrupts using the timers */
	if (adapter->tx_int_delay)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	if (hw->mac_type < e1000_82543)
		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
	else
		adapter->txd_cmd |= E1000_TXD_CMD_RS;

	/* Cache if we're 82544 running in PCI-X because we'll
	 * need this to apply a workaround later in the send path. */
	if (hw->mac_type == e1000_82544 &&
	    hw->bus_type == e1000_bus_type_pcix)
		adapter->pcix_82544 = true;

	ew32(TCTL, tctl);
}
/**
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rxdr:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rxdr)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct e1000_buffer) * rxdr->count;
	rxdr->buffer_info = vzalloc(size);
	if (!rxdr->buffer_info) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
		return -ENOMEM;
	}

	desc_len = sizeof(struct e1000_rx_desc);

	/* Round up to nearest 4K */

	rxdr->size = rxdr->count * desc_len;
	rxdr->size = ALIGN(rxdr->size, 4096);

	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		e_err(probe, "Unable to allocate memory for the Rx descriptor "
		      "ring\n");
setup_rx_desc_die:
		vfree(rxdr->buffer_info);
		return -ENOMEM;
	}

	/* Fix for errata 23, can't cross 64kB boundary */
	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
		void *olddesc = rxdr->desc;
		dma_addr_t olddma = rxdr->dma;
		e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
		      rxdr->size, rxdr->desc);
		/* Try again, without freeing the previous */
		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
						&rxdr->dma, GFP_KERNEL);
		/* Failed allocation, critical failure */
		if (!rxdr->desc) {
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate memory for the Rx "
			      "descriptor ring\n");
			goto setup_rx_desc_die;
		}

		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
			/* give up */
			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
					  rxdr->dma);
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
			e_err(probe, "Unable to allocate aligned memory for "
			      "the Rx descriptor ring\n");
			goto setup_rx_desc_die;
		} else {
			/* Free old allocation, new allocation was successful */
			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
					  olddma);
		}
	}
	memset(rxdr->desc, 0, rxdr->size);

	rxdr->next_to_clean = 0;
	rxdr->next_to_use = 0;
	rxdr->rx_skb_top = NULL;

	return 0;
}
/**
 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
 * 				  (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			e_err(probe, "Allocation for Rx Queue %u failed\n", i);
			for (i-- ; i >= 0; i--)
				e1000_free_rx_resources(adapter,
							&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * e1000_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = er32(RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

	rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mc_filter_type << E1000_RCTL_MO_SHIFT);

	if (hw->tbi_compatibility_on == 1)
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	if (adapter->netdev->mtu <= ETH_DATA_LEN)
		rctl &= ~E1000_RCTL_LPE;
	else
		rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
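	/* With the buffer-size extension bit (BSEX) set, the hardware
	 * multiplies the base BSIZE encodings by 16; that is how the
	 * 4096/8192/16384-byte settings below are expressed, while 2048
	 * clears BSEX and uses a base encoding.
	 */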
	rctl &= ~E1000_RCTL_SZ_4096;
	rctl |= E1000_RCTL_BSEX;
	switch (adapter->rx_buffer_len) {
	case E1000_RXBUFFER_2048:
	default:
		rctl |= E1000_RCTL_SZ_2048;
		rctl &= ~E1000_RCTL_BSEX;
		break;
	case E1000_RXBUFFER_4096:
		rctl |= E1000_RCTL_SZ_4096;
		break;
	case E1000_RXBUFFER_8192:
		rctl |= E1000_RCTL_SZ_8192;
		break;
	case E1000_RXBUFFER_16384:
		rctl |= E1000_RCTL_SZ_16384;
		break;
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000_set_rx_mode */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
			  E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	ew32(RCTL, rctl);
}
/**
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
	u64 rdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 rdlen, rctl, rxcsum;

	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
	} else {
		rdlen = adapter->rx_ring[0].count *
			sizeof(struct e1000_rx_desc);
		adapter->clean_rx = e1000_clean_rx_irq;
		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
	}

	/* disable receives while setting up the descriptors */
	rctl = er32(RCTL);
	ew32(RCTL, rctl & ~E1000_RCTL_EN);

	/* set the Receive Delay Timer Register */
	ew32(RDTR, adapter->rx_int_delay);

	if (hw->mac_type >= e1000_82540) {
		ew32(RADV, adapter->rx_abs_int_delay);
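		/* ITR counts in 256 ns units, so a target rate of
		 * adapter->itr interrupts/sec becomes 10^9 / (itr * 256)
		 * register units.
		 */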
		if (adapter->itr_setting != 0)
			ew32(ITR, 1000000000 / (adapter->itr * 256));
	}

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring */
	switch (adapter->num_rx_queues) {
	case 1:
	default:
		rdba = adapter->rx_ring[0].dma;
		ew32(RDLEN, rdlen);
		ew32(RDBAH, (rdba >> 32));
		ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
		ew32(RDT, 0);
		ew32(RDH, 0);
		adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDH : E1000_82542_RDH);
		adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
					   E1000_RDT : E1000_82542_RDT);
		break;
	}

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if (hw->mac_type >= e1000_82543) {
		rxcsum = er32(RXCSUM);
		if (adapter->rx_csum)
			rxcsum |= E1000_RXCSUM_TUOFL;
		else
			/* don't need to clear IPPCSE as it defaults to 0 */
			rxcsum &= ~E1000_RXCSUM_TUOFL;
		ew32(RXCSUM, rxcsum);
	}

	/* Enable Receives */
	ew32(RCTL, rctl | E1000_RCTL_EN);
}
/**
 * e1000_free_tx_resources - Free Tx Resources per Queue
 * @adapter: board private structure
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
				    struct e1000_tx_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_tx_ring(adapter, tx_ring);

	vfree(tx_ring->buffer_info);
	tx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
			  tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
}
static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
					     struct e1000_buffer *buffer_info)
{
	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
	/* buffer_info must be completely set up in the transmit path */
}
/**
 * e1000_clean_tx_ring - Free Tx Buffers
 * @adapter: board private structure
 * @tx_ring: ring to be cleaned
 **/
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
				struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	unsigned long size;
	unsigned int i;

	/* Free all the Tx ring sk_buffs */

	for (i = 0; i < tx_ring->count; i++) {
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	size = sizeof(struct e1000_buffer) * tx_ring->count;
	memset(tx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */

	memset(tx_ring->desc, 0, tx_ring->size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->last_tx_tso = false;

	writel(0, hw->hw_addr + tx_ring->tdh);
	writel(0, hw->hw_addr + tx_ring->tdt);
}

/**
 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
}
/**
 * e1000_free_rx_resources - Free Rx Resources
 * @adapter: board private structure
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
				    struct e1000_rx_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;

	e1000_clean_rx_ring(adapter, rx_ring);

	vfree(rx_ring->buffer_info);
	rx_ring->buffer_info = NULL;

	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
			  rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 * @rx_ring: ring to free buffers from
 **/
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
				struct e1000_rx_ring *rx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_buffer *buffer_info;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long size;
	unsigned int i;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma &&
		    adapter->clean_rx == e1000_clean_rx_irq) {
			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 buffer_info->length,
					 DMA_FROM_DEVICE);
		} else if (buffer_info->dma &&
			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
			dma_unmap_page(&pdev->dev, buffer_info->dma,
				       buffer_info->length,
				       DMA_FROM_DEVICE);
		}

		buffer_info->dma = 0;
		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}
		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	size = sizeof(struct e1000_buffer) * rx_ring->count;
	memset(rx_ring->buffer_info, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	writel(0, hw->hw_addr + rx_ring->rdh);
	writel(0, hw->hw_addr + rx_ring->rdt);
}
/**
 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
}
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
 * and memory write and invalidate disabled for certain operations
 */
static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	e1000_pci_clear_mwi(hw);

	rctl = er32(RCTL);
	rctl |= E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (netif_running(netdev))
		e1000_clean_all_rx_rings(adapter);
}
static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 rctl;

	rctl = er32(RCTL);
	rctl &= ~E1000_RCTL_RST;
	ew32(RCTL, rctl);
	E1000_WRITE_FLUSH();
	mdelay(5);

	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
		e1000_pci_set_mwi(hw);

	if (netif_running(netdev)) {
		/* No need to loop, because 82542 supports only 1 queue */
		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
		e1000_configure_rx(adapter);
		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
	}
}
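/* Illustrative note (not part of the original sources): on 82542 rev 2.0
 * parts, any write to the receive address registers is expected to be
 * bracketed by the two helpers above. e1000_set_mac() below uses exactly
 * this pattern:
 *
 *	if (hw->mac_type == e1000_82542_rev2_0)
 *		e1000_enter_82542_rst(adapter);
 *	e1000_rar_set(hw, hw->mac_addr, 0);
 *	if (hw->mac_type == e1000_82542_rev2_0)
 *		e1000_leave_82542_rst(adapter);
 */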
/**
 * e1000_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_set_mac(struct net_device *netdev, void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* 82542 2.0 needs to be in reset to write receive address registers */
	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);

	e1000_rar_set(hw, hw->mac_addr, 0);

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	return 0;
}
/**
 * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void e1000_set_rx_mode(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	bool use_uc = false;
	u32 rctl;
	u32 hash_value;
	int i, rar_entries = E1000_RAR_ENTRIES;
	int mta_reg_count = E1000_NUM_MTA_REGISTERS;
	u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);

	if (!mcarray) {
		e_err(probe, "memory allocation failed\n");
		return;
	}

	/* Check for Promiscuous and All Multicast modes */

	rctl = er32(RCTL);

	if (netdev->flags & IFF_PROMISC) {
		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		rctl &= ~E1000_RCTL_VFE;
	} else {
		if (netdev->flags & IFF_ALLMULTI)
			rctl |= E1000_RCTL_MPE;
		else
			rctl &= ~E1000_RCTL_MPE;
		/* Enable VLAN filter if there is a VLAN */
		if (e1000_vlan_used(adapter))
			rctl |= E1000_RCTL_VFE;
	}

	if (netdev_uc_count(netdev) > rar_entries - 1) {
		rctl |= E1000_RCTL_UPE;
	} else if (!(netdev->flags & IFF_PROMISC)) {
		rctl &= ~E1000_RCTL_UPE;
		use_uc = true;
	}

	ew32(RCTL, rctl);

	/* 82542 2.0 needs to be in reset to write receive address registers */

	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_enter_82542_rst(adapter);

	/* load the first 14 addresses into the exact filters 1-14. Unicast
	 * addresses take precedence to avoid disabling unicast filtering
	 * when possible.
	 *
	 * RAR 0 is used for the station MAC address
	 * if there are not 14 addresses, go ahead and clear the filters
	 */
	i = 1;
	if (use_uc)
		netdev_for_each_uc_addr(ha, netdev) {
			if (i == rar_entries)
				break;
			e1000_rar_set(hw, ha->addr, i++);
		}

	netdev_for_each_mc_addr(ha, netdev) {
		if (i == rar_entries) {
			/* load any remaining addresses into the hash table */
			u32 hash_reg, hash_bit, mta;
			hash_value = e1000_hash_mc_addr(hw, ha->addr);
			hash_reg = (hash_value >> 5) & 0x7F;
			hash_bit = hash_value & 0x1F;
			mta = (1 << hash_bit);
			mcarray[hash_reg] |= mta;
		} else {
			e1000_rar_set(hw, ha->addr, i++);
		}
	}

	for (; i < rar_entries; i++) {
		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
		E1000_WRITE_FLUSH();
		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
		E1000_WRITE_FLUSH();
	}

	/* write the hash table completely, write from bottom to avoid
	 * both stupid write combining chipsets, and flushing each write
	 */
	for (i = mta_reg_count - 1; i >= 0 ; i--) {
		/* The 82544 has an erratum where writing odd offsets
		 * overwrites the previous even offset, but writing
		 * backwards over the range solves the issue by always
		 * writing the odd offset first.
		 */
		E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
	}
	E1000_WRITE_FLUSH();
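	/* Worked example (illustrative, hash value hypothetical): if
	 * e1000_hash_mc_addr() returns hash_value = 0x1234, then
	 * hash_reg = (0x1234 >> 5) & 0x7F = 0x11 (register 17) and
	 * hash_bit = 0x1234 & 0x1F = 0x14 (bit 20), so bit 20 of MTA
	 * register 17 is set and any multicast address hashing to 0x1234
	 * passes the filter.
	 */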
	if (hw->mac_type == e1000_82542_rev2_0)
		e1000_leave_82542_rst(adapter);

	kfree(mcarray);
}
/**
 * e1000_update_phy_info_task - get phy info
 * @work: work struct contained inside adapter struct
 *
 * Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 **/
static void e1000_update_phy_info_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     phy_info_task.work);
	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
	mutex_unlock(&adapter->mutex);
}
/**
 * e1000_82547_tx_fifo_stall_task - task to complete work
 * @work: work struct contained inside adapter struct
 **/
static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     fifo_stall_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	mutex_lock(&adapter->mutex);
	if (atomic_read(&adapter->tx_fifo_stall)) {
		if ((er32(TDT) == er32(TDH)) &&
		    (er32(TDFT) == er32(TDFH)) &&
		    (er32(TDFTS) == er32(TDFHS))) {
			tctl = er32(TCTL);
			ew32(TCTL, tctl & ~E1000_TCTL_EN);
			ew32(TDFT, adapter->tx_head_addr);
			ew32(TDFH, adapter->tx_head_addr);
			ew32(TDFTS, adapter->tx_head_addr);
			ew32(TDFHS, adapter->tx_head_addr);
			ew32(TCTL, tctl);
			E1000_WRITE_FLUSH();

			adapter->tx_fifo_head = 0;
			atomic_set(&adapter->tx_fifo_stall, 0);
			netif_wake_queue(netdev);
		} else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		}
	}
	mutex_unlock(&adapter->mutex);
}
bool e1000_has_link(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or rx
	 * sequence error interrupt (except on intel ce4100).
	 * get_link_status will stay false until the
	 * e1000_check_for_link establishes link for copper adapters
	 * ONLY
	 */
	switch (hw->media_type) {
	case e1000_media_type_copper:
		if (hw->mac_type == e1000_ce4100)
			hw->get_link_status = 1;
		if (hw->get_link_status) {
			e1000_check_for_link(hw);
			link_active = !hw->get_link_status;
		} else {
			link_active = true;
		}
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_active = hw->serdes_has_link;
		break;
	default:
		break;
	}

	return link_active;
}
/**
 * e1000_watchdog - work function
 * @work: work struct contained inside adapter struct
 **/
static void e1000_watchdog(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     watchdog_task.work);
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	u32 link, tctl;

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;

	mutex_lock(&adapter->mutex);
	link = e1000_has_link(adapter);
	if ((netif_carrier_ok(netdev)) && link)
		goto link_up;

	if (link) {
		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;
			/* update snapshot of PHY registers on LSC */
			e1000_get_speed_and_duplex(hw,
						   &adapter->link_speed,
						   &adapter->link_duplex);

			ctrl = er32(CTRL);
			pr_info("%s NIC Link is Up %d Mbps %s, "
				"Flow Control: %s\n",
				netdev->name,
				adapter->link_speed,
				adapter->link_duplex == FULL_DUPLEX ?
				"Full Duplex" : "Half Duplex",
				((ctrl & E1000_CTRL_TFCE) && (ctrl &
				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
				E1000_CTRL_TFCE) ? "TX" : "None")));

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 16;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			/* enable transmits in the hardware */
			tctl = er32(TCTL);
			tctl |= E1000_TCTL_EN;
			ew32(TCTL, tctl);

			netif_carrier_on(netdev);
			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
			adapter->smartspeed = 0;
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;
			pr_info("%s NIC Link is Down\n",
				netdev->name);
			netif_carrier_off(netdev);

			if (!test_bit(__E1000_DOWN, &adapter->flags))
				schedule_delayed_work(&adapter->phy_info_task,
						      2 * HZ);
		}

		e1000_smartspeed(adapter);
	}

link_up:
	e1000_update_stats(adapter);

	hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
	adapter->tpt_old = adapter->stats.tpt;
	hw->collision_delta = adapter->stats.colc - adapter->colc_old;
	adapter->colc_old = adapter->stats.colc;

	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
	adapter->gorcl_old = adapter->stats.gorcl;
	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
	adapter->gotcl_old = adapter->stats.gotcl;

	e1000_update_adaptive(hw);

	if (!netif_carrier_ok(netdev)) {
		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			adapter->tx_timeout_count++;
			schedule_work(&adapter->reset_task);
			/* exit immediately since reset is imminent */
			goto unlock;
		}
	}

	/* Simple mode for Interrupt Throttle Rate (ITR) */
	if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
		/* Symmetric Tx/Rx gets a reduced ITR=2000;
		 * Total asymmetrical Tx or Rx gets ITR=8000;
		 * everyone else is between 2000-8000.
		 */
		u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
		u32 dif = (adapter->gotcl > adapter->gorcl ?
			   adapter->gotcl - adapter->gorcl :
			   adapter->gorcl - adapter->gotcl) / 10000;
		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;

		ew32(ITR, 1000000000 / (itr * 256));
	}
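	/* Worked example (illustrative): the ITR register counts in 256 ns
	 * units. With symmetric traffic the dif/goc term is ~0, so
	 * itr = 2000 ints/s and the register is written with
	 * 1000000000 / (2000 * 256) = 1953, i.e. ~500 us between
	 * interrupts; fully asymmetric traffic yields itr = 8000 and a
	 * register value of 488.
	 */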
	/* Cause software interrupt to ensure rx ring is cleaned */
	ew32(ICS, E1000_ICS_RXDMT0);

	/* Force detection of hung controller every watchdog period */
	adapter->detect_tx_hung = true;

	/* Reschedule the task */
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);

unlock:
	mutex_unlock(&adapter->mutex);
}
enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * e1000_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see e1000_param.c)
 **/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
				     u16 itr_setting, int packets, int bytes)
{
	unsigned int retval = itr_setting;
	struct e1000_hw *hw = &adapter->hw;

	if (unlikely(hw->mac_type < e1000_82540))
		goto update_itr_done;

	if (packets == 0)
		goto update_itr_done;

	switch (itr_setting) {
	case lowest_latency:
		/* jumbo frames get bulk treatment */
		if (bytes/packets > 8000)
			retval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			retval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* jumbo frames need bulk latency setting */
			if (bytes/packets > 8000)
				retval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				retval = bulk_latency;
			else if ((packets > 35))
				retval = lowest_latency;
		} else if (bytes/packets > 2000)
			retval = bulk_latency;
		else if (packets <= 2 && bytes < 512)
			retval = lowest_latency;
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				retval = low_latency;
		} else if (bytes < 6000) {
			retval = low_latency;
		}
		break;
	}

update_itr_done:
	return retval;
}
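/* Worked example (illustrative): an interrupt window that saw 30 packets
 * totalling 45000 bytes while in low_latency has bytes > 10000 and
 * bytes/packets = 1500 > 1200, so the ring moves to bulk_latency; a window
 * of 2 packets totalling 400 bytes would instead drop it to lowest_latency.
 */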
static void e1000_set_itr(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 current_itr;
	u32 new_itr = adapter->itr;

	if (unlikely(hw->mac_type < e1000_82540))
		return;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (unlikely(adapter->link_speed != SPEED_1000)) {
		current_itr = 0;
		new_itr = 4000;
		goto set_itr_now;
	}

	adapter->tx_itr = e1000_update_itr(adapter,
					   adapter->tx_itr,
					   adapter->total_tx_packets,
					   adapter->total_tx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
		adapter->tx_itr = low_latency;

	adapter->rx_itr = e1000_update_itr(adapter,
					   adapter->rx_itr,
					   adapter->total_rx_packets,
					   adapter->total_rx_bytes);
	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
		adapter->rx_itr = low_latency;

	current_itr = max(adapter->rx_itr, adapter->tx_itr);

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = 70000;
		break;
	case low_latency:
		new_itr = 20000; /* aka hwitr = ~200 */
		break;
	case bulk_latency:
		new_itr = 4000;
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != adapter->itr) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > adapter->itr ?
			  min(adapter->itr + (new_itr >> 2), new_itr) :
			  new_itr;
		adapter->itr = new_itr;
		ew32(ITR, 1000000000 / (new_itr * 256));
	}
}
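/* Worked example (illustrative): moving from adapter->itr = 4000 up to a
 * computed new_itr = 20000 is damped to min(4000 + (20000 >> 2), 20000)
 * = 9000 ints/s, while any decrease (e.g. 20000 down to 4000) takes
 * effect immediately.
 */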
#define E1000_TX_FLAGS_CSUM		0x00000001
#define E1000_TX_FLAGS_VLAN		0x00000002
#define E1000_TX_FLAGS_TSO		0x00000004
#define E1000_TX_FLAGS_IPV4		0x00000008
#define E1000_TX_FLAGS_NO_FCS		0x00000010
#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
#define E1000_TX_FLAGS_VLAN_SHIFT	16
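/* Illustrative note: a 12-bit VLAN ID such as 100 rides in the upper 16
 * bits of tx_flags, e.g. tx_flags |= (100 << E1000_TX_FLAGS_VLAN_SHIFT) in
 * e1000_xmit_frame(), and is recovered with
 * (tx_flags & E1000_TX_FLAGS_VLAN_MASK) when the descriptor is built in
 * e1000_tx_queue().
 */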
static int e1000_tso(struct e1000_adapter *adapter,
		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u32 cmd_length = 0;
	u16 ipcse = 0, tucse, mss;
	u8 ipcss, ipcso, tucss, tucso, hdr_len;
	int err;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (err)
				return err;
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		mss = skb_shinfo(skb)->gso_size;
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);
			iph->tot_len = 0;
			iph->check = 0;
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			cmd_length = E1000_TXD_CMD_IP;
			ipcse = skb_transport_offset(skb) - 1;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			ipcse = 0;
		}
		ipcss = skb_network_offset(skb);
		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
		tucss = skb_transport_offset(skb);
		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
		tucse = 0;

		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));

		i = tx_ring->next_to_use;
		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];

		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
		context_desc->upper_setup.tcp_fields.tucss = tucss;
		context_desc->upper_setup.tcp_fields.tucso = tucso;
		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
		context_desc->cmd_and_length = cpu_to_le32(cmd_length);

		buffer_info->time_stamp = jiffies;
		buffer_info->next_to_watch = i;

		if (unlikely(++i == tx_ring->count))
			i = 0;
		tx_ring->next_to_use = i;

		return true;
	}
	return false;
}
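/* Worked example (illustrative): for a TSO'd IPv4/TCP frame with standard
 * 14-byte Ethernet, 20-byte IP and 20-byte TCP headers, the context
 * descriptor above is loaded with ipcss = 14, ipcse = 33, tucss = 34 and
 * hdr_len = 54, with mss = skb_shinfo(skb)->gso_size (typically 1460 on a
 * 1500-byte MTU), telling the hardware where to insert the recomputed IP
 * and TCP checksums in each segment it cuts.
 */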
static bool e1000_tx_csum(struct e1000_adapter *adapter,
			  struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
{
	struct e1000_context_desc *context_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i;
	u8 css;
	u32 cmd_len = E1000_TXD_CMD_DEXT;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return false;

	switch (skb->protocol) {
	case cpu_to_be16(ETH_P_IP):
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	case cpu_to_be16(ETH_P_IPV6):
		/* XXX not handling all IPV6 headers */
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			cmd_len |= E1000_TXD_CMD_TCP;
		break;
	default:
		if (unlikely(net_ratelimit()))
			e_warn(drv, "checksum_partial proto=%x!\n",
			       skb->protocol);
		break;
	}

	css = skb_checksum_start_offset(skb);

	i = tx_ring->next_to_use;
	buffer_info = &tx_ring->buffer_info[i];
	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);

	context_desc->lower_setup.ip_config = 0;
	context_desc->upper_setup.tcp_fields.tucss = css;
	context_desc->upper_setup.tcp_fields.tucso =
		css + skb->csum_offset;
	context_desc->upper_setup.tcp_fields.tucse = 0;
	context_desc->tcp_seg_setup.data = 0;
	context_desc->cmd_and_length = cpu_to_le32(cmd_len);

	buffer_info->time_stamp = jiffies;
	buffer_info->next_to_watch = i;

	if (unlikely(++i == tx_ring->count))
		i = 0;
	tx_ring->next_to_use = i;

	return true;
}
#define E1000_MAX_TXD_PWR	12
#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
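/* Illustrative note: with E1000_MAX_TXD_PWR = 12, each data descriptor can
 * carry at most 1 << 12 = 4096 bytes, so the TXD_USE_COUNT() macro defined
 * further below charges a 9018-byte jumbo frame (9018 >> 12) + 1 = 3
 * descriptors.
 */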
static int e1000_tx_map(struct e1000_adapter *adapter,
			struct e1000_tx_ring *tx_ring,
			struct sk_buff *skb, unsigned int first,
			unsigned int max_per_txd, unsigned int nr_frags,
			unsigned int mss)
{
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info;
	unsigned int len = skb_headlen(skb);
	unsigned int offset = 0, size, count = 0, i;
	unsigned int f, bytecount, segs;

	i = tx_ring->next_to_use;

	while (len) {
		buffer_info = &tx_ring->buffer_info[i];
		size = min(len, max_per_txd);
		/* Workaround for Controller erratum --
		 * descriptor for non-tso packet in a linear SKB that follows a
		 * tso gets written back prematurely before the data is fully
		 * DMA'd to the controller
		 */
		if (!skb->data_len && tx_ring->last_tx_tso &&
		    !skb_is_gso(skb)) {
			tx_ring->last_tx_tso = false;
			size -= 4;
		}

		/* Workaround for premature desc write-backs
		 * in TSO mode. Append 4-byte sentinel desc
		 */
		if (unlikely(mss && !nr_frags && size == len && size > 8))
			size -= 4;
		/* work-around for errata 10 and it applies
		 * to all controllers in PCI-X mode
		 * The fix is to make sure that the first descriptor of a
		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
		 */
		if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
			     (size > 2015) && count == 0))
			size = 2015;

		/* Workaround for potential 82544 hang in PCI-X. Avoid
		 * terminating buffers within evenly-aligned dwords.
		 */
		if (unlikely(adapter->pcix_82544 &&
		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
		   size > 4))
			size -= 4;

		buffer_info->length = size;
		/* set time_stamp *before* dma to help avoid a possible race */
		buffer_info->time_stamp = jiffies;
		buffer_info->mapped_as_page = false;
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data + offset,
						  size, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma))
			goto dma_error;
		buffer_info->next_to_watch = i;

		len -= size;
		offset += size;
		count++;
		if (len) {
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;
		}
	}

	for (f = 0; f < nr_frags; f++) {
		const struct skb_frag_struct *frag;

		frag = &skb_shinfo(skb)->frags[f];
		len = skb_frag_size(frag);
		offset = 0;

		while (len) {
			unsigned long bufend;
			i++;
			if (unlikely(i == tx_ring->count))
				i = 0;

			buffer_info = &tx_ring->buffer_info[i];
			size = min(len, max_per_txd);
			/* Workaround for premature desc write-backs
			 * in TSO mode. Append 4-byte sentinel desc
			 */
			if (unlikely(mss && f == (nr_frags-1) &&
				     size == len && size > 8))
				size -= 4;
			/* Workaround for potential 82544 hang in PCI-X.
			 * Avoid terminating buffers within evenly-aligned
			 * dwords.
			 */
			bufend = (unsigned long)
				page_to_phys(skb_frag_page(frag));
			bufend += offset + size - 1;
			if (unlikely(adapter->pcix_82544 &&
				     !(bufend & 4) &&
				     size > 4))
				size -= 4;

			buffer_info->length = size;
			buffer_info->time_stamp = jiffies;
			buffer_info->mapped_as_page = true;
			buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
							    offset, size,
							    DMA_TO_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma))
				goto dma_error;
			buffer_info->next_to_watch = i;

			len -= size;
			offset += size;
			count++;
		}
	}

	segs = skb_shinfo(skb)->gso_segs ?: 1;
	/* multiply data chunks by size of headers */
	bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;

	tx_ring->buffer_info[i].skb = skb;
	tx_ring->buffer_info[i].segs = segs;
	tx_ring->buffer_info[i].bytecount = bytecount;
	tx_ring->buffer_info[first].next_to_watch = i;

	return count;

dma_error:
	dev_err(&pdev->dev, "TX DMA map failed\n");
	buffer_info->dma = 0;
	if (count)
		count--;

	while (count--) {
		if (i == 0)
			i += tx_ring->count;
		i--;
		buffer_info = &tx_ring->buffer_info[i];
		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
	}

	return 0;
}
static void e1000_tx_queue(struct e1000_adapter *adapter,
			   struct e1000_tx_ring *tx_ring, int tx_flags,
			   int count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc = NULL;
	struct e1000_buffer *buffer_info;
	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
	unsigned int i;

	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			     E1000_TXD_CMD_TSE;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;

		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
	}

	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
		txd_lower |= E1000_TXD_CMD_VLE;
		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
	}

	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		txd_lower &= ~(E1000_TXD_CMD_IFCS);

	i = tx_ring->next_to_use;

	while (count--) {
		buffer_info = &tx_ring->buffer_info[i];
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
		tx_desc->lower.data =
			cpu_to_le32(txd_lower | buffer_info->length);
		tx_desc->upper.data = cpu_to_le32(txd_upper);
		if (unlikely(++i == tx_ring->count))
			i = 0;
	}

	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

	/* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
	if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
		tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch. (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	tx_ring->next_to_use = i;
	writel(i, hw->hw_addr + tx_ring->tdt);
	/* we need this if more than one processor can write to our tail
	 * at a time, it synchronizes IO on IA64/Altix systems
	 */
	mmiowb();
}
/* 82547 workaround to avoid controller hang in half-duplex environment.
 * The workaround is to avoid queuing a large packet that would span
 * the internal Tx FIFO ring boundary by notifying the stack to resend
 * the packet at a later time. This gives the Tx FIFO an opportunity to
 * flush all packets. When that occurs, we reset the Tx FIFO pointers
 * to the beginning of the Tx FIFO.
 */

#define E1000_FIFO_HDR			0x10
#define E1000_82547_PAD_LEN		0x3E0

static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
				       struct sk_buff *skb)
{
	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;

	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);

	if (adapter->link_duplex != HALF_DUPLEX)
		goto no_fifo_stall_required;

	if (atomic_read(&adapter->tx_fifo_stall))
		return 1;

	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
		atomic_set(&adapter->tx_fifo_stall, 1);
		return 1;
	}

no_fifo_stall_required:
	adapter->tx_fifo_head += skb_fifo_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	return 0;
}
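/* Worked example (illustrative, numbers hypothetical): with the 16-byte
 * FIFO header, a 1514-byte frame occupies ALIGN(1514 + 16, 16) = 1536
 * bytes of FIFO space. If only fifo_space = 256 bytes remain before the
 * wrap point, then 1536 >= 0x3E0 + 256 (= 1248) holds, so the queue is
 * stalled until the FIFO drains and the stall task above resets the
 * pointers.
 */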
static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;

	netif_stop_queue(netdev);
	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! */
	netif_start_queue(netdev);
	++adapter->restart_queue;
	return 0;
}

static int e1000_maybe_stop_tx(struct net_device *netdev,
			       struct e1000_tx_ring *tx_ring, int size)
{
	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __e1000_maybe_stop_tx(netdev, size);
}

#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
				    struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *tx_ring;
	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
	unsigned int tx_flags = 0;
	unsigned int len = skb_headlen(skb);
	unsigned int nr_frags;
	unsigned int mss;
	int count = 0;
	int tso;
	unsigned int f;

	/* This goes back to the question of how to logically map a tx queue
	 * to a flow. Right now, performance is impacted slightly negatively
	 * if using multiple tx queues. If the stack breaks away from a
	 * single qdisc implementation, we can look at this again.
	 */
	tx_ring = adapter->tx_ring;

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
	 * packets may get corrupted during padding by HW.
	 * To WA this issue, pad all small packets manually.
	 */
	if (skb->len < ETH_ZLEN) {
		if (skb_pad(skb, ETH_ZLEN - skb->len))
			return NETDEV_TX_OK;
		skb->len = ETH_ZLEN;
		skb_set_tail_pointer(skb, ETH_ZLEN);
	}

	mss = skb_shinfo(skb)->gso_size;
	/* The controller does a simple calculation to
	 * make sure there is enough room in the FIFO before
	 * initiating the DMA for each buffer. The calc is:
	 * 4 = ceil(buffer len/mss). To make sure we don't
	 * overrun the FIFO, adjust the max buffer len if mss
	 * drops.
	 */
	if (mss) {
		u8 hdr_len;
		max_per_txd = min(mss << 2, max_per_txd);
		max_txd_pwr = fls(max_per_txd) - 1;
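		/* Worked example (illustrative): with mss = 536 the cap is
		 * min(536 << 2, 4096) = 2144 bytes per buffer and
		 * max_txd_pwr becomes fls(2144) - 1 = 11, so the descriptor
		 * counting below charges one descriptor per 2048 bytes.
		 */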
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (skb->data_len && hdr_len == len) {
			switch (hw->mac_type) {
			case e1000_82544: {
				unsigned int pull_size;

				/* Make sure we have room to chop off 4 bytes,
				 * and that the end alignment will work out to
				 * this hardware's requirements
				 * NOTE: this is a TSO only workaround
				 * if end byte alignment not correct move us
				 * into the next dword
				 */
				if ((unsigned long)(skb_tail_pointer(skb) - 1)
				    & 4)
					break;
				pull_size = min((unsigned int)4,
						skb->data_len);
				if (!__pskb_pull_tail(skb, pull_size)) {
					e_err(drv, "__pskb_pull_tail "
					      "failed.\n");
					dev_kfree_skb_any(skb);
					return NETDEV_TX_OK;
				}
				len = skb_headlen(skb);
				break;
			}
			default:
				/* do nothing */
				break;
			}
		}
	}

	/* reserve a descriptor for the offload context */
	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
		count++;
	count++;

	/* Controller Erratum workaround */
	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
		count++;

	count += TXD_USE_COUNT(len, max_txd_pwr);

	if (adapter->pcix_82544)
		count++;

	/* work-around for errata 10 and it applies to all controllers
	 * in PCI-X mode, so add one more descriptor to the count
	 */
	if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
		     (len > 2015)))
		count++;

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
				       max_txd_pwr);
	if (adapter->pcix_82544)
		count += nr_frags;

	/* need: count + 2 desc gap to keep tail from touching
	 * head, otherwise try next time
	 */
	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
		return NETDEV_TX_BUSY;

	if (unlikely((hw->mac_type == e1000_82547) &&
		     (e1000_82547_fifo_workaround(adapter, skb)))) {
		netif_stop_queue(netdev);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->fifo_stall_task, 1);
		return NETDEV_TX_BUSY;
	}

	if (vlan_tx_tag_present(skb)) {
		tx_flags |= E1000_TX_FLAGS_VLAN;
		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
	}

	first = tx_ring->next_to_use;

	tso = e1000_tso(adapter, tx_ring, skb);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (likely(tso)) {
		if (likely(hw->mac_type != e1000_82544))
			tx_ring->last_tx_tso = true;
		tx_flags |= E1000_TX_FLAGS_TSO;
	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
		tx_flags |= E1000_TX_FLAGS_CSUM;

	if (likely(skb->protocol == htons(ETH_P_IP)))
		tx_flags |= E1000_TX_FLAGS_IPV4;

	if (unlikely(skb->no_fcs))
		tx_flags |= E1000_TX_FLAGS_NO_FCS;

	count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
			     nr_frags, mss);

	if (count) {
		skb_tx_timestamp(skb);

		e1000_tx_queue(adapter, tx_ring, tx_flags, count);
		/* Make sure there is space in the ring for the next send. */
		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
	} else {
		dev_kfree_skb_any(skb);
		tx_ring->buffer_info[first].time_stamp = 0;
		tx_ring->next_to_use = first;
	}

	return NETDEV_TX_OK;
}
#define NUM_REGS 38 /* 1 based count */
static void e1000_regdump(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 regs[NUM_REGS];
	u32 *regs_buff = regs;
	int i = 0;

	static const char * const reg_name[] = {
		"CTRL", "STATUS",
		"RCTL", "RDLEN", "RDH", "RDT", "RDTR",
		"TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
		"TIDV", "TXDCTL", "TADV", "TARC0",
		"TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
		"TXDCTL1", "TARC1",
		"CTRL_EXT", "ERT", "RDBAL", "RDBAH",
		"TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
		"RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
	};

	regs_buff[0]  = er32(CTRL);
	regs_buff[1]  = er32(STATUS);

	regs_buff[2]  = er32(RCTL);
	regs_buff[3]  = er32(RDLEN);
	regs_buff[4]  = er32(RDH);
	regs_buff[5]  = er32(RDT);
	regs_buff[6]  = er32(RDTR);

	regs_buff[7]  = er32(TCTL);
	regs_buff[8]  = er32(TDBAL);
	regs_buff[9]  = er32(TDBAH);
	regs_buff[10] = er32(TDLEN);
	regs_buff[11] = er32(TDH);
	regs_buff[12] = er32(TDT);
	regs_buff[13] = er32(TIDV);
	regs_buff[14] = er32(TXDCTL);
	regs_buff[15] = er32(TADV);
	regs_buff[16] = er32(TARC0);

	regs_buff[17] = er32(TDBAL1);
	regs_buff[18] = er32(TDBAH1);
	regs_buff[19] = er32(TDLEN1);
	regs_buff[20] = er32(TDH1);
	regs_buff[21] = er32(TDT1);
	regs_buff[22] = er32(TXDCTL1);
	regs_buff[23] = er32(TARC1);
	regs_buff[24] = er32(CTRL_EXT);
	regs_buff[25] = er32(ERT);
	regs_buff[26] = er32(RDBAL0);
	regs_buff[27] = er32(RDBAH0);
	regs_buff[28] = er32(TDFH);
	regs_buff[29] = er32(TDFT);
	regs_buff[30] = er32(TDFHS);
	regs_buff[31] = er32(TDFTS);
	regs_buff[32] = er32(TDFPC);
	regs_buff[33] = er32(RDFH);
	regs_buff[34] = er32(RDFT);
	regs_buff[35] = er32(RDFHS);
	regs_buff[36] = er32(RDFTS);
	regs_buff[37] = er32(RDFPC);

	pr_info("Register dump\n");
	for (i = 0; i < NUM_REGS; i++)
		pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]);
}
/**
 * e1000_dump: Print registers, tx ring and rx ring
 **/
static void e1000_dump(struct e1000_adapter *adapter)
{
	/* this code doesn't handle multiple rings */
	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
	int i;

	if (!netif_msg_hw(adapter))
		return;

	/* Print Registers */
	e1000_regdump(adapter);

	/* transmit dump */
	pr_info("TX Desc ring0 dump\n");

	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
	pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");

	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
		struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)tx_desc;
		const char *type;

		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			type = "NTC/U";
		else if (i == tx_ring->next_to_use)
			type = "NTU";
		else if (i == tx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
			((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
			le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->length,
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp, buffer_info->skb, type);
	}

rx_ring_summary:
	/* receive dump */
	pr_info("\nRX Desc ring dump\n");
	/* Legacy Receive Descriptor Format
	 *
	 * +-----------------------------------------------------+
	 * |                Buffer Address [63:0]                |
	 * +-----------------------------------------------------+
	 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
	 * +-----------------------------------------------------+
	 * 63       48 47    40 39      32 31         16 15      0
	 */
	pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");

	if (!netif_msg_rx_status(adapter))
		goto exit;

	for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
		struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
		struct my_u { __le64 a; __le64 b; };
		struct my_u *u = (struct my_u *)rx_desc;
		const char *type;

		if (i == rx_ring->next_to_use)
			type = "NTU";
		else if (i == rx_ring->next_to_clean)
			type = "NTC";
		else
			type = "";

		pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
			i, le64_to_cpu(u->a), le64_to_cpu(u->b),
			(u64)buffer_info->dma, buffer_info->skb, type);
	}
	/* dump the descriptor caches */
	pr_info("Rx descriptor cache in 64bit format\n");
	for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
		pr_info("R%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}

	pr_info("Tx descriptor cache in 64bit format\n");
	for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
		pr_info("T%04X: %08X|%08X %08X|%08X\n",
			i,
			readl(adapter->hw.hw_addr + i+4),
			readl(adapter->hw.hw_addr + i),
			readl(adapter->hw.hw_addr + i+12),
			readl(adapter->hw.hw_addr + i+8));
	}
exit:
	return;
}
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void e1000_tx_timeout(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;
	schedule_work(&adapter->reset_task);
}

static void e1000_reset_task(struct work_struct *work)
{
	struct e1000_adapter *adapter =
		container_of(work, struct e1000_adapter, reset_task);

	if (test_bit(__E1000_DOWN, &adapter->flags))
		return;
	e_err(drv, "Reset adapter\n");
	e1000_reinit_safe(adapter);
}
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the watchdog.
 **/
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
	/* only return the current stats */
	return &netdev->stats;
}
/**
 * e1000_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;

	if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		e_err(probe, "Invalid MTU setting\n");
		return -EINVAL;
	}

	/* Adapter-specific max frame size limits. */
	switch (hw->mac_type) {
	case e1000_undefined ... e1000_82542_rev2_1:
		if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
			e_err(probe, "Jumbo Frames not supported.\n");
			return -EINVAL;
		}
		break;
	default:
		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
		break;
	}

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);
	/* e1000_down has a dependency on max_frame_size */
	hw->max_frame_size = max_frame;
	if (netif_running(netdev))
		e1000_down(adapter);

	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
	 * means we reserve 2 more, this pushes us to allocate from the next
	 * larger slab size.
	 * i.e. RXBUFFER_2048 --> size-4096 slab
	 * however with the new *_jumbo_rx* routines, jumbo receives will use
	 * fragmented skbs
	 */

	if (max_frame <= E1000_RXBUFFER_2048)
		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
	else
#if (PAGE_SIZE >= E1000_RXBUFFER_16384)
		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
#elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
		adapter->rx_buffer_len = PAGE_SIZE;
#endif

	/* adjust allocation if LPE protects us, and we aren't using SBP */
	if (!hw->tbi_compatibility_on &&
	    ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;

	pr_info("%s changing MTU from %d to %d\n",
		netdev->name, netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		e1000_up(adapter);
	else
		e1000_reset(adapter);

	clear_bit(__E1000_RESETTING, &adapter->flags);

	return 0;
}
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void e1000_update_stats(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	unsigned long flags;
	u16 phy_tmp;

#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	spin_lock_irqsave(&adapter->stats_lock, flags);

	/* these counters are modified from e1000_tbi_adjust_stats,
	 * called from the interrupt context, so they must only
	 * be written while holding adapter->stats_lock
	 */
	adapter->stats.crcerrs += er32(CRCERRS);
	adapter->stats.gprc += er32(GPRC);
	adapter->stats.gorcl += er32(GORCL);
	adapter->stats.gorch += er32(GORCH);
	adapter->stats.bprc += er32(BPRC);
	adapter->stats.mprc += er32(MPRC);
	adapter->stats.roc += er32(ROC);

	adapter->stats.prc64 += er32(PRC64);
	adapter->stats.prc127 += er32(PRC127);
	adapter->stats.prc255 += er32(PRC255);
	adapter->stats.prc511 += er32(PRC511);
	adapter->stats.prc1023 += er32(PRC1023);
	adapter->stats.prc1522 += er32(PRC1522);

	adapter->stats.symerrs += er32(SYMERRS);
	adapter->stats.mpc += er32(MPC);
	adapter->stats.scc += er32(SCC);
	adapter->stats.ecol += er32(ECOL);
	adapter->stats.mcc += er32(MCC);
	adapter->stats.latecol += er32(LATECOL);
	adapter->stats.dc += er32(DC);
	adapter->stats.sec += er32(SEC);
	adapter->stats.rlec += er32(RLEC);
	adapter->stats.xonrxc += er32(XONRXC);
	adapter->stats.xontxc += er32(XONTXC);
	adapter->stats.xoffrxc += er32(XOFFRXC);
	adapter->stats.xofftxc += er32(XOFFTXC);
	adapter->stats.fcruc += er32(FCRUC);
	adapter->stats.gptc += er32(GPTC);
	adapter->stats.gotcl += er32(GOTCL);
	adapter->stats.gotch += er32(GOTCH);
	adapter->stats.rnbc += er32(RNBC);
	adapter->stats.ruc += er32(RUC);
	adapter->stats.rfc += er32(RFC);
	adapter->stats.rjc += er32(RJC);
	adapter->stats.torl += er32(TORL);
	adapter->stats.torh += er32(TORH);
	adapter->stats.totl += er32(TOTL);
	adapter->stats.toth += er32(TOTH);
	adapter->stats.tpr += er32(TPR);

	adapter->stats.ptc64 += er32(PTC64);
	adapter->stats.ptc127 += er32(PTC127);
	adapter->stats.ptc255 += er32(PTC255);
	adapter->stats.ptc511 += er32(PTC511);
	adapter->stats.ptc1023 += er32(PTC1023);
	adapter->stats.ptc1522 += er32(PTC1522);

	adapter->stats.mptc += er32(MPTC);
	adapter->stats.bptc += er32(BPTC);
	/* used for adaptive IFS */

	hw->tx_packet_delta = er32(TPT);
	adapter->stats.tpt += hw->tx_packet_delta;
	hw->collision_delta = er32(COLC);
	adapter->stats.colc += hw->collision_delta;

	if (hw->mac_type >= e1000_82543) {
		adapter->stats.algnerrc += er32(ALGNERRC);
		adapter->stats.rxerrc += er32(RXERRC);
		adapter->stats.tncrs += er32(TNCRS);
		adapter->stats.cexterr += er32(CEXTERR);
		adapter->stats.tsctc += er32(TSCTC);
		adapter->stats.tsctfc += er32(TSCTFC);
	}

	/* Fill out the OS statistics structure */
	netdev->stats.multicast = adapter->stats.mprc;
	netdev->stats.collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	netdev->stats.rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
	netdev->stats.rx_length_errors = adapter->stats.rlerrc;
	netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
	netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
	netdev->stats.rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
	netdev->stats.tx_errors = adapter->stats.txerrc;
	netdev->stats.tx_aborted_errors = adapter->stats.ecol;
	netdev->stats.tx_window_errors = adapter->stats.latecol;
	netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
	if (hw->bad_tx_carr_stats_fd &&
	    adapter->link_duplex == FULL_DUPLEX) {
		netdev->stats.tx_carrier_errors = 0;
		adapter->stats.tncrs = 0;
	}

	/* Tx Dropped needs to be maintained elsewhere */
	/* Phy Stats */
	if (hw->media_type == e1000_media_type_copper) {
		if ((adapter->link_speed == SPEED_1000) &&
		    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
			phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
			adapter->phy_stats.idle_errors += phy_tmp;
		}

		if ((hw->mac_type <= e1000_82546) &&
		    (hw->phy_type == e1000_phy_m88) &&
		    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
			adapter->phy_stats.receive_errors += phy_tmp;
	}

	/* Management Stats */
	if (hw->has_smbus) {
		adapter->stats.mgptc += er32(MGTPTC);
		adapter->stats.mgprc += er32(MGTPRC);
		adapter->stats.mgpdc += er32(MGTPDC);
	}

	spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (unlikely(!icr))
		return IRQ_NONE;  /* Not our interrupt */

	/* we might have caused the interrupt, but the above
	 * read cleared it, and just in case the driver is
	 * down there is nothing to do so return handled
	 */
	if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
		return IRQ_HANDLED;

	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
		hw->get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			schedule_delayed_work(&adapter->watchdog_task, 1);
	}

	/* disable interrupts, without the synchronize_irq bit */
	ew32(IMC, ~0);
	E1000_WRITE_FLUSH();

	if (likely(napi_schedule_prep(&adapter->napi))) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	} else {
		/* this really should not happen! if it does it is basically a
		 * bug, but not a hard error, so enable ints and continue
		 */
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_clean - NAPI Rx polling callback
 * @napi: napi struct containing our adapter
 * @budget: amount of work driver is allowed to do this pass
 **/
static int e1000_clean(struct napi_struct *napi, int budget)
{
	struct e1000_adapter *adapter = container_of(napi,
						     struct e1000_adapter,
						     napi);
	int tx_clean_complete = 0, work_done = 0;

	tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);

	adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);

	if (!tx_clean_complete)
		work_done = budget;

	/* If budget not fully consumed, exit the polling mode */
	if (work_done < budget) {
		if (likely(adapter->itr_setting & 3))
			e1000_set_itr(adapter);
		napi_complete(napi);
		if (!test_bit(__E1000_DOWN, &adapter->flags))
			e1000_irq_enable(adapter);
	}

	return work_done;
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * @tx_ring: ring to clean
 **/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
			       struct e1000_tx_ring *tx_ring)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;
		rmb();	/* read buffer_info after eop_desc */
		for ( ; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
			}
			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
			tx_desc->upper.data = 0;

			if (unlikely(++i == tx_ring->count))
				i = 0;
		}

		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD 32
	if (unlikely(count && netif_carrier_ok(netdev) &&
		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->flags))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}
	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[eop].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			e_err(drv, "Detected Tx Unit Hang\n"
			      "  Tx Queue             <%lu>\n"
			      "  TDH                  <%x>\n"
			      "  TDT                  <%x>\n"
			      "  next_to_use          <%x>\n"
			      "  next_to_clean        <%x>\n"
			      "buffer_info[next_to_clean]\n"
			      "  time_stamp           <%lx>\n"
			      "  next_to_watch        <%x>\n"
			      "  jiffies              <%lx>\n"
			      "  next_to_watch.status <%x>\n",
			      (unsigned long)((tx_ring - adapter->tx_ring) /
					      sizeof(struct e1000_tx_ring)),
			      readl(hw->hw_addr + tx_ring->tdh),
			      readl(hw->hw_addr + tx_ring->tdt),
			      tx_ring->next_to_use,
			      tx_ring->next_to_clean,
			      tx_ring->buffer_info[eop].time_stamp,
			      eop,
			      jiffies,
			      eop_desc->upper.fields.status);
			e1000_dump(adapter);
			netif_stop_queue(netdev);
		}
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	netdev->stats.tx_bytes += total_tx_bytes;
	netdev->stats.tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
/**
 * e1000_rx_checksum - Receive Checksum Offload for 82543
 * @adapter:     board private structure
 * @status_err:  receive descriptor status and error fields
 * @csum:        receive descriptor csum field
 * @skb:         socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      u32 csum, struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* 82543 or newer only */
	if (unlikely(hw->mac_type < e1000_82543))
		return;
	/* Ignore Checksum bit is set */
	if (unlikely(status & E1000_RXD_STAT_IXSM))
		return;
	/* TCP/UDP checksum error bit is set */
	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}
	/* TCP/UDP Checksum has not been calculated */
	if (!(status & E1000_RXD_STAT_TCPCS))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	if (likely(status & E1000_RXD_STAT_TCPCS)) {
		/* TCP checksum is good */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	adapter->hw_csum_good++;
}
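/* Illustrative summary of the cases above: IXSM set -> leave the skb
 * unverified; TCPE set -> count a hw_csum_err and let the stack verify;
 * TCPCS set with no error -> CHECKSUM_UNNECESSARY, so the stack skips its
 * own checksum pass for that packet.
 */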
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_receive_skb - helper function to handle rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 */
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
			      __le16 vlan, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	if (status & E1000_RXD_STAT_VP) {
		u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;

		__vlan_hwaccel_put_tag(skb, vid);
	}
	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 */
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
				     struct e1000_rx_ring *rx_ring,
				     int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long irq_flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];
	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma,
			       buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((status & E1000_RXD_STAT_EOP) &&
		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
			u8 *mapped;
			u8 last_byte;

			mapped = page_address(buffer_info->page);
			last_byte = *(mapped + length - 1);
			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock,
						  irq_flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, mapped);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       irq_flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle both page and skb */
				buffer_info->skb = skb;
				/* an error means any chain goes out the
				 * window
				 */
				if (rx_ring->rx_skb_top)
					dev_kfree_skb(rx_ring->rx_skb_top);
				rx_ring->rx_skb_top = NULL;
				goto next_desc;
			}
		}
#define rxtop rx_ring->rx_skb_top
process_skb:
		if (!(status & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				skb_fill_page_desc(rxtop,
				    skb_shinfo(rxtop)->nr_frags,
				    buffer_info->page, 0, length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;
					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page,
							   0, length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}
4162 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4163 e1000_rx_checksum(adapter,
4165 ((u32)(rx_desc->errors) << 24),
4166 le16_to_cpu(rx_desc->csum), skb);
4168 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4169 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4170 pskb_trim(skb, skb->len - 4);
4173 /* eth type trans needs skb->data to point to something */
4174 if (!pskb_may_pull(skb, ETH_HLEN)) {
4175 e_err(drv, "pskb_may_pull failed.\n");
4180 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4183 rx_desc->status = 0;
4185 /* return some buffers to hardware, one at a time is too slow */
4186 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4187 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4191 /* use prefetched values */
4193 buffer_info = next_buffer;
4195 rx_ring->next_to_clean = i;
4197 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4199 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4201 adapter->total_rx_packets += total_rx_packets;
4202 adapter->total_rx_bytes += total_rx_bytes;
4203 netdev->stats.rx_bytes += total_rx_bytes;
4204 netdev->stats.rx_packets += total_rx_packets;
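/* Jumbo frames arrive as a chain of descriptors: the non-EOP fragments are
 * accumulated as page frags on rx_ring->rx_skb_top (aliased as rxtop above)
 * and the skb is only handed to the stack once the EOP descriptor completes
 * the chain.  For single-descriptor packets, the copybreak test copies short
 * frames into the skb's linear area so the receive page can be recycled
 * instead of going through put_page()/alloc_page() again.
 */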
/* this should improve performance for small packets with large amounts
 * of reassembly being done in the stack
 */
static void e1000_check_copybreak(struct net_device *netdev,
				  struct e1000_buffer *buffer_info,
				  u32 length, struct sk_buff **skb)
{
	struct sk_buff *new_skb;

	if (length > copybreak)
		return;

	new_skb = netdev_alloc_skb_ip_align(netdev, length);
	if (!new_skb)
		return;

	skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
				       (*skb)->data - NET_IP_ALIGN,
				       length + NET_IP_ALIGN);
	/* save the skb in buffer_info as good */
	buffer_info->skb = *skb;
	*skb = new_skb;
}
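/* Usage note: on a copybreak hit the caller's skb pointer is swapped for a
 * freshly allocated, right-sized skb holding a copy of the data, while the
 * original full-sized skb is parked back in buffer_info->skb so the ring can
 * re-post it to hardware without a new allocation.  Either way the caller
 * still performs the skb_put().
 */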
/**
 * e1000_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 * @rx_ring: ring to clean
 * @work_done: amount of napi work completed this call
 * @work_to_do: max amount of work allowed for this call to do
 */
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	unsigned long flags;
	u32 length;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC(*rx_ring, i);
	buffer_info = &rx_ring->buffer_info[i];

	while (rx_desc->status & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;
		u8 status;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb(); /* read descriptor and rx_buffer_info after status DD */

		status = rx_desc->status;
		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		if (++i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 buffer_info->length, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->length);
		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment
		 */
		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
			adapter->discarding = true;

		if (adapter->discarding) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (status & E1000_RXD_STAT_EOP)
				adapter->discarding = false;
			goto next_desc;
		}

		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
			u8 last_byte = *(skb->data + length - 1);

			if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
				       last_byte)) {
				spin_lock_irqsave(&adapter->stats_lock, flags);
				e1000_tbi_adjust_stats(hw, &adapter->stats,
						       length, skb->data);
				spin_unlock_irqrestore(&adapter->stats_lock,
						       flags);
				length--;
			} else {
				if (netdev->features & NETIF_F_RXALL)
					goto process_skb;
				/* recycle */
				buffer_info->skb = skb;
				goto next_desc;
			}
		}

process_skb:
		total_rx_bytes += (length - 4); /* don't count FCS */
		total_rx_packets++;

		if (likely(!(netdev->features & NETIF_F_RXFCS)))
			/* adjust length to remove Ethernet CRC, this must be
			 * done after the TBI_ACCEPT workaround above
			 */
			length -= 4;

		e1000_check_copybreak(netdev, buffer_info, length, &skb);

		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter,
				  (u32)(status) |
				  ((u32)(rx_desc->errors) << 24),
				  le16_to_cpu(rx_desc->csum), skb);

		e1000_receive_skb(adapter, status, rx_desc->special, skb);

next_desc:
		rx_desc->status = 0;

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;
	}
	rx_ring->next_to_clean = i;

	cleaned_count = E1000_DESC_UNUSED(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);

	adapter->total_rx_packets += total_rx_packets;
	adapter->total_rx_bytes += total_rx_bytes;
	netdev->stats.rx_bytes += total_rx_bytes;
	netdev->stats.rx_packets += total_rx_packets;
	return cleaned;
}
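/* adapter->discarding lives in the adapter structure rather than a local
 * variable because a fragment chain can straddle a NAPI poll boundary:
 * every buffer of an oversized packet, including the final one carrying
 * EOP, must be dropped even if it is only seen on a later invocation of
 * this routine.
 */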
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
			     struct e1000_rx_ring *rx_ring, int cleaned_count)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16 /* for skb_reserve */;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(GFP_ATOMIC);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							buffer_info->length,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				put_page(buffer_info->page);
				dev_kfree_skb(skb);
				buffer_info->page = NULL;
				buffer_info->skb = NULL;
				buffer_info->dma = 0;
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}
		}

		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
	}
}
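/* The i-- before the writel() above appears deliberate: RDT is left
 * pointing at the last descriptor actually initialized rather than at
 * next_to_use itself, keeping a one-descriptor gap between software and
 * hardware so that head == tail can unambiguously mean "no descriptors
 * available" to the device.
 */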
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
 * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
				   struct e1000_rx_ring *rx_ring,
				   int cleaned_count)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_rx_desc *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = netdev_alloc_skb_ip_align(netdev, bufsz);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
			struct sk_buff *oldskb = skb;

			e_err(rx_err, "skb align check failed: %u bytes at "
			      "%p\n", bufsz, skb->data);
			/* Try again, without freeing the previous */
			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
			/* Failed allocation, critical failure */
			if (!skb) {
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break;
			}

			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
				/* give up */
				dev_kfree_skb(skb);
				dev_kfree_skb(oldskb);
				adapter->alloc_rx_buff_failed++;
				break; /* while !buffer_info->skb */
			}

			/* Use new allocation */
			dev_kfree_skb(oldskb);
		}
		buffer_info->skb = skb;
		buffer_info->length = adapter->rx_buffer_len;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev,
						  skb->data,
						  buffer_info->length,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;
			buffer_info->dma = 0;
			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}

		/* XXX if it was allocated cleanly it will never map to a
		 * boundary crossing
		 */

		/* Fix for errata 23, can't cross 64kB boundary */
		if (!e1000_check_64k_bound(adapter,
					   (void *)(unsigned long)buffer_info->dma,
					   adapter->rx_buffer_len)) {
			e_err(rx_err, "dma align check failed: %u bytes at "
			      "%p\n", adapter->rx_buffer_len,
			      (void *)(unsigned long)buffer_info->dma);
			dev_kfree_skb(skb);
			buffer_info->skb = NULL;

			dma_unmap_single(&pdev->dev, buffer_info->dma,
					 adapter->rx_buffer_len,
					 DMA_FROM_DEVICE);
			buffer_info->dma = 0;

			adapter->alloc_rx_buff_failed++;
			break; /* while !buffer_info->skb */
		}
		rx_desc = E1000_RX_DESC(*rx_ring, i);
		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, hw->hw_addr + rx_ring->rdt);
	}
}
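/* Errata 23 on these controllers: a receive buffer must not cross a 64 kB
 * boundary.  Note the retry strategy above: the first misaligned skb is
 * intentionally kept allocated while a second one is requested, so the
 * allocator cannot simply hand back the same offending region; only after
 * the replacement is validated (or the retry fails) is the old skb freed.
 */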
/**
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
 * @adapter: board private structure
 **/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status;
	u16 phy_ctrl;

	if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
	    !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if (adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back
		 */
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
			return;
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		if (phy_ctrl & CR_1000T_MS_ENABLE) {
			phy_ctrl &= ~CR_1000T_MS_ENABLE;
			e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
			adapter->smartspeed++;
			if (!e1000_phy_setup_autoneg(hw) &&
			    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
					     MII_CR_RESTART_AUTO_NEG);
				e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
			}
		}
		return;
	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
		phy_ctrl |= CR_1000T_MS_ENABLE;
		e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
		if (!e1000_phy_setup_autoneg(hw) &&
		    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
				     MII_CR_RESTART_AUTO_NEG);
			e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
		}
	}
	/* Restart process after E1000_SMARTSPEED_MAX iterations */
	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
		adapter->smartspeed = 0;
}
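/* SmartSpeed recovery in a nutshell: repeated 1000BASE-T master/slave
 * configuration faults (often a 2- or 3-pair cable) first cause any manual
 * master/slave setting to be cleared and autonegotiation restarted; if
 * there is still no link after E1000_SMARTSPEED_DOWNSHIFT polls, manual
 * master/slave configuration is re-enabled instead, and after
 * E1000_SMARTSPEED_MAX polls the whole state machine starts over.
 */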
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return e1000_mii_ioctl(netdev, ifr, cmd);
	default:
		return -EOPNOTSUPP;
	}
}

static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct mii_ioctl_data *data = if_mii(ifr);
	int retval;
	u16 mii_reg;
	unsigned long flags;

	if (hw->media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = hw->phy_addr;
		break;
	case SIOCGMIIREG:
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
				       &data->val_out)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		break;
	case SIOCSMIIREG:
		if (data->reg_num & ~(0x1F))
			return -EFAULT;
		mii_reg = data->val_in;
		spin_lock_irqsave(&adapter->stats_lock, flags);
		if (e1000_write_phy_reg(hw, data->reg_num, mii_reg)) {
			spin_unlock_irqrestore(&adapter->stats_lock, flags);
			return -EIO;
		}
		spin_unlock_irqrestore(&adapter->stats_lock, flags);
		if (hw->media_type == e1000_media_type_copper) {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (mii_reg & MII_CR_AUTO_NEG_EN) {
					hw->autoneg = 1;
					hw->autoneg_advertised = 0x2F;
				} else {
					u32 speed;

					if (mii_reg & 0x40)
						speed = SPEED_1000;
					else if (mii_reg & 0x2000)
						speed = SPEED_100;
					else
						speed = SPEED_10;
					retval = e1000_set_spd_dplx(
						adapter, speed,
						((mii_reg & 0x100)
						 ? DUPLEX_FULL :
						 DUPLEX_HALF));
					if (retval)
						return retval;
				}
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			case M88E1000_PHY_SPEC_CTRL:
			case M88E1000_EXT_PHY_SPEC_CTRL:
				if (e1000_phy_reset(hw))
					return -EIO;
				break;
			}
		} else {
			switch (data->reg_num) {
			case PHY_CTRL:
				if (mii_reg & MII_CR_POWER_DOWN)
					break;
				if (netif_running(adapter->netdev))
					e1000_reinit_locked(adapter);
				else
					e1000_reset(adapter);
				break;
			}
		}
		break;
	default:
		return -EOPNOTSUPP;
	}
	return E1000_SUCCESS;
}
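/* These MII ioctls are what tools such as mii-tool use.  A rough userspace
 * sketch, illustrative only and not part of this driver (error handling
 * omitted; mii_ioctl_data overlays the ifreq union as if_mii() does on the
 * kernel side):
 *
 *	struct ifreq ifr = {0};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock, SIOCGMIIPHY, &ifr);    -- fills mii->phy_id
 *	mii->reg_num = 1;                  -- MII_BMSR
 *	ioctl(sock, SIOCGMIIREG, &ifr);    -- status lands in mii->val_out
 */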
void e1000_pci_set_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;
	int ret_val = pci_set_mwi(adapter->pdev);

	if (ret_val)
		e_err(probe, "Error in setting MWI\n");
}

void e1000_pci_clear_mwi(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	pci_clear_mwi(adapter->pdev);
}

int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
{
	struct e1000_adapter *adapter = hw->back;

	return pcix_get_mmrbc(adapter->pdev);
}

void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
{
	struct e1000_adapter *adapter = hw->back;

	pcix_set_mmrbc(adapter->pdev, mmrbc);
}

void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
	outl(value, port);
}
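/* The wrappers above exist because the shared hardware layer (e1000_hw.c)
 * is kept OS-agnostic: it only carries an opaque hw->back pointer and calls
 * back into this Linux-specific code for PCI configuration accesses such as
 * MWI and PCI-X MMRBC handling and for port I/O.
 */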
static bool e1000_vlan_used(struct e1000_adapter *adapter)
{
	u16 vid;

	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		return true;
	return false;
}

static void __e1000_vlan_mode(struct e1000_adapter *adapter,
			      netdev_features_t features)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl;

	ctrl = er32(CTRL);
	if (features & NETIF_F_HW_VLAN_RX) {
		/* enable VLAN tag insert/strip */
		ctrl |= E1000_CTRL_VME;
	} else {
		/* disable VLAN tag insert/strip */
		ctrl &= ~E1000_CTRL_VME;
	}
	ew32(CTRL, ctrl);
}

static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
				     bool filter_on)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, adapter->netdev->features);
	if (filter_on) {
		/* enable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		if (!(adapter->netdev->flags & IFF_PROMISC))
			rctl |= E1000_RCTL_VFE;
		ew32(RCTL, rctl);
		e1000_update_mng_vlan(adapter);
	} else {
		/* disable VLAN receive filtering */
		rctl = er32(RCTL);
		rctl &= ~E1000_RCTL_VFE;
		ew32(RCTL, rctl);
	}

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static void e1000_vlan_mode(struct net_device *netdev,
			    netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);

	__e1000_vlan_mode(adapter, features);

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);
}

static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if ((hw->mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
	    (vid == adapter->mng_vlan_id))
		return 0;

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, true);

	/* add VID to filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta |= (1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta, index;

	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_disable(adapter);
	if (!test_bit(__E1000_DOWN, &adapter->flags))
		e1000_irq_enable(adapter);

	/* remove VID from filter table */
	index = (vid >> 5) & 0x7F;
	vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
	vfta &= ~(1 << (vid & 0x1F));
	e1000_write_vfta(hw, index, vfta);

	clear_bit(vid, adapter->active_vlans);

	if (!e1000_vlan_used(adapter))
		e1000_vlan_filter_on_off(adapter, false);

	return 0;
}

static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
	u16 vid;

	if (!e1000_vlan_used(adapter))
		return;

	e1000_vlan_filter_on_off(adapter, true);
	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
		e1000_vlan_rx_add_vid(adapter->netdev, vid);
}
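/* VFTA layout: the VLAN Filter Table Array is 128 32-bit registers, one bit
 * per possible VLAN ID (128 * 32 = 4096).  Hence the arithmetic used above:
 *
 *	index = (vid >> 5) & 0x7F;	vid / 32 -> register
 *	bit   = vid & 0x1F;		vid % 32 -> bit within register
 */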
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
	struct e1000_hw *hw = &adapter->hw;

	hw->autoneg = 0;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex */
	if ((hw->media_type == e1000_media_type_fiber) &&
	    spd != SPEED_1000 &&
	    dplx != DUPLEX_FULL)
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_10_half;
		break;
	case SPEED_10 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_10_full;
		break;
	case SPEED_100 + DUPLEX_HALF:
		hw->forced_speed_duplex = e1000_100_half;
		break;
	case SPEED_100 + DUPLEX_FULL:
		hw->forced_speed_duplex = e1000_100_full;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		hw->autoneg = 1;
		hw->autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}
	return 0;

err_inval:
	e_err(probe, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
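/* The switch() on spd + dplx works because all SPEED_* values are even and
 * DUPLEX_HALF/DUPLEX_FULL are 0/1, which the guard at the top enforces:
 * each valid combination therefore produces a unique sum (10, 11, 100, 101,
 * 1001).
 */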
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, ctrl_ext, rctl, status;
	u32 wufc = adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
		e1000_down(adapter);
	}

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = er32(STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		e1000_setup_rctl(adapter);
		e1000_set_rx_mode(netdev);

		rctl = er32(RCTL);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC)
			rctl |= E1000_RCTL_MPE;

		/* enable receives in the hardware */
		ew32(RCTL, rctl | E1000_RCTL_EN);

		if (hw->mac_type >= e1000_82540) {
			ctrl = er32(CTRL);
			/* advertise wake from D3Cold */
			#define E1000_CTRL_ADVD3WUC 0x00100000
			/* phy power management enable */
			#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
			ctrl |= E1000_CTRL_ADVD3WUC |
				E1000_CTRL_EN_PHY_PWR_MGMT;
			ew32(CTRL, ctrl);
		}

		if (hw->media_type == e1000_media_type_fiber ||
		    hw->media_type == e1000_media_type_internal_serdes) {
			/* keep the laser running in D3 */
			ctrl_ext = er32(CTRL_EXT);
			ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
			ew32(CTRL_EXT, ctrl_ext);
		}

		ew32(WUC, E1000_WUC_PME_EN);
		ew32(WUFC, wufc);
	} else {
		ew32(WUC, 0);
		ew32(WUFC, 0);
	}

	e1000_release_manageability(adapter);

	*enable_wake = !!wufc;

	/* make sure adapter isn't asleep if manageability is enabled */
	if (adapter->en_mng_pt)
		*enable_wake = true;

	if (netif_running(netdev))
		e1000_free_irq(adapter);

	pci_disable_device(pdev);

	return 0;
}
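/* The WUFC bits armed above select which events may wake the part (link
 * change, magic packet, multicast, and so on).  E1000_WUFC_LNKC is dropped
 * when the link is already up, presumably so the link transitions caused by
 * the suspend sequence itself do not immediately wake the system.
 */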
#ifdef CONFIG_PM
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;
	bool wake;

	retval = __e1000_shutdown(pdev, &wake);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}
static int e1000_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(netdev)) {
		err = e1000_request_irq(adapter);
		if (err)
			return err;
	}

	e1000_power_up_phy(adapter);
	e1000_reset(adapter);
	ew32(WUS, ~0);

	e1000_init_manageability(adapter);

	if (netif_running(netdev))
		e1000_up(adapter);

	netif_device_attach(netdev);

	return 0;
}
#endif
static void e1000_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__e1000_shutdown(pdev, &wake);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	disable_irq(adapter->pdev->irq);
	e1000_intr(adapter->pdev->irq, netdev);
	enable_irq(adapter->pdev->irq);
}
#endif
/**
 * e1000_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		e1000_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	if (adapter->need_ioport)
		err = pci_enable_device(pdev);
	else
		err = pci_enable_device_mem(pdev);
	if (err) {
		pr_err("Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	e1000_reset(adapter);
	ew32(WUS, ~0);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	e1000_init_manageability(adapter);

	if (netif_running(netdev)) {
		if (e1000_up(adapter)) {
			pr_info("can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
}

/* e1000_main.c */