1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void __devexit e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148                                    struct e1000_rx_ring *rx_ring,
149                                    int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151                                          struct e1000_rx_ring *rx_ring,
152                                          int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155                            int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166                             netdev_features_t features);
167 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
168 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
169 static void e1000_restore_vlan(struct e1000_adapter *adapter);
170
171 #ifdef CONFIG_PM
172 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
173 static int e1000_resume(struct pci_dev *pdev);
174 #endif
175 static void e1000_shutdown(struct pci_dev *pdev);
176
177 #ifdef CONFIG_NET_POLL_CONTROLLER
178 /* for netdump / net console */
179 static void e1000_netpoll (struct net_device *netdev);
180 #endif
181
182 #define COPYBREAK_DEFAULT 256
183 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
184 module_param(copybreak, uint, 0644);
185 MODULE_PARM_DESC(copybreak,
186         "Maximum size of packet that is copied to a new buffer on receive");
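/* Frames no larger than copybreak bytes are copied into a freshly
 * allocated skb on receive (see the parameter description above);
 * e.g. "modprobe e1000 copybreak=128" (an illustrative value) lowers
 * the threshold, and copybreak=0 disables copying entirely.
 */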
187
188 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
189                      pci_channel_state_t state);
190 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
191 static void e1000_io_resume(struct pci_dev *pdev);
192
193 static struct pci_error_handlers e1000_err_handler = {
194         .error_detected = e1000_io_error_detected,
195         .slot_reset = e1000_io_slot_reset,
196         .resume = e1000_io_resume,
197 };
198
199 static struct pci_driver e1000_driver = {
200         .name     = e1000_driver_name,
201         .id_table = e1000_pci_tbl,
202         .probe    = e1000_probe,
203         .remove   = __devexit_p(e1000_remove),
204 #ifdef CONFIG_PM
205         /* Power Management Hooks */
206         .suspend  = e1000_suspend,
207         .resume   = e1000_resume,
208 #endif
209         .shutdown = e1000_shutdown,
210         .err_handler = &e1000_err_handler
211 };
212
213 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
214 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
215 MODULE_LICENSE("GPL");
216 MODULE_VERSION(DRV_VERSION);
217
218 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
219 module_param(debug, int, 0);
220 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
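/* "debug" selects how many netif message-level bits to enable; probe
 * converts it via msg_enable = (1 << debug) - 1, so debug=16 enables
 * all NETIF_MSG_* categories (matching the "16=all" description above).
 */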
221
222 /**
223  * e1000_get_hw_dev - return the net_device associated with this adapter
224  * used by the hardware layer to print debugging information
225  *
226  **/
227 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
228 {
229         struct e1000_adapter *adapter = hw->back;
230         return adapter->netdev;
231 }
232
233 /**
234  * e1000_init_module - Driver Registration Routine
235  *
236  * e1000_init_module is the first routine called when the driver is
237  * loaded. All it does is register with the PCI subsystem.
238  **/
239
240 static int __init e1000_init_module(void)
241 {
242         int ret;
243         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
244
245         pr_info("%s\n", e1000_copyright);
246
247         ret = pci_register_driver(&e1000_driver);
248         if (copybreak != COPYBREAK_DEFAULT) {
249                 if (copybreak == 0)
250                         pr_info("copybreak disabled\n");
251                 else
252                         pr_info("copybreak enabled for "
253                                    "packets <= %u bytes\n", copybreak);
254         }
255         return ret;
256 }
257
258 module_init(e1000_init_module);
259
260 /**
261  * e1000_exit_module - Driver Exit Cleanup Routine
262  *
263  * e1000_exit_module is called just before the driver is removed
264  * from memory.
265  **/
266
267 static void __exit e1000_exit_module(void)
268 {
269         pci_unregister_driver(&e1000_driver);
270 }
271
272 module_exit(e1000_exit_module);
273
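/**
 * e1000_request_irq - register the driver's interrupt handler
 * @adapter: board private structure
 *
 * The legacy interrupt from pdev->irq is requested as a shared line
 * (IRQF_SHARED) and dispatched to e1000_intr(); this driver has no
 * MSI path.
 **/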
274 static int e1000_request_irq(struct e1000_adapter *adapter)
275 {
276         struct net_device *netdev = adapter->netdev;
277         irq_handler_t handler = e1000_intr;
278         int irq_flags = IRQF_SHARED;
279         int err;
280
281         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
282                           netdev);
283         if (err) {
284                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
285         }
286
287         return err;
288 }
289
290 static void e1000_free_irq(struct e1000_adapter *adapter)
291 {
292         struct net_device *netdev = adapter->netdev;
293
294         free_irq(adapter->pdev->irq, netdev);
295 }
296
297 /**
298  * e1000_irq_disable - Mask off interrupt generation on the NIC
299  * @adapter: board private structure
300  **/
301
302 static void e1000_irq_disable(struct e1000_adapter *adapter)
303 {
304         struct e1000_hw *hw = &adapter->hw;
305
306         ew32(IMC, ~0);
307         E1000_WRITE_FLUSH();
308         synchronize_irq(adapter->pdev->irq);
309 }
310
311 /**
312  * e1000_irq_enable - Enable default interrupt generation settings
313  * @adapter: board private structure
314  **/
315
316 static void e1000_irq_enable(struct e1000_adapter *adapter)
317 {
318         struct e1000_hw *hw = &adapter->hw;
319
320         ew32(IMS, IMS_ENABLE_MASK);
321         E1000_WRITE_FLUSH();
322 }
323
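/**
 * e1000_update_mng_vlan - sync the management VLAN with the VLAN filter
 * @adapter: board private structure
 *
 * Ensures the VLAN id carried in the manageability (DHCP) cookie is
 * registered in the active VLAN set, and drops the previously tracked
 * management VLAN id once it is no longer in use.
 **/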
324 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
325 {
326         struct e1000_hw *hw = &adapter->hw;
327         struct net_device *netdev = adapter->netdev;
328         u16 vid = hw->mng_cookie.vlan_id;
329         u16 old_vid = adapter->mng_vlan_id;
330
331         if (!e1000_vlan_used(adapter))
332                 return;
333
334         if (!test_bit(vid, adapter->active_vlans)) {
335                 if (hw->mng_cookie.status &
336                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
337                         e1000_vlan_rx_add_vid(netdev, vid);
338                         adapter->mng_vlan_id = vid;
339                 } else {
340                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
341                 }
342                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
343                     (vid != old_vid) &&
344                     !test_bit(old_vid, adapter->active_vlans))
345                         e1000_vlan_rx_kill_vid(netdev, old_vid);
346         } else {
347                 adapter->mng_vlan_id = vid;
348         }
349 }
350
351 static void e1000_init_manageability(struct e1000_adapter *adapter)
352 {
353         struct e1000_hw *hw = &adapter->hw;
354
355         if (adapter->en_mng_pt) {
356                 u32 manc = er32(MANC);
357
358                 /* disable hardware interception of ARP */
359                 manc &= ~(E1000_MANC_ARP_EN);
360
361                 ew32(MANC, manc);
362         }
363 }
364
365 static void e1000_release_manageability(struct e1000_adapter *adapter)
366 {
367         struct e1000_hw *hw = &adapter->hw;
368
369         if (adapter->en_mng_pt) {
370                 u32 manc = er32(MANC);
371
372                 /* re-enable hardware interception of ARP */
373                 manc |= E1000_MANC_ARP_EN;
374
375                 ew32(MANC, manc);
376         }
377 }
378
379 /**
380  * e1000_configure - configure the hardware for RX and TX
381  * @adapter: board private structure
382  **/
383 static void e1000_configure(struct e1000_adapter *adapter)
384 {
385         struct net_device *netdev = adapter->netdev;
386         int i;
387
388         e1000_set_rx_mode(netdev);
389
390         e1000_restore_vlan(adapter);
391         e1000_init_manageability(adapter);
392
393         e1000_configure_tx(adapter);
394         e1000_setup_rctl(adapter);
395         e1000_configure_rx(adapter);
396         /* call E1000_DESC_UNUSED which always leaves
397          * at least 1 descriptor unused to make sure
398          * next_to_use != next_to_clean */
399         for (i = 0; i < adapter->num_rx_queues; i++) {
400                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
401                 adapter->alloc_rx_buf(adapter, ring,
402                                       E1000_DESC_UNUSED(ring));
403         }
404 }
405
406 int e1000_up(struct e1000_adapter *adapter)
407 {
408         struct e1000_hw *hw = &adapter->hw;
409
410         /* hardware has been reset, we need to reload some things */
411         e1000_configure(adapter);
412
413         clear_bit(__E1000_DOWN, &adapter->flags);
414
415         napi_enable(&adapter->napi);
416
417         e1000_irq_enable(adapter);
418
419         netif_wake_queue(adapter->netdev);
420
421         /* fire a link change interrupt to start the watchdog */
422         ew32(ICS, E1000_ICS_LSC);
423         return 0;
424 }
425
426 /**
427  * e1000_power_up_phy - restore link in case the phy was powered down
428  * @adapter: address of board private structure
429  *
430  * The phy may be powered down to save power and turn off link when the
431  * driver is unloaded and wake on lan is not enabled (among others)
432  * *** this routine MUST be followed by a call to e1000_reset ***
433  *
434  **/
435
436 void e1000_power_up_phy(struct e1000_adapter *adapter)
437 {
438         struct e1000_hw *hw = &adapter->hw;
439         u16 mii_reg = 0;
440
441         /* Just clear the power down bit to wake the phy back up */
442         if (hw->media_type == e1000_media_type_copper) {
443                 /* according to the manual, the phy will retain its
444                  * settings across a power-down/up cycle */
445                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
446                 mii_reg &= ~MII_CR_POWER_DOWN;
447                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
448         }
449 }
450
451 static void e1000_power_down_phy(struct e1000_adapter *adapter)
452 {
453         struct e1000_hw *hw = &adapter->hw;
454
455         /* Power down the PHY so no link is implied when interface is down.
456          * The PHY cannot be powered down if any of the following is true:
457          * (a) WoL is enabled
458          * (b) AMT is active
459          * (c) SoL/IDER session is active */
460         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
461            hw->media_type == e1000_media_type_copper) {
462                 u16 mii_reg = 0;
463
464                 switch (hw->mac_type) {
465                 case e1000_82540:
466                 case e1000_82545:
467                 case e1000_82545_rev_3:
468                 case e1000_82546:
469                 case e1000_ce4100:
470                 case e1000_82546_rev_3:
471                 case e1000_82541:
472                 case e1000_82541_rev_2:
473                 case e1000_82547:
474                 case e1000_82547_rev_2:
475                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
476                                 goto out;
477                         break;
478                 default:
479                         goto out;
480                 }
481                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
482                 mii_reg |= MII_CR_POWER_DOWN;
483                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
484                 msleep(1);
485         }
486 out:
487         return;
488 }
489
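/* Mark the adapter as DOWN and cancel all deferred work (reset,
 * watchdog, PHY info and FIFO-stall tasks) so that nothing can
 * reschedule itself while the interface is being torn down.
 */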
490 static void e1000_down_and_stop(struct e1000_adapter *adapter)
491 {
492         set_bit(__E1000_DOWN, &adapter->flags);
493         cancel_work_sync(&adapter->reset_task);
494         cancel_delayed_work_sync(&adapter->watchdog_task);
495         cancel_delayed_work_sync(&adapter->phy_info_task);
496         cancel_delayed_work_sync(&adapter->fifo_stall_task);
497 }
498
499 void e1000_down(struct e1000_adapter *adapter)
500 {
501         struct e1000_hw *hw = &adapter->hw;
502         struct net_device *netdev = adapter->netdev;
503         u32 rctl, tctl;
504
505
506         /* disable receives in the hardware */
507         rctl = er32(RCTL);
508         ew32(RCTL, rctl & ~E1000_RCTL_EN);
509         /* flush and sleep below */
510
511         netif_tx_disable(netdev);
512
513         /* disable transmits in the hardware */
514         tctl = er32(TCTL);
515         tctl &= ~E1000_TCTL_EN;
516         ew32(TCTL, tctl);
517         /* flush both disables and wait for them to finish */
518         E1000_WRITE_FLUSH();
519         msleep(10);
520
521         napi_disable(&adapter->napi);
522
523         e1000_irq_disable(adapter);
524
525         /*
526          * Setting DOWN must be after irq_disable to prevent
527          * a screaming interrupt.  Setting DOWN also prevents
528          * tasks from rescheduling.
529          */
530         e1000_down_and_stop(adapter);
531
532         adapter->link_speed = 0;
533         adapter->link_duplex = 0;
534         netif_carrier_off(netdev);
535
536         e1000_reset(adapter);
537         e1000_clean_all_tx_rings(adapter);
538         e1000_clean_all_rx_rings(adapter);
539 }
540
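/* e1000_reinit_safe - restart the interface from process context,
 * holding adapter->mutex and serializing against concurrent resets
 * via the __E1000_RESETTING flag.
 */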
541 static void e1000_reinit_safe(struct e1000_adapter *adapter)
542 {
543         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
544                 msleep(1);
545         mutex_lock(&adapter->mutex);
546         e1000_down(adapter);
547         e1000_up(adapter);
548         mutex_unlock(&adapter->mutex);
549         clear_bit(__E1000_RESETTING, &adapter->flags);
550 }
551
552 void e1000_reinit_locked(struct e1000_adapter *adapter)
553 {
554         /* if rtnl_lock is not held the call path is bogus */
555         ASSERT_RTNL();
556         WARN_ON(in_interrupt());
557         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
558                 msleep(1);
559         e1000_down(adapter);
560         e1000_up(adapter);
561         clear_bit(__E1000_RESETTING, &adapter->flags);
562 }
563
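/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer allocation (PBA) between the Rx and
 * Tx FIFOs according to MAC type and maximum frame size, recomputes
 * the flow control watermarks, and then resets and re-initializes the
 * MAC.
 **/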
564 void e1000_reset(struct e1000_adapter *adapter)
565 {
566         struct e1000_hw *hw = &adapter->hw;
567         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
568         bool legacy_pba_adjust = false;
569         u16 hwm;
570
571         /* Repartition the PBA for an MTU greater than 9k.
572          * CTRL.RST is required for the change to take effect.
573          */
574
575         switch (hw->mac_type) {
576         case e1000_82542_rev2_0:
577         case e1000_82542_rev2_1:
578         case e1000_82543:
579         case e1000_82544:
580         case e1000_82540:
581         case e1000_82541:
582         case e1000_82541_rev_2:
583                 legacy_pba_adjust = true;
584                 pba = E1000_PBA_48K;
585                 break;
586         case e1000_82545:
587         case e1000_82545_rev_3:
588         case e1000_82546:
589         case e1000_ce4100:
590         case e1000_82546_rev_3:
591                 pba = E1000_PBA_48K;
592                 break;
593         case e1000_82547:
594         case e1000_82547_rev_2:
595                 legacy_pba_adjust = true;
596                 pba = E1000_PBA_30K;
597                 break;
598         case e1000_undefined:
599         case e1000_num_macs:
600                 break;
601         }
602
603         if (legacy_pba_adjust) {
604                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
605                         pba -= 8; /* allocate more FIFO for Tx */
606
607                 if (hw->mac_type == e1000_82547) {
608                         adapter->tx_fifo_head = 0;
609                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
610                         adapter->tx_fifo_size =
611                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
612                         atomic_set(&adapter->tx_fifo_stall, 0);
613                 }
614         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
615                 /* adjust PBA for jumbo frames */
616                 ew32(PBA, pba);
617
618                 /* To maintain wire speed transmits, the Tx FIFO should be
619                  * large enough to accommodate two full transmit packets,
620                  * rounded up to the next 1KB and expressed in KB.  Likewise,
621                  * the Rx FIFO should be large enough to accommodate at least
622                  * one full receive packet and is similarly rounded up and
623                  * expressed in KB. */
624                 pba = er32(PBA);
625                 /* upper 16 bits has Tx packet buffer allocation size in KB */
626                 tx_space = pba >> 16;
627                 /* lower 16 bits has Rx packet buffer allocation size in KB */
628                 pba &= 0xffff;
629                 /*
630                  * the Tx FIFO also stores 16 bytes of information about each
631                  * packet; don't include the Ethernet FCS because hardware appends it
632                  */
633                 min_tx_space = (hw->max_frame_size +
634                                 sizeof(struct e1000_tx_desc) -
635                                 ETH_FCS_LEN) * 2;
636                 min_tx_space = ALIGN(min_tx_space, 1024);
637                 min_tx_space >>= 10;
638                 /* software strips receive CRC, so leave room for it */
639                 min_rx_space = hw->max_frame_size;
640                 min_rx_space = ALIGN(min_rx_space, 1024);
641                 min_rx_space >>= 10;
642
643                 /* If current Tx allocation is less than the min Tx FIFO size,
644                  * and the min Tx FIFO size is less than the current Rx FIFO
645                  * allocation, take space away from current Rx allocation */
646                 if (tx_space < min_tx_space &&
647                     ((min_tx_space - tx_space) < pba)) {
648                         pba = pba - (min_tx_space - tx_space);
649
650                         /* PCI/PCIx hardware has PBA alignment constraints */
651                         switch (hw->mac_type) {
652                         case e1000_82545 ... e1000_82546_rev_3:
653                                 pba &= ~(E1000_PBA_8K - 1);
654                                 break;
655                         default:
656                                 break;
657                         }
658
659                         /* if short on rx space, rx wins and must trump tx
660                          * adjustment or use Early Receive if available */
661                         if (pba < min_rx_space)
662                                 pba = min_rx_space;
663                 }
664         }
665
666         ew32(PBA, pba);
667
668         /*
669          * flow control settings:
670          * The high water mark must be low enough to fit one full frame
671          * (or the size used for early receive) above it in the Rx FIFO.
672          * Set it to the lower of:
673          * - 90% of the Rx FIFO size, and
674          * - the full Rx FIFO size minus the early receive size (for parts
675          *   with ERT support assuming ERT set to E1000_ERT_2048), or
676          * - the full Rx FIFO size minus one full frame
677          */
678         hwm = min(((pba << 10) * 9 / 10),
679                   ((pba << 10) - hw->max_frame_size));
680
681         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
682         hw->fc_low_water = hw->fc_high_water - 8;
683         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
684         hw->fc_send_xon = 1;
685         hw->fc = hw->original_fc;
686
687         /* Allow time for pending master requests to run */
688         e1000_reset_hw(hw);
689         if (hw->mac_type >= e1000_82544)
690                 ew32(WUC, 0);
691
692         if (e1000_init_hw(hw))
693                 e_dev_err("Hardware Error\n");
694         e1000_update_mng_vlan(adapter);
695
696         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
697         if (hw->mac_type >= e1000_82544 &&
698             hw->autoneg == 1 &&
699             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
700                 u32 ctrl = er32(CTRL);
701                 /* clear phy power management bit if we are in gig only mode,
702                  * which if enabled will attempt negotiation to 100Mb, which
703                  * can cause a loss of link at power off or driver unload */
704                 ctrl &= ~E1000_CTRL_SWDPIN3;
705                 ew32(CTRL, ctrl);
706         }
707
708         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
709         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
710
711         e1000_reset_adaptive(hw);
712         e1000_phy_get_info(hw, &adapter->phy_info);
713
714         e1000_release_manageability(adapter);
715 }
716
717 /**
718  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
719  **/
720 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
721 {
722         struct net_device *netdev = adapter->netdev;
723         struct ethtool_eeprom eeprom;
724         const struct ethtool_ops *ops = netdev->ethtool_ops;
725         u8 *data;
726         int i;
727         u16 csum_old, csum_new = 0;
728
729         eeprom.len = ops->get_eeprom_len(netdev);
730         eeprom.offset = 0;
731
732         data = kmalloc(eeprom.len, GFP_KERNEL);
733         if (!data)
734                 return;
735
736         ops->get_eeprom(netdev, &eeprom, data);
737
738         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
739                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
740         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
741                 csum_new += data[i] + (data[i + 1] << 8);
742         csum_new = EEPROM_SUM - csum_new;
743
744         pr_err("/*********************/\n");
745         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
746         pr_err("Calculated              : 0x%04x\n", csum_new);
747
748         pr_err("Offset    Values\n");
749         pr_err("========  ======\n");
750         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
751
752         pr_err("Include this output when contacting your support provider.\n");
753         pr_err("This is not a software error! Something bad happened to\n");
754         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
755         pr_err("result in further problems, possibly loss of data,\n");
756         pr_err("corruption or system hangs!\n");
757         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
758         pr_err("which is invalid and requires you to set the proper MAC\n");
759         pr_err("address manually before continuing to enable this network\n");
760         pr_err("device. Please inspect the EEPROM dump and report the\n");
761         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
762         pr_err("/*********************/\n");
763
764         kfree(data);
765 }
766
767 /**
768  * e1000_is_need_ioport - determine whether an adapter needs ioport resources
769  * @pdev: PCI device information struct
770  *
771  * Return true if an adapter needs ioport resources
772  **/
773 static int e1000_is_need_ioport(struct pci_dev *pdev)
774 {
775         switch (pdev->device) {
776         case E1000_DEV_ID_82540EM:
777         case E1000_DEV_ID_82540EM_LOM:
778         case E1000_DEV_ID_82540EP:
779         case E1000_DEV_ID_82540EP_LOM:
780         case E1000_DEV_ID_82540EP_LP:
781         case E1000_DEV_ID_82541EI:
782         case E1000_DEV_ID_82541EI_MOBILE:
783         case E1000_DEV_ID_82541ER:
784         case E1000_DEV_ID_82541ER_LOM:
785         case E1000_DEV_ID_82541GI:
786         case E1000_DEV_ID_82541GI_LF:
787         case E1000_DEV_ID_82541GI_MOBILE:
788         case E1000_DEV_ID_82544EI_COPPER:
789         case E1000_DEV_ID_82544EI_FIBER:
790         case E1000_DEV_ID_82544GC_COPPER:
791         case E1000_DEV_ID_82544GC_LOM:
792         case E1000_DEV_ID_82545EM_COPPER:
793         case E1000_DEV_ID_82545EM_FIBER:
794         case E1000_DEV_ID_82546EB_COPPER:
795         case E1000_DEV_ID_82546EB_FIBER:
796         case E1000_DEV_ID_82546EB_QUAD_COPPER:
797                 return true;
798         default:
799                 return false;
800         }
801 }
802
803 static netdev_features_t e1000_fix_features(struct net_device *netdev,
804         netdev_features_t features)
805 {
806         /*
807          * Since there is no support for separate Rx/Tx VLAN accel
808          * enable/disable, make sure the Tx flag is always in the same state as Rx.
809          */
810         if (features & NETIF_F_HW_VLAN_RX)
811                 features |= NETIF_F_HW_VLAN_TX;
812         else
813                 features &= ~NETIF_F_HW_VLAN_TX;
814
815         return features;
816 }
817
818 static int e1000_set_features(struct net_device *netdev,
819         netdev_features_t features)
820 {
821         struct e1000_adapter *adapter = netdev_priv(netdev);
822         netdev_features_t changed = features ^ netdev->features;
823
824         if (changed & NETIF_F_HW_VLAN_RX)
825                 e1000_vlan_mode(netdev, features);
826
827         if (!(changed & NETIF_F_RXCSUM))
828                 return 0;
829
830         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
831
832         if (netif_running(netdev))
833                 e1000_reinit_locked(adapter);
834         else
835                 e1000_reset(adapter);
836
837         return 0;
838 }
839
840 static const struct net_device_ops e1000_netdev_ops = {
841         .ndo_open               = e1000_open,
842         .ndo_stop               = e1000_close,
843         .ndo_start_xmit         = e1000_xmit_frame,
844         .ndo_get_stats          = e1000_get_stats,
845         .ndo_set_rx_mode        = e1000_set_rx_mode,
846         .ndo_set_mac_address    = e1000_set_mac,
847         .ndo_tx_timeout         = e1000_tx_timeout,
848         .ndo_change_mtu         = e1000_change_mtu,
849         .ndo_do_ioctl           = e1000_ioctl,
850         .ndo_validate_addr      = eth_validate_addr,
851         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
852         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
853 #ifdef CONFIG_NET_POLL_CONTROLLER
854         .ndo_poll_controller    = e1000_netpoll,
855 #endif
856         .ndo_fix_features       = e1000_fix_features,
857         .ndo_set_features       = e1000_set_features,
858 };
859
860 /**
861  * e1000_init_hw_struct - initialize members of hw struct
862  * @adapter: board private struct
863  * @hw: structure used by e1000_hw.c
864  *
865  * Factors out initialization of the e1000_hw struct to its own function
866  * that can be called very early at init (just after struct allocation).
867  * Fields are initialized based on PCI device information and
868  * OS network device settings (MTU size).
869  * Returns negative error codes if MAC type setup fails.
870  */
871 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
872                                 struct e1000_hw *hw)
873 {
874         struct pci_dev *pdev = adapter->pdev;
875
876         /* PCI config space info */
877         hw->vendor_id = pdev->vendor;
878         hw->device_id = pdev->device;
879         hw->subsystem_vendor_id = pdev->subsystem_vendor;
880         hw->subsystem_id = pdev->subsystem_device;
881         hw->revision_id = pdev->revision;
882
883         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
884
885         hw->max_frame_size = adapter->netdev->mtu +
886                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
887         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
888
889         /* identify the MAC */
890         if (e1000_set_mac_type(hw)) {
891                 e_err(probe, "Unknown MAC Type\n");
892                 return -EIO;
893         }
894
895         switch (hw->mac_type) {
896         default:
897                 break;
898         case e1000_82541:
899         case e1000_82547:
900         case e1000_82541_rev_2:
901         case e1000_82547_rev_2:
902                 hw->phy_init_script = 1;
903                 break;
904         }
905
906         e1000_set_media_type(hw);
907         e1000_get_bus_info(hw);
908
909         hw->wait_autoneg_complete = false;
910         hw->tbi_compatibility_en = true;
911         hw->adaptive_ifs = true;
912
913         /* Copper options */
914
915         if (hw->media_type == e1000_media_type_copper) {
916                 hw->mdix = AUTO_ALL_MODES;
917                 hw->disable_polarity_correction = false;
918                 hw->master_slave = E1000_MASTER_SLAVE;
919         }
920
921         return 0;
922 }
923
924 /**
925  * e1000_probe - Device Initialization Routine
926  * @pdev: PCI device information struct
927  * @ent: entry in e1000_pci_tbl
928  *
929  * Returns 0 on success, negative on failure
930  *
931  * e1000_probe initializes an adapter identified by a pci_dev structure.
932  * The OS initialization, configuring of the adapter private structure,
933  * and a hardware reset occur.
934  **/
935 static int __devinit e1000_probe(struct pci_dev *pdev,
936                                  const struct pci_device_id *ent)
937 {
938         struct net_device *netdev;
939         struct e1000_adapter *adapter;
940         struct e1000_hw *hw;
941
942         static int cards_found = 0;
943         static int global_quad_port_a = 0; /* global ksp3 port a indication */
944         int i, err, pci_using_dac;
945         u16 eeprom_data = 0;
946         u16 tmp = 0;
947         u16 eeprom_apme_mask = E1000_EEPROM_APME;
948         int bars, need_ioport;
949
950         /* do not allocate ioport bars when not needed */
951         need_ioport = e1000_is_need_ioport(pdev);
952         if (need_ioport) {
953                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
954                 err = pci_enable_device(pdev);
955         } else {
956                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
957                 err = pci_enable_device_mem(pdev);
958         }
959         if (err)
960                 return err;
961
962         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
963         if (err)
964                 goto err_pci_reg;
965
966         pci_set_master(pdev);
967         err = pci_save_state(pdev);
968         if (err)
969                 goto err_alloc_etherdev;
970
971         err = -ENOMEM;
972         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
973         if (!netdev)
974                 goto err_alloc_etherdev;
975
976         SET_NETDEV_DEV(netdev, &pdev->dev);
977
978         pci_set_drvdata(pdev, netdev);
979         adapter = netdev_priv(netdev);
980         adapter->netdev = netdev;
981         adapter->pdev = pdev;
982         adapter->msg_enable = (1 << debug) - 1;
983         adapter->bars = bars;
984         adapter->need_ioport = need_ioport;
985
986         hw = &adapter->hw;
987         hw->back = adapter;
988
989         err = -EIO;
990         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
991         if (!hw->hw_addr)
992                 goto err_ioremap;
993
994         if (adapter->need_ioport) {
995                 for (i = BAR_1; i <= BAR_5; i++) {
996                         if (pci_resource_len(pdev, i) == 0)
997                                 continue;
998                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
999                                 hw->io_base = pci_resource_start(pdev, i);
1000                                 break;
1001                         }
1002                 }
1003         }
1004
1005         /* make ready for any if (hw->...) below */
1006         err = e1000_init_hw_struct(adapter, hw);
1007         if (err)
1008                 goto err_sw_init;
1009
1010         /*
1011          * there is a workaround being applied below that limits
1012          * 64-bit DMA addresses to 64-bit hardware.  There are some
1013          * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1014          */
1015         pci_using_dac = 0;
1016         if ((hw->bus_type == e1000_bus_type_pcix) &&
1017             !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1018                 /*
1019                  * according to DMA-API-HOWTO, coherent calls will always
1020                  * succeed if the set call did
1021                  */
1022                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1023                 pci_using_dac = 1;
1024         } else {
1025                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1026                 if (err) {
1027                         pr_err("No usable DMA config, aborting\n");
1028                         goto err_dma;
1029                 }
1030                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1031         }
1032
1033         netdev->netdev_ops = &e1000_netdev_ops;
1034         e1000_set_ethtool_ops(netdev);
1035         netdev->watchdog_timeo = 5 * HZ;
1036         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1037
1038         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1039
1040         adapter->bd_number = cards_found;
1041
1042         /* setup the private structure */
1043
1044         err = e1000_sw_init(adapter);
1045         if (err)
1046                 goto err_sw_init;
1047
1048         err = -EIO;
1049         if (hw->mac_type == e1000_ce4100) {
1050                 hw->ce4100_gbe_mdio_base_virt =
1051                                         ioremap(pci_resource_start(pdev, BAR_1),
1052                                                 pci_resource_len(pdev, BAR_1));
1053
1054                 if (!hw->ce4100_gbe_mdio_base_virt)
1055                         goto err_mdio_ioremap;
1056         }
1057
1058         if (hw->mac_type >= e1000_82543) {
1059                 netdev->hw_features = NETIF_F_SG |
1060                                    NETIF_F_HW_CSUM |
1061                                    NETIF_F_HW_VLAN_RX;
1062                 netdev->features = NETIF_F_HW_VLAN_TX |
1063                                    NETIF_F_HW_VLAN_FILTER;
1064         }
1065
1066         if ((hw->mac_type >= e1000_82544) &&
1067            (hw->mac_type != e1000_82547))
1068                 netdev->hw_features |= NETIF_F_TSO;
1069
1070         netdev->features |= netdev->hw_features;
1071         netdev->hw_features |= NETIF_F_RXCSUM;
1072
1073         if (pci_using_dac) {
1074                 netdev->features |= NETIF_F_HIGHDMA;
1075                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1076         }
1077
1078         netdev->vlan_features |= NETIF_F_TSO;
1079         netdev->vlan_features |= NETIF_F_HW_CSUM;
1080         netdev->vlan_features |= NETIF_F_SG;
1081
1082         netdev->priv_flags |= IFF_UNICAST_FLT;
1083
1084         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1085
1086         /* initialize eeprom parameters */
1087         if (e1000_init_eeprom_params(hw)) {
1088                 e_err(probe, "EEPROM initialization failed\n");
1089                 goto err_eeprom;
1090         }
1091
1092         /* before reading the EEPROM, reset the controller to
1093          * put the device in a known good starting state */
1094
1095         e1000_reset_hw(hw);
1096
1097         /* make sure the EEPROM is good */
1098         if (e1000_validate_eeprom_checksum(hw) < 0) {
1099                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1100                 e1000_dump_eeprom(adapter);
1101                 /*
1102                  * set MAC address to all zeroes to invalidate and temporarily
1103                  * disable this device for the user. This blocks regular
1104                  * traffic while still permitting ethtool ioctls to reach
1105                  * the hardware, and allows the user to bring the interface
1106                  * up after manually setting a hw addr using
1107                  * `ip link set address`
1108                  */
1109                 memset(hw->mac_addr, 0, netdev->addr_len);
1110         } else {
1111                 /* copy the MAC address out of the EEPROM */
1112                 if (e1000_read_mac_addr(hw))
1113                         e_err(probe, "EEPROM Read Error\n");
1114         }
1115         /* don't block initialization here due to a bad MAC address */
1116         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1117         memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1118
1119         if (!is_valid_ether_addr(netdev->perm_addr))
1120                 e_err(probe, "Invalid MAC Address\n");
1121
1122
1123         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1124         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1125                           e1000_82547_tx_fifo_stall_task);
1126         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1127         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1128
1129         e1000_check_options(adapter);
1130
1131         /* Initial Wake on LAN setting
1132          * If APM wake is enabled in the EEPROM,
1133          * enable the ACPI Magic Packet filter
1134          */
1135
1136         switch (hw->mac_type) {
1137         case e1000_82542_rev2_0:
1138         case e1000_82542_rev2_1:
1139         case e1000_82543:
1140                 break;
1141         case e1000_82544:
1142                 e1000_read_eeprom(hw,
1143                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1144                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1145                 break;
1146         case e1000_82546:
1147         case e1000_82546_rev_3:
1148                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1149                         e1000_read_eeprom(hw,
1150                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1151                         break;
1152                 }
1153                 /* Fall Through */
1154         default:
1155                 e1000_read_eeprom(hw,
1156                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1157                 break;
1158         }
1159         if (eeprom_data & eeprom_apme_mask)
1160                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1161
1162         /* now that we have the eeprom settings, apply the special cases
1163          * where the eeprom may be wrong or the board simply won't support
1164          * wake on lan on a particular port */
1165         switch (pdev->device) {
1166         case E1000_DEV_ID_82546GB_PCIE:
1167                 adapter->eeprom_wol = 0;
1168                 break;
1169         case E1000_DEV_ID_82546EB_FIBER:
1170         case E1000_DEV_ID_82546GB_FIBER:
1171                 /* Wake events only supported on port A for dual fiber
1172                  * regardless of eeprom setting */
1173                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1174                         adapter->eeprom_wol = 0;
1175                 break;
1176         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1177                 /* if quad port adapter, disable WoL on all but port A */
1178                 if (global_quad_port_a != 0)
1179                         adapter->eeprom_wol = 0;
1180                 else
1181                         adapter->quad_port_a = true;
1182                 /* Reset for multiple quad port adapters */
1183                 if (++global_quad_port_a == 4)
1184                         global_quad_port_a = 0;
1185                 break;
1186         }
1187
1188         /* initialize the wol settings based on the eeprom settings */
1189         adapter->wol = adapter->eeprom_wol;
1190         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1191
1192         /* Auto detect PHY address */
1193         if (hw->mac_type == e1000_ce4100) {
1194                 for (i = 0; i < 32; i++) {
1195                         hw->phy_addr = i;
1196                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1197                         if (tmp == 0 || tmp == 0xFF) {
1198                                 if (i == 31)
1199                                         goto err_eeprom;
1200                                 continue;
1201                         } else
1202                                 break;
1203                 }
1204         }
1205
1206         /* reset the hardware with the new settings */
1207         e1000_reset(adapter);
1208
1209         strcpy(netdev->name, "eth%d");
1210         err = register_netdev(netdev);
1211         if (err)
1212                 goto err_register;
1213
1214         e1000_vlan_mode(netdev, netdev->features);
1215
1216         /* print bus type/speed/width info */
1217         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1218                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1219                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1220                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1221                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1222                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1223                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1224                netdev->dev_addr);
1225
1226         /* carrier off reporting is important to ethtool even BEFORE open */
1227         netif_carrier_off(netdev);
1228
1229         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1230
1231         cards_found++;
1232         return 0;
1233
1234 err_register:
1235 err_eeprom:
1236         e1000_phy_hw_reset(hw);
1237
1238         if (hw->flash_address)
1239                 iounmap(hw->flash_address);
1240         kfree(adapter->tx_ring);
1241         kfree(adapter->rx_ring);
1242 err_dma:
1243 err_sw_init:
1244 err_mdio_ioremap:
1245         iounmap(hw->ce4100_gbe_mdio_base_virt);
1246         iounmap(hw->hw_addr);
1247 err_ioremap:
1248         free_netdev(netdev);
1249 err_alloc_etherdev:
1250         pci_release_selected_regions(pdev, bars);
1251 err_pci_reg:
1252         pci_disable_device(pdev);
1253         return err;
1254 }
1255
1256 /**
1257  * e1000_remove - Device Removal Routine
1258  * @pdev: PCI device information struct
1259  *
1260  * e1000_remove is called by the PCI subsystem to alert the driver
1261  * that it should release a PCI device.  This could be caused by a
1262  * Hot-Plug event, or because the driver is going to be removed from
1263  * memory.
1264  **/
1265
1266 static void __devexit e1000_remove(struct pci_dev *pdev)
1267 {
1268         struct net_device *netdev = pci_get_drvdata(pdev);
1269         struct e1000_adapter *adapter = netdev_priv(netdev);
1270         struct e1000_hw *hw = &adapter->hw;
1271
1272         e1000_down_and_stop(adapter);
1273         e1000_release_manageability(adapter);
1274
1275         unregister_netdev(netdev);
1276
1277         e1000_phy_hw_reset(hw);
1278
1279         kfree(adapter->tx_ring);
1280         kfree(adapter->rx_ring);
1281
1282         if (hw->mac_type == e1000_ce4100)
1283                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1284         iounmap(hw->hw_addr);
1285         if (hw->flash_address)
1286                 iounmap(hw->flash_address);
1287         pci_release_selected_regions(pdev, adapter->bars);
1288
1289         free_netdev(netdev);
1290
1291         pci_disable_device(pdev);
1292 }
1293
1294 /**
1295  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1296  * @adapter: board private structure to initialize
1297  *
1298  * e1000_sw_init initializes the Adapter private data structure.
1299  * e1000_init_hw_struct MUST be called before this function
1300  **/
1301
1302 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1303 {
1304         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1305
1306         adapter->num_tx_queues = 1;
1307         adapter->num_rx_queues = 1;
1308
1309         if (e1000_alloc_queues(adapter)) {
1310                 e_err(probe, "Unable to allocate memory for queues\n");
1311                 return -ENOMEM;
1312         }
1313
1314         /* Explicitly disable IRQ since the NIC can be in any state. */
1315         e1000_irq_disable(adapter);
1316
1317         spin_lock_init(&adapter->stats_lock);
1318         mutex_init(&adapter->mutex);
1319
1320         set_bit(__E1000_DOWN, &adapter->flags);
1321
1322         return 0;
1323 }
1324
1325 /**
1326  * e1000_alloc_queues - Allocate memory for all rings
1327  * @adapter: board private structure to initialize
1328  *
1329  * We allocate one ring per queue at run-time since we don't know the
1330  * number of queues at compile-time.
1331  **/
1332
1333 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1334 {
1335         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1336                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1337         if (!adapter->tx_ring)
1338                 return -ENOMEM;
1339
1340         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1341                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1342         if (!adapter->rx_ring) {
1343                 kfree(adapter->tx_ring);
1344                 return -ENOMEM;
1345         }
1346
1347         return E1000_SUCCESS;
1348 }
1349
1350 /**
1351  * e1000_open - Called when a network interface is made active
1352  * @netdev: network interface device structure
1353  *
1354  * Returns 0 on success, negative value on failure
1355  *
1356  * The open entry point is called when a network interface is made
1357  * active by the system (IFF_UP).  At this point all resources needed
1358  * for transmit and receive operations are allocated, the interrupt
1359  * handler is registered with the OS, the watchdog task is started,
1360  * and the stack is notified that the interface is ready.
1361  **/
1362
1363 static int e1000_open(struct net_device *netdev)
1364 {
1365         struct e1000_adapter *adapter = netdev_priv(netdev);
1366         struct e1000_hw *hw = &adapter->hw;
1367         int err;
1368
1369         /* disallow open during test */
1370         if (test_bit(__E1000_TESTING, &adapter->flags))
1371                 return -EBUSY;
1372
1373         netif_carrier_off(netdev);
1374
1375         /* allocate transmit descriptors */
1376         err = e1000_setup_all_tx_resources(adapter);
1377         if (err)
1378                 goto err_setup_tx;
1379
1380         /* allocate receive descriptors */
1381         err = e1000_setup_all_rx_resources(adapter);
1382         if (err)
1383                 goto err_setup_rx;
1384
1385         e1000_power_up_phy(adapter);
1386
1387         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1388         if ((hw->mng_cookie.status &
1389                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1390                 e1000_update_mng_vlan(adapter);
1391         }
1392
1393         /* before we allocate an interrupt, we must be ready to handle it.
1394          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1395          * as soon as we call e1000_request_irq, so we have to set up our
1396          * clean_rx handler before we do so.  */
1397         e1000_configure(adapter);
1398
1399         err = e1000_request_irq(adapter);
1400         if (err)
1401                 goto err_req_irq;
1402
1403         /* From here on the code is the same as e1000_up() */
1404         clear_bit(__E1000_DOWN, &adapter->flags);
1405
1406         napi_enable(&adapter->napi);
1407
1408         e1000_irq_enable(adapter);
1409
1410         netif_start_queue(netdev);
1411
1412         /* fire a link status change interrupt to start the watchdog */
1413         ew32(ICS, E1000_ICS_LSC);
1414
1415         return E1000_SUCCESS;
1416
1417 err_req_irq:
1418         e1000_power_down_phy(adapter);
1419         e1000_free_all_rx_resources(adapter);
1420 err_setup_rx:
1421         e1000_free_all_tx_resources(adapter);
1422 err_setup_tx:
1423         e1000_reset(adapter);
1424
1425         return err;
1426 }
1427
1428 /**
1429  * e1000_close - Disables a network interface
1430  * @netdev: network interface device structure
1431  *
1432  * Returns 0, this is not allowed to fail
1433  *
1434  * The close entry point is called when an interface is de-activated
1435  * by the OS.  The hardware is still under the drivers control, but
1436  * needs to be disabled.  A global MAC reset is issued to stop the
1437  * hardware, and all transmit and receive resources are freed.
1438  **/
1439
1440 static int e1000_close(struct net_device *netdev)
1441 {
1442         struct e1000_adapter *adapter = netdev_priv(netdev);
1443         struct e1000_hw *hw = &adapter->hw;
1444
1445         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1446         e1000_down(adapter);
1447         e1000_power_down_phy(adapter);
1448         e1000_free_irq(adapter);
1449
1450         e1000_free_all_tx_resources(adapter);
1451         e1000_free_all_rx_resources(adapter);
1452
1453         /* kill the manageability VLAN ID if supported, but not if a VLAN
1454          * with the same ID is registered on the host OS (let 8021q kill it) */
1455         if ((hw->mng_cookie.status &
1456                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1457              !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1458                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1459         }
1460
1461         return 0;
1462 }
1463
1464 /**
1465  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1466  * @adapter: address of board private structure
1467  * @start: address of beginning of memory
1468  * @len: length of memory
1469  **/
1470 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1471                                   unsigned long len)
1472 {
1473         struct e1000_hw *hw = &adapter->hw;
1474         unsigned long begin = (unsigned long)start;
1475         unsigned long end = begin + len;
1476
1477         /* First rev 82545 and 82546 must not allow any memory
1478          * write location to cross a 64 KiB boundary due to errata 23 */
1479         if (hw->mac_type == e1000_82545 ||
1480             hw->mac_type == e1000_ce4100 ||
1481             hw->mac_type == e1000_82546) {
1482                 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1483         }
1484
1485         return true;
1486 }
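/* Worked example of the check above (illustrative addresses only, not taken
 * from real hardware state): the XOR of the first and last byte addresses
 * has a bit set at or above bit 16 exactly when the two addresses fall in
 * different 64 KiB-aligned regions.
 *
 *   begin = 0xF000, len = 0x2000  ->  end - 1 = 0x10FFF
 *   0xF000 ^ 0x10FFF = 0x1FFFF, >> 16 == 1  ->  crosses, returns false
 *
 *   begin = 0x4000, len = 0x2000  ->  end - 1 = 0x5FFF
 *   0x4000 ^ 0x5FFF = 0x01FFF, >> 16 == 0   ->  same region, returns true
 */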
1487
1488 /**
1489  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1490  * @adapter: board private structure
1491  * @txdr:    tx descriptor ring (for a specific queue) to setup
1492  *
1493  * Return 0 on success, negative on failure
1494  **/
1495
1496 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1497                                     struct e1000_tx_ring *txdr)
1498 {
1499         struct pci_dev *pdev = adapter->pdev;
1500         int size;
1501
1502         size = sizeof(struct e1000_buffer) * txdr->count;
1503         txdr->buffer_info = vzalloc(size);
1504         if (!txdr->buffer_info) {
1505                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1506                       "ring\n");
1507                 return -ENOMEM;
1508         }
1509
1510         /* round up to nearest 4K */
1511
1512         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1513         txdr->size = ALIGN(txdr->size, 4096);
1514
1515         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1516                                         GFP_KERNEL);
1517         if (!txdr->desc) {
1518 setup_tx_desc_die:
1519                 vfree(txdr->buffer_info);
1520                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1521                       "ring\n");
1522                 return -ENOMEM;
1523         }
1524
1525         /* Fix for errata 23, can't cross 64kB boundary */
1526         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1527                 void *olddesc = txdr->desc;
1528                 dma_addr_t olddma = txdr->dma;
1529                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1530                       txdr->size, txdr->desc);
1531                 /* Try again, without freeing the previous */
1532                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1533                                                 &txdr->dma, GFP_KERNEL);
1534                 /* Failed allocation, critical failure */
1535                 if (!txdr->desc) {
1536                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1537                                           olddma);
1538                         goto setup_tx_desc_die;
1539                 }
1540
1541                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1542                         /* give up */
1543                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1544                                           txdr->dma);
1545                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1546                                           olddma);
1547                         e_err(probe, "Unable to allocate aligned memory "
1548                               "for the transmit descriptor ring\n");
1549                         vfree(txdr->buffer_info);
1550                         return -ENOMEM;
1551                 } else {
1552                         /* Free old allocation, new allocation was successful */
1553                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1554                                           olddma);
1555                 }
1556         }
1557         memset(txdr->desc, 0, txdr->size);
1558
1559         txdr->next_to_use = 0;
1560         txdr->next_to_clean = 0;
1561
1562         return 0;
1563 }
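/* The errata-23 handling above uses a retry-without-freeing pattern: if the
 * first coherent buffer straddles a 64 KiB boundary, a second buffer is
 * allocated while the first is still held (so the allocator cannot hand back
 * the same region), and only afterwards is the unused copy freed.  A minimal
 * sketch with hypothetical helper names, error paths omitted:
 *
 *   first = dma_alloc_coherent(dev, size, &first_dma, GFP_KERNEL);
 *   if (crosses_64k(first, size)) {
 *           second = dma_alloc_coherent(dev, size, &second_dma, GFP_KERNEL);
 *           dma_free_coherent(dev, size, first, first_dma);
 *           ... keep 'second' if it passes the check, otherwise give up ...
 *   }
 */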
1564
1565 /**
1566  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1567  *                                (Descriptors) for all queues
1568  * @adapter: board private structure
1569  *
1570  * Return 0 on success, negative on failure
1571  **/
1572
1573 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1574 {
1575         int i, err = 0;
1576
1577         for (i = 0; i < adapter->num_tx_queues; i++) {
1578                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1579                 if (err) {
1580                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1581                         for (i--; i >= 0; i--)
1582                                 e1000_free_tx_resources(adapter,
1583                                                         &adapter->tx_ring[i]);
1584                         break;
1585                 }
1586         }
1587
1588         return err;
1589 }
1590
1591 /**
1592  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1593  * @adapter: board private structure
1594  *
1595  * Configure the Tx unit of the MAC after a reset.
1596  **/
1597
1598 static void e1000_configure_tx(struct e1000_adapter *adapter)
1599 {
1600         u64 tdba;
1601         struct e1000_hw *hw = &adapter->hw;
1602         u32 tdlen, tctl, tipg;
1603         u32 ipgr1, ipgr2;
1604
1605         /* Setup the HW Tx Head and Tail descriptor pointers */
1606
1607         switch (adapter->num_tx_queues) {
1608         case 1:
1609         default:
1610                 tdba = adapter->tx_ring[0].dma;
1611                 tdlen = adapter->tx_ring[0].count *
1612                         sizeof(struct e1000_tx_desc);
1613                 ew32(TDLEN, tdlen);
1614                 ew32(TDBAH, (tdba >> 32));
1615                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1616                 ew32(TDT, 0);
1617                 ew32(TDH, 0);
1618                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1619                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1620                 break;
1621         }
1622
1623         /* Set the default values for the Tx Inter Packet Gap timer */
1624         if ((hw->media_type == e1000_media_type_fiber ||
1625              hw->media_type == e1000_media_type_internal_serdes))
1626                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1627         else
1628                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1629
1630         switch (hw->mac_type) {
1631         case e1000_82542_rev2_0:
1632         case e1000_82542_rev2_1:
1633                 tipg = DEFAULT_82542_TIPG_IPGT;
1634                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1635                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1636                 break;
1637         default:
1638                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1639                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1640                 break;
1641         }
1642         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1643         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1644         ew32(TIPG, tipg);
1645
1646         /* Set the Tx Interrupt Delay register */
1647
1648         ew32(TIDV, adapter->tx_int_delay);
1649         if (hw->mac_type >= e1000_82540)
1650                 ew32(TADV, adapter->tx_abs_int_delay);
1651
1652         /* Program the Transmit Control Register */
1653
1654         tctl = er32(TCTL);
1655         tctl &= ~E1000_TCTL_CT;
1656         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1657                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1658
1659         e1000_config_collision_dist(hw);
1660
1661         /* Setup Transmit Descriptor Settings for eop descriptor */
1662         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1663
1664         /* only set IDE if we are delaying interrupts using the timers */
1665         if (adapter->tx_int_delay)
1666                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1667
1668         if (hw->mac_type < e1000_82543)
1669                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1670         else
1671                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1672
1673         /* Cache if we're 82544 running in PCI-X because we'll
1674          * need this to apply a workaround later in the send path. */
1675         if (hw->mac_type == e1000_82544 &&
1676             hw->bus_type == e1000_bus_type_pcix)
1677                 adapter->pcix_82544 = true;
1678
1679         ew32(TCTL, tctl);
1680
1681 }
1682
1683 /**
1684  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1685  * @adapter: board private structure
1686  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1687  *
1688  * Returns 0 on success, negative on failure
1689  **/
1690
1691 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1692                                     struct e1000_rx_ring *rxdr)
1693 {
1694         struct pci_dev *pdev = adapter->pdev;
1695         int size, desc_len;
1696
1697         size = sizeof(struct e1000_buffer) * rxdr->count;
1698         rxdr->buffer_info = vzalloc(size);
1699         if (!rxdr->buffer_info) {
1700                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1701                       "ring\n");
1702                 return -ENOMEM;
1703         }
1704
1705         desc_len = sizeof(struct e1000_rx_desc);
1706
1707         /* Round up to nearest 4K */
1708
1709         rxdr->size = rxdr->count * desc_len;
1710         rxdr->size = ALIGN(rxdr->size, 4096);
1711
1712         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1713                                         GFP_KERNEL);
1714
1715         if (!rxdr->desc) {
1716                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1717                       "ring\n");
1718 setup_rx_desc_die:
1719                 vfree(rxdr->buffer_info);
1720                 return -ENOMEM;
1721         }
1722
1723         /* Fix for errata 23, can't cross 64kB boundary */
1724         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1725                 void *olddesc = rxdr->desc;
1726                 dma_addr_t olddma = rxdr->dma;
1727                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1728                       rxdr->size, rxdr->desc);
1729                 /* Try again, without freeing the previous */
1730                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1731                                                 &rxdr->dma, GFP_KERNEL);
1732                 /* Failed allocation, critical failure */
1733                 if (!rxdr->desc) {
1734                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1735                                           olddma);
1736                         e_err(probe, "Unable to allocate memory for the Rx "
1737                               "descriptor ring\n");
1738                         goto setup_rx_desc_die;
1739                 }
1740
1741                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1742                         /* give up */
1743                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1744                                           rxdr->dma);
1745                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1746                                           olddma);
1747                         e_err(probe, "Unable to allocate aligned memory for "
1748                               "the Rx descriptor ring\n");
1749                         goto setup_rx_desc_die;
1750                 } else {
1751                         /* Free old allocation, new allocation was successful */
1752                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1753                                           olddma);
1754                 }
1755         }
1756         memset(rxdr->desc, 0, rxdr->size);
1757
1758         rxdr->next_to_clean = 0;
1759         rxdr->next_to_use = 0;
1760         rxdr->rx_skb_top = NULL;
1761
1762         return 0;
1763 }
1764
1765 /**
1766  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1767  *                                (Descriptors) for all queues
1768  * @adapter: board private structure
1769  *
1770  * Return 0 on success, negative on failure
1771  **/
1772
1773 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1774 {
1775         int i, err = 0;
1776
1777         for (i = 0; i < adapter->num_rx_queues; i++) {
1778                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1779                 if (err) {
1780                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1781                         for (i--; i >= 0; i--)
1782                                 e1000_free_rx_resources(adapter,
1783                                                         &adapter->rx_ring[i]);
1784                         break;
1785                 }
1786         }
1787
1788         return err;
1789 }
1790
1791 /**
1792  * e1000_setup_rctl - configure the receive control registers
1793  * @adapter: Board private structure
1794  **/
1795 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1796 {
1797         struct e1000_hw *hw = &adapter->hw;
1798         u32 rctl;
1799
1800         rctl = er32(RCTL);
1801
1802         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1803
1804         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1805                 E1000_RCTL_RDMTS_HALF |
1806                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1807
1808         if (hw->tbi_compatibility_on == 1)
1809                 rctl |= E1000_RCTL_SBP;
1810         else
1811                 rctl &= ~E1000_RCTL_SBP;
1812
1813         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1814                 rctl &= ~E1000_RCTL_LPE;
1815         else
1816                 rctl |= E1000_RCTL_LPE;
1817
1818         /* Setup buffer sizes */
1819         rctl &= ~E1000_RCTL_SZ_4096;
1820         rctl |= E1000_RCTL_BSEX;
1821         switch (adapter->rx_buffer_len) {
1822         case E1000_RXBUFFER_2048:
1823         default:
1824                 rctl |= E1000_RCTL_SZ_2048;
1825                 rctl &= ~E1000_RCTL_BSEX;
1826                 break;
1827         case E1000_RXBUFFER_4096:
1828                 rctl |= E1000_RCTL_SZ_4096;
1829                 break;
1830         case E1000_RXBUFFER_8192:
1831                 rctl |= E1000_RCTL_SZ_8192;
1832                 break;
1833         case E1000_RXBUFFER_16384:
1834                 rctl |= E1000_RCTL_SZ_16384;
1835                 break;
1836         }
1837
1838         ew32(RCTL, rctl);
1839 }
1840
1841 /**
1842  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1843  * @adapter: board private structure
1844  *
1845  * Configure the Rx unit of the MAC after a reset.
1846  **/
1847
1848 static void e1000_configure_rx(struct e1000_adapter *adapter)
1849 {
1850         u64 rdba;
1851         struct e1000_hw *hw = &adapter->hw;
1852         u32 rdlen, rctl, rxcsum;
1853
1854         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1855                 rdlen = adapter->rx_ring[0].count *
1856                         sizeof(struct e1000_rx_desc);
1857                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1858                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1859         } else {
1860                 rdlen = adapter->rx_ring[0].count *
1861                         sizeof(struct e1000_rx_desc);
1862                 adapter->clean_rx = e1000_clean_rx_irq;
1863                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1864         }
1865
1866         /* disable receives while setting up the descriptors */
1867         rctl = er32(RCTL);
1868         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1869
1870         /* set the Receive Delay Timer Register */
1871         ew32(RDTR, adapter->rx_int_delay);
1872
1873         if (hw->mac_type >= e1000_82540) {
1874                 ew32(RADV, adapter->rx_abs_int_delay);
1875                 if (adapter->itr_setting != 0)
1876                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1877         }
1878
1879         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1880          * the Base and Length of the Rx Descriptor Ring */
1881         switch (adapter->num_rx_queues) {
1882         case 1:
1883         default:
1884                 rdba = adapter->rx_ring[0].dma;
1885                 ew32(RDLEN, rdlen);
1886                 ew32(RDBAH, (rdba >> 32));
1887                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1888                 ew32(RDT, 0);
1889                 ew32(RDH, 0);
1890                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1891                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1892                 break;
1893         }
1894
1895         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1896         if (hw->mac_type >= e1000_82543) {
1897                 rxcsum = er32(RXCSUM);
1898                 if (adapter->rx_csum)
1899                         rxcsum |= E1000_RXCSUM_TUOFL;
1900                 else
1901                         /* don't need to clear IPPCSE as it defaults to 0 */
1902                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1903                 ew32(RXCSUM, rxcsum);
1904         }
1905
1906         /* Enable Receives */
1907         ew32(RCTL, rctl | E1000_RCTL_EN);
1908 }
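/* The ITR write above converts an interrupts-per-second target into the
 * register's 256 ns units.  A worked example with an assumed setting of
 * adapter->itr = 8000 interrupts/s:
 *
 *   1000000000 / (8000 * 256) = 488   (integer division)
 *   488 * 256 ns ~= 125 us == 1 / 8000 s between interrupts
 */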
1909
1910 /**
1911  * e1000_free_tx_resources - Free Tx Resources per Queue
1912  * @adapter: board private structure
1913  * @tx_ring: Tx descriptor ring for a specific queue
1914  *
1915  * Free all transmit software resources
1916  **/
1917
1918 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1919                                     struct e1000_tx_ring *tx_ring)
1920 {
1921         struct pci_dev *pdev = adapter->pdev;
1922
1923         e1000_clean_tx_ring(adapter, tx_ring);
1924
1925         vfree(tx_ring->buffer_info);
1926         tx_ring->buffer_info = NULL;
1927
1928         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1929                           tx_ring->dma);
1930
1931         tx_ring->desc = NULL;
1932 }
1933
1934 /**
1935  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1936  * @adapter: board private structure
1937  *
1938  * Free all transmit software resources
1939  **/
1940
1941 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1942 {
1943         int i;
1944
1945         for (i = 0; i < adapter->num_tx_queues; i++)
1946                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1947 }
1948
1949 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1950                                              struct e1000_buffer *buffer_info)
1951 {
1952         if (buffer_info->dma) {
1953                 if (buffer_info->mapped_as_page)
1954                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1955                                        buffer_info->length, DMA_TO_DEVICE);
1956                 else
1957                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1958                                          buffer_info->length,
1959                                          DMA_TO_DEVICE);
1960                 buffer_info->dma = 0;
1961         }
1962         if (buffer_info->skb) {
1963                 dev_kfree_skb_any(buffer_info->skb);
1964                 buffer_info->skb = NULL;
1965         }
1966         buffer_info->time_stamp = 0;
1967         /* buffer_info must be completely set up in the transmit path */
1968 }
1969
1970 /**
1971  * e1000_clean_tx_ring - Free Tx Buffers
1972  * @adapter: board private structure
1973  * @tx_ring: ring to be cleaned
1974  **/
1975
1976 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1977                                 struct e1000_tx_ring *tx_ring)
1978 {
1979         struct e1000_hw *hw = &adapter->hw;
1980         struct e1000_buffer *buffer_info;
1981         unsigned long size;
1982         unsigned int i;
1983
1984         /* Free all the Tx ring sk_buffs */
1985
1986         for (i = 0; i < tx_ring->count; i++) {
1987                 buffer_info = &tx_ring->buffer_info[i];
1988                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1989         }
1990
1991         size = sizeof(struct e1000_buffer) * tx_ring->count;
1992         memset(tx_ring->buffer_info, 0, size);
1993
1994         /* Zero out the descriptor ring */
1995
1996         memset(tx_ring->desc, 0, tx_ring->size);
1997
1998         tx_ring->next_to_use = 0;
1999         tx_ring->next_to_clean = 0;
2000         tx_ring->last_tx_tso = false;
2001
2002         writel(0, hw->hw_addr + tx_ring->tdh);
2003         writel(0, hw->hw_addr + tx_ring->tdt);
2004 }
2005
2006 /**
2007  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2008  * @adapter: board private structure
2009  **/
2010
2011 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2012 {
2013         int i;
2014
2015         for (i = 0; i < adapter->num_tx_queues; i++)
2016                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2017 }
2018
2019 /**
2020  * e1000_free_rx_resources - Free Rx Resources
2021  * @adapter: board private structure
2022  * @rx_ring: ring to clean the resources from
2023  *
2024  * Free all receive software resources
2025  **/
2026
2027 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2028                                     struct e1000_rx_ring *rx_ring)
2029 {
2030         struct pci_dev *pdev = adapter->pdev;
2031
2032         e1000_clean_rx_ring(adapter, rx_ring);
2033
2034         vfree(rx_ring->buffer_info);
2035         rx_ring->buffer_info = NULL;
2036
2037         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2038                           rx_ring->dma);
2039
2040         rx_ring->desc = NULL;
2041 }
2042
2043 /**
2044  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2045  * @adapter: board private structure
2046  *
2047  * Free all receive software resources
2048  **/
2049
2050 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2051 {
2052         int i;
2053
2054         for (i = 0; i < adapter->num_rx_queues; i++)
2055                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2056 }
2057
2058 /**
2059  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2060  * @adapter: board private structure
2061  * @rx_ring: ring to free buffers from
2062  **/
2063
2064 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2065                                 struct e1000_rx_ring *rx_ring)
2066 {
2067         struct e1000_hw *hw = &adapter->hw;
2068         struct e1000_buffer *buffer_info;
2069         struct pci_dev *pdev = adapter->pdev;
2070         unsigned long size;
2071         unsigned int i;
2072
2073         /* Free all the Rx ring sk_buffs */
2074         for (i = 0; i < rx_ring->count; i++) {
2075                 buffer_info = &rx_ring->buffer_info[i];
2076                 if (buffer_info->dma &&
2077                     adapter->clean_rx == e1000_clean_rx_irq) {
2078                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2079                                          buffer_info->length,
2080                                          DMA_FROM_DEVICE);
2081                 } else if (buffer_info->dma &&
2082                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2083                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2084                                        buffer_info->length,
2085                                        DMA_FROM_DEVICE);
2086                 }
2087
2088                 buffer_info->dma = 0;
2089                 if (buffer_info->page) {
2090                         put_page(buffer_info->page);
2091                         buffer_info->page = NULL;
2092                 }
2093                 if (buffer_info->skb) {
2094                         dev_kfree_skb(buffer_info->skb);
2095                         buffer_info->skb = NULL;
2096                 }
2097         }
2098
2099         /* there may also be some cached data from a chained receive */
2100         if (rx_ring->rx_skb_top) {
2101                 dev_kfree_skb(rx_ring->rx_skb_top);
2102                 rx_ring->rx_skb_top = NULL;
2103         }
2104
2105         size = sizeof(struct e1000_buffer) * rx_ring->count;
2106         memset(rx_ring->buffer_info, 0, size);
2107
2108         /* Zero out the descriptor ring */
2109         memset(rx_ring->desc, 0, rx_ring->size);
2110
2111         rx_ring->next_to_clean = 0;
2112         rx_ring->next_to_use = 0;
2113
2114         writel(0, hw->hw_addr + rx_ring->rdh);
2115         writel(0, hw->hw_addr + rx_ring->rdt);
2116 }
2117
2118 /**
2119  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2120  * @adapter: board private structure
2121  **/
2122
2123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2124 {
2125         int i;
2126
2127         for (i = 0; i < adapter->num_rx_queues; i++)
2128                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2129 }
2130
2131 /* The 82542 2.0 (revision 2) needs to have the receive unit held in reset
2132  * and Memory Write and Invalidate (MWI) disabled for certain operations
2133  */
2134 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2135 {
2136         struct e1000_hw *hw = &adapter->hw;
2137         struct net_device *netdev = adapter->netdev;
2138         u32 rctl;
2139
2140         e1000_pci_clear_mwi(hw);
2141
2142         rctl = er32(RCTL);
2143         rctl |= E1000_RCTL_RST;
2144         ew32(RCTL, rctl);
2145         E1000_WRITE_FLUSH();
2146         mdelay(5);
2147
2148         if (netif_running(netdev))
2149                 e1000_clean_all_rx_rings(adapter);
2150 }
2151
2152 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2153 {
2154         struct e1000_hw *hw = &adapter->hw;
2155         struct net_device *netdev = adapter->netdev;
2156         u32 rctl;
2157
2158         rctl = er32(RCTL);
2159         rctl &= ~E1000_RCTL_RST;
2160         ew32(RCTL, rctl);
2161         E1000_WRITE_FLUSH();
2162         mdelay(5);
2163
2164         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2165                 e1000_pci_set_mwi(hw);
2166
2167         if (netif_running(netdev)) {
2168                 /* No need to loop, because 82542 supports only 1 queue */
2169                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2170                 e1000_configure_rx(adapter);
2171                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2172         }
2173 }
2174
2175 /**
2176  * e1000_set_mac - Change the Ethernet Address of the NIC
2177  * @netdev: network interface device structure
2178  * @p: pointer to an address structure
2179  *
2180  * Returns 0 on success, negative on failure
2181  **/
2182
2183 static int e1000_set_mac(struct net_device *netdev, void *p)
2184 {
2185         struct e1000_adapter *adapter = netdev_priv(netdev);
2186         struct e1000_hw *hw = &adapter->hw;
2187         struct sockaddr *addr = p;
2188
2189         if (!is_valid_ether_addr(addr->sa_data))
2190                 return -EADDRNOTAVAIL;
2191
2192         /* 82542 2.0 needs to be in reset to write receive address registers */
2193
2194         if (hw->mac_type == e1000_82542_rev2_0)
2195                 e1000_enter_82542_rst(adapter);
2196
2197         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2198         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2199
2200         e1000_rar_set(hw, hw->mac_addr, 0);
2201
2202         if (hw->mac_type == e1000_82542_rev2_0)
2203                 e1000_leave_82542_rst(adapter);
2204
2205         return 0;
2206 }
2207
2208 /**
2209  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2210  * @netdev: network interface device structure
2211  *
2212  * The set_rx_mode entry point is called whenever the unicast or multicast
2213  * address lists or the network interface flags are updated. This routine is
2214  * responsible for configuring the hardware for proper unicast, multicast,
2215  * promiscuous mode, and all-multi behavior.
2216  **/
2217
2218 static void e1000_set_rx_mode(struct net_device *netdev)
2219 {
2220         struct e1000_adapter *adapter = netdev_priv(netdev);
2221         struct e1000_hw *hw = &adapter->hw;
2222         struct netdev_hw_addr *ha;
2223         bool use_uc = false;
2224         u32 rctl;
2225         u32 hash_value;
2226         int i, rar_entries = E1000_RAR_ENTRIES;
2227         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2228         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2229
2230         if (!mcarray) {
2231                 e_err(probe, "memory allocation failed\n");
2232                 return;
2233         }
2234
2235         /* Check for Promiscuous and All Multicast modes */
2236
2237         rctl = er32(RCTL);
2238
2239         if (netdev->flags & IFF_PROMISC) {
2240                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2241                 rctl &= ~E1000_RCTL_VFE;
2242         } else {
2243                 if (netdev->flags & IFF_ALLMULTI)
2244                         rctl |= E1000_RCTL_MPE;
2245                 else
2246                         rctl &= ~E1000_RCTL_MPE;
2247                 /* Enable VLAN filter if there is a VLAN */
2248                 if (e1000_vlan_used(adapter))
2249                         rctl |= E1000_RCTL_VFE;
2250         }
2251
2252         if (netdev_uc_count(netdev) > rar_entries - 1) {
2253                 rctl |= E1000_RCTL_UPE;
2254         } else if (!(netdev->flags & IFF_PROMISC)) {
2255                 rctl &= ~E1000_RCTL_UPE;
2256                 use_uc = true;
2257         }
2258
2259         ew32(RCTL, rctl);
2260
2261         /* 82542 2.0 needs to be in reset to write receive address registers */
2262
2263         if (hw->mac_type == e1000_82542_rev2_0)
2264                 e1000_enter_82542_rst(adapter);
2265
2266         /* load the first 14 addresses into the exact filters 1-14. Unicast
2267          * addresses take precedence over multicast addresses to avoid
2268          * disabling unicast filtering when possible.
2269          *
2270          * RAR 0 is used for the station MAC address;
2271          * if there are not 14 addresses, go ahead and clear the remaining filters
2272          */
2273         i = 1;
2274         if (use_uc)
2275                 netdev_for_each_uc_addr(ha, netdev) {
2276                         if (i == rar_entries)
2277                                 break;
2278                         e1000_rar_set(hw, ha->addr, i++);
2279                 }
2280
2281         netdev_for_each_mc_addr(ha, netdev) {
2282                 if (i == rar_entries) {
2283                         /* load any remaining addresses into the hash table */
2284                         u32 hash_reg, hash_bit, mta;
2285                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2286                         hash_reg = (hash_value >> 5) & 0x7F;
2287                         hash_bit = hash_value & 0x1F;
2288                         mta = (1 << hash_bit);
2289                         mcarray[hash_reg] |= mta;
2290                 } else {
2291                         e1000_rar_set(hw, ha->addr, i++);
2292                 }
2293         }
2294
2295         for (; i < rar_entries; i++) {
2296                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2297                 E1000_WRITE_FLUSH();
2298                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2299                 E1000_WRITE_FLUSH();
2300         }
2301
2302         /* write the hash table completely; writing the registers in descending
2303          * order works around write-combining chipsets and avoids flushing each write */
2304         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2305                 /*
2306                  * The 82544 has an erratum where writing odd MTA offsets
2307                  * overwrites the previous even offset, but writing
2308                  * backwards over the range solves the issue by always
2309                  * writing the odd offset first
2310                  */
2311                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2312         }
2313         E1000_WRITE_FLUSH();
2314
2315         if (hw->mac_type == e1000_82542_rev2_0)
2316                 e1000_leave_82542_rst(adapter);
2317
2318         kfree(mcarray);
2319 }
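/* Illustration of the hash-table fallback in e1000_set_rx_mode() above, using
 * an arbitrary example hash value (the value returned by e1000_hash_mc_addr()
 * depends on the address and the programmed filter type):
 *
 *   hash_value = 0x563
 *   hash_reg   = (0x563 >> 5) & 0x7F = 43   (which 32-bit MTA register)
 *   hash_bit   =  0x563       & 0x1F = 3    (which bit in that register)
 *   mcarray[43] |= 1 << 3;
 *
 * i.e. the upper bits of the 12-bit hash pick one of the 128 multicast table
 * array registers and the low 5 bits pick a bit within it.
 */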
2320
2321 /**
2322  * e1000_update_phy_info_task - get phy info
2323  * @work: work struct contained inside adapter struct
2324  *
2325  * Need to wait a few seconds after link up to get diagnostic information from
2326  * the phy
2327  */
2328 static void e1000_update_phy_info_task(struct work_struct *work)
2329 {
2330         struct e1000_adapter *adapter = container_of(work,
2331                                                      struct e1000_adapter,
2332                                                      phy_info_task.work);
2333         if (test_bit(__E1000_DOWN, &adapter->flags))
2334                 return;
2335         mutex_lock(&adapter->mutex);
2336         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2337         mutex_unlock(&adapter->mutex);
2338 }
2339
2340 /**
2341  * e1000_82547_tx_fifo_stall_task - task to complete work
2342  * @work: work struct contained inside adapter struct
2343  **/
2344 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2345 {
2346         struct e1000_adapter *adapter = container_of(work,
2347                                                      struct e1000_adapter,
2348                                                      fifo_stall_task.work);
2349         struct e1000_hw *hw = &adapter->hw;
2350         struct net_device *netdev = adapter->netdev;
2351         u32 tctl;
2352
2353         if (test_bit(__E1000_DOWN, &adapter->flags))
2354                 return;
2355         mutex_lock(&adapter->mutex);
2356         if (atomic_read(&adapter->tx_fifo_stall)) {
2357                 if ((er32(TDT) == er32(TDH)) &&
2358                    (er32(TDFT) == er32(TDFH)) &&
2359                    (er32(TDFTS) == er32(TDFHS))) {
2360                         tctl = er32(TCTL);
2361                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2362                         ew32(TDFT, adapter->tx_head_addr);
2363                         ew32(TDFH, adapter->tx_head_addr);
2364                         ew32(TDFTS, adapter->tx_head_addr);
2365                         ew32(TDFHS, adapter->tx_head_addr);
2366                         ew32(TCTL, tctl);
2367                         E1000_WRITE_FLUSH();
2368
2369                         adapter->tx_fifo_head = 0;
2370                         atomic_set(&adapter->tx_fifo_stall, 0);
2371                         netif_wake_queue(netdev);
2372                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2373                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2374                 }
2375         }
2376         mutex_unlock(&adapter->mutex);
2377 }
2378
2379 bool e1000_has_link(struct e1000_adapter *adapter)
2380 {
2381         struct e1000_hw *hw = &adapter->hw;
2382         bool link_active = false;
2383
2384         /* get_link_status is set on LSC (link status) interrupt or rx
2385          * sequence error interrupt (except on intel ce4100).
2386          * For copper adapters ONLY, the link is not reported up until
2387          * e1000_check_for_link establishes link and clears
2388          * get_link_status.
2389          */
2390         switch (hw->media_type) {
2391         case e1000_media_type_copper:
2392                 if (hw->mac_type == e1000_ce4100)
2393                         hw->get_link_status = 1;
2394                 if (hw->get_link_status) {
2395                         e1000_check_for_link(hw);
2396                         link_active = !hw->get_link_status;
2397                 } else {
2398                         link_active = true;
2399                 }
2400                 break;
2401         case e1000_media_type_fiber:
2402                 e1000_check_for_link(hw);
2403                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2404                 break;
2405         case e1000_media_type_internal_serdes:
2406                 e1000_check_for_link(hw);
2407                 link_active = hw->serdes_has_link;
2408                 break;
2409         default:
2410                 break;
2411         }
2412
2413         return link_active;
2414 }
2415
2416 /**
2417  * e1000_watchdog - work function
2418  * @work: work struct contained inside adapter struct
2419  **/
2420 static void e1000_watchdog(struct work_struct *work)
2421 {
2422         struct e1000_adapter *adapter = container_of(work,
2423                                                      struct e1000_adapter,
2424                                                      watchdog_task.work);
2425         struct e1000_hw *hw = &adapter->hw;
2426         struct net_device *netdev = adapter->netdev;
2427         struct e1000_tx_ring *txdr = adapter->tx_ring;
2428         u32 link, tctl;
2429
2430         if (test_bit(__E1000_DOWN, &adapter->flags))
2431                 return;
2432
2433         mutex_lock(&adapter->mutex);
2434         link = e1000_has_link(adapter);
2435         if ((netif_carrier_ok(netdev)) && link)
2436                 goto link_up;
2437
2438         if (link) {
2439                 if (!netif_carrier_ok(netdev)) {
2440                         u32 ctrl;
2441                         bool txb2b = true;
2442                         /* update snapshot of PHY registers on LSC */
2443                         e1000_get_speed_and_duplex(hw,
2444                                                    &adapter->link_speed,
2445                                                    &adapter->link_duplex);
2446
2447                         ctrl = er32(CTRL);
2448                         pr_info("%s NIC Link is Up %d Mbps %s, "
2449                                 "Flow Control: %s\n",
2450                                 netdev->name,
2451                                 adapter->link_speed,
2452                                 adapter->link_duplex == FULL_DUPLEX ?
2453                                 "Full Duplex" : "Half Duplex",
2454                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2455                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2456                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2457                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2458
2459                         /* adjust timeout factor according to speed/duplex */
2460                         adapter->tx_timeout_factor = 1;
2461                         switch (adapter->link_speed) {
2462                         case SPEED_10:
2463                                 txb2b = false;
2464                                 adapter->tx_timeout_factor = 16;
2465                                 break;
2466                         case SPEED_100:
2467                                 txb2b = false;
2468                                 /* maybe add some timeout factor ? */
2469                                 break;
2470                         }
2471
2472                         /* enable transmits in the hardware */
2473                         tctl = er32(TCTL);
2474                         tctl |= E1000_TCTL_EN;
2475                         ew32(TCTL, tctl);
2476
2477                         netif_carrier_on(netdev);
2478                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2479                                 schedule_delayed_work(&adapter->phy_info_task,
2480                                                       2 * HZ);
2481                         adapter->smartspeed = 0;
2482                 }
2483         } else {
2484                 if (netif_carrier_ok(netdev)) {
2485                         adapter->link_speed = 0;
2486                         adapter->link_duplex = 0;
2487                         pr_info("%s NIC Link is Down\n",
2488                                 netdev->name);
2489                         netif_carrier_off(netdev);
2490
2491                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2492                                 schedule_delayed_work(&adapter->phy_info_task,
2493                                                       2 * HZ);
2494                 }
2495
2496                 e1000_smartspeed(adapter);
2497         }
2498
2499 link_up:
2500         e1000_update_stats(adapter);
2501
2502         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2503         adapter->tpt_old = adapter->stats.tpt;
2504         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2505         adapter->colc_old = adapter->stats.colc;
2506
2507         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2508         adapter->gorcl_old = adapter->stats.gorcl;
2509         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2510         adapter->gotcl_old = adapter->stats.gotcl;
2511
2512         e1000_update_adaptive(hw);
2513
2514         if (!netif_carrier_ok(netdev)) {
2515                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2516                         /* We've lost link, so the controller stops DMA,
2517                          * but we've got queued Tx work that's never going
2518                          * to get done, so reset controller to flush Tx.
2519                          * (Do the reset outside of interrupt context). */
2520                         adapter->tx_timeout_count++;
2521                         schedule_work(&adapter->reset_task);
2522                         /* exit immediately since reset is imminent */
2523                         goto unlock;
2524                 }
2525         }
2526
2527         /* Simple mode for Interrupt Throttle Rate (ITR) */
2528         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2529                 /*
2530                  * Symmetric Tx/Rx gets a reduced ITR=2000;
2531                  * Total asymmetrical Tx or Rx gets ITR=8000;
2532                  * everyone else is between 2000-8000.
2533                  */
2534                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2535                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2536                             adapter->gotcl - adapter->gorcl :
2537                             adapter->gorcl - adapter->gotcl) / 10000;
2538                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2539
2540                 ew32(ITR, 1000000000 / (itr * 256));
2541         }
2542
2543         /* Cause software interrupt to ensure rx ring is cleaned */
2544         ew32(ICS, E1000_ICS_RXDMT0);
2545
2546         /* Force detection of hung controller every watchdog period */
2547         adapter->detect_tx_hung = true;
2548
2549         /* Reschedule the task */
2550         if (!test_bit(__E1000_DOWN, &adapter->flags))
2551                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2552
2553 unlock:
2554         mutex_unlock(&adapter->mutex);
2555 }
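/* Rough arithmetic behind the "simple mode" ITR block above (values are
 * illustrative): with goc = (gotcl + gorcl) / 10000 and
 * dif = |gotcl - gorcl| / 10000, the rate is
 *
 *   itr = dif * 6000 / goc + 2000
 *
 *   symmetric traffic (gotcl == gorcl)  ->  dif == 0        ->  itr = 2000
 *   one-sided traffic (gorcl == 0)      ->  dif == goc      ->  itr = 8000
 *   a 3:1 Tx:Rx mix                     ->  dif/goc ~= 1/2  ->  itr ~= 5000
 */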
2556
2557 enum latency_range {
2558         lowest_latency = 0,
2559         low_latency = 1,
2560         bulk_latency = 2,
2561         latency_invalid = 255
2562 };
2563
2564 /**
2565  * e1000_update_itr - update the dynamic ITR value based on statistics
2566  * @adapter: pointer to adapter
2567  * @itr_setting: current adapter->itr
2568  * @packets: the number of packets during this measurement interval
2569  * @bytes: the number of bytes during this measurement interval
2570  *
2571  *      Returns a new ITR value based on packets and byte
2572  *      counts during the last interrupt.  The advantage of per interrupt
2573  *      computation is faster updates and more accurate ITR for the current
2574  *      traffic pattern.  Constants in this function were computed
2575  *      based on theoretical maximum wire speed and thresholds were set based
2576  *      on testing data as well as attempting to minimize response time
2577  *      while increasing bulk throughput.
2578  *      This functionality is controlled by the InterruptThrottleRate module
2579  *      parameter (see e1000_param.c).
2580  **/
2581 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2582                                      u16 itr_setting, int packets, int bytes)
2583 {
2584         unsigned int retval = itr_setting;
2585         struct e1000_hw *hw = &adapter->hw;
2586
2587         if (unlikely(hw->mac_type < e1000_82540))
2588                 goto update_itr_done;
2589
2590         if (packets == 0)
2591                 goto update_itr_done;
2592
2593         switch (itr_setting) {
2594         case lowest_latency:
2595                 /* jumbo frames get bulk treatment */
2596                 if (bytes/packets > 8000)
2597                         retval = bulk_latency;
2598                 else if ((packets < 5) && (bytes > 512))
2599                         retval = low_latency;
2600                 break;
2601         case low_latency:  /* 50 usec aka 20000 ints/s */
2602                 if (bytes > 10000) {
2603                         /* jumbo frames need bulk latency setting */
2604                         if (bytes/packets > 8000)
2605                                 retval = bulk_latency;
2606                         else if ((packets < 10) || ((bytes/packets) > 1200))
2607                                 retval = bulk_latency;
2608                         else if ((packets > 35))
2609                                 retval = lowest_latency;
2610                 } else if (bytes/packets > 2000)
2611                         retval = bulk_latency;
2612                 else if (packets <= 2 && bytes < 512)
2613                         retval = lowest_latency;
2614                 break;
2615         case bulk_latency: /* 250 usec aka 4000 ints/s */
2616                 if (bytes > 25000) {
2617                         if (packets > 35)
2618                                 retval = low_latency;
2619                 } else if (bytes < 6000) {
2620                         retval = low_latency;
2621                 }
2622                 break;
2623         }
2624
2625 update_itr_done:
2626         return retval;
2627 }
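/* Example of the classification above (illustrative interval counts):
 * starting from low_latency with packets = 20 and bytes = 15000, bytes is
 * above 10000 and bytes/packets = 750, so none of the bulk or lowest tests
 * fire and the range stays at low_latency; the same byte count spread over
 * only 5 packets (packets < 10) would move it to bulk_latency.
 */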
2628
2629 static void e1000_set_itr(struct e1000_adapter *adapter)
2630 {
2631         struct e1000_hw *hw = &adapter->hw;
2632         u16 current_itr;
2633         u32 new_itr = adapter->itr;
2634
2635         if (unlikely(hw->mac_type < e1000_82540))
2636                 return;
2637
2638         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2639         if (unlikely(adapter->link_speed != SPEED_1000)) {
2640                 current_itr = 0;
2641                 new_itr = 4000;
2642                 goto set_itr_now;
2643         }
2644
2645         adapter->tx_itr = e1000_update_itr(adapter,
2646                                     adapter->tx_itr,
2647                                     adapter->total_tx_packets,
2648                                     adapter->total_tx_bytes);
2649         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2650         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2651                 adapter->tx_itr = low_latency;
2652
2653         adapter->rx_itr = e1000_update_itr(adapter,
2654                                     adapter->rx_itr,
2655                                     adapter->total_rx_packets,
2656                                     adapter->total_rx_bytes);
2657         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2658         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2659                 adapter->rx_itr = low_latency;
2660
2661         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2662
2663         switch (current_itr) {
2664         /* counts and packets in update_itr are dependent on these numbers */
2665         case lowest_latency:
2666                 new_itr = 70000;
2667                 break;
2668         case low_latency:
2669                 new_itr = 20000; /* aka hwitr = ~200 */
2670                 break;
2671         case bulk_latency:
2672                 new_itr = 4000;
2673                 break;
2674         default:
2675                 break;
2676         }
2677
2678 set_itr_now:
2679         if (new_itr != adapter->itr) {
2680                 /* this attempts to bias the interrupt rate towards Bulk
2681                  * by adding intermediate steps when interrupt rate is
2682                  * increasing */
2683                 new_itr = new_itr > adapter->itr ?
2684                              min(adapter->itr + (new_itr >> 2), new_itr) :
2685                              new_itr;
2686                 adapter->itr = new_itr;
2687                 ew32(ITR, 1000000000 / (new_itr * 256));
2688         }
2689 }
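/* The final step above ramps the rate up gradually.  Assuming the current
 * adapter->itr is 4000 (bulk) and the new target is 20000 (low latency):
 *
 *   min(4000 + (20000 >> 2), 20000) = min(9000, 20000) = 9000
 *
 * so the rate only climbs part of the way per pass, while a decrease
 * (e.g. 20000 -> 4000) is applied immediately.
 */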
2690
2691 #define E1000_TX_FLAGS_CSUM             0x00000001
2692 #define E1000_TX_FLAGS_VLAN             0x00000002
2693 #define E1000_TX_FLAGS_TSO              0x00000004
2694 #define E1000_TX_FLAGS_IPV4             0x00000008
2695 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2696 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2697
2698 static int e1000_tso(struct e1000_adapter *adapter,
2699                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2700 {
2701         struct e1000_context_desc *context_desc;
2702         struct e1000_buffer *buffer_info;
2703         unsigned int i;
2704         u32 cmd_length = 0;
2705         u16 ipcse = 0, tucse, mss;
2706         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2707         int err;
2708
2709         if (skb_is_gso(skb)) {
2710                 if (skb_header_cloned(skb)) {
2711                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2712                         if (err)
2713                                 return err;
2714                 }
2715
2716                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2717                 mss = skb_shinfo(skb)->gso_size;
2718                 if (skb->protocol == htons(ETH_P_IP)) {
2719                         struct iphdr *iph = ip_hdr(skb);
2720                         iph->tot_len = 0;
2721                         iph->check = 0;
2722                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2723                                                                  iph->daddr, 0,
2724                                                                  IPPROTO_TCP,
2725                                                                  0);
2726                         cmd_length = E1000_TXD_CMD_IP;
2727                         ipcse = skb_transport_offset(skb) - 1;
2728                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2729                         ipv6_hdr(skb)->payload_len = 0;
2730                         tcp_hdr(skb)->check =
2731                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2732                                                  &ipv6_hdr(skb)->daddr,
2733                                                  0, IPPROTO_TCP, 0);
2734                         ipcse = 0;
2735                 }
2736                 ipcss = skb_network_offset(skb);
2737                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2738                 tucss = skb_transport_offset(skb);
2739                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2740                 tucse = 0;
2741
2742                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2743                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2744
2745                 i = tx_ring->next_to_use;
2746                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2747                 buffer_info = &tx_ring->buffer_info[i];
2748
2749                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2750                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2751                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2752                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2753                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2754                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2755                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2756                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2757                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2758
2759                 buffer_info->time_stamp = jiffies;
2760                 buffer_info->next_to_watch = i;
2761
2762                 if (++i == tx_ring->count) i = 0;
2763                 tx_ring->next_to_use = i;
2764
2765                 return true;
2766         }
2767         return false;
2768 }
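/* Worked example of the context-descriptor offsets computed above for an
 * assumed untagged Ethernet + IPv4 + TCP frame with 20-byte IP and TCP
 * headers (other encapsulations give different values):
 *
 *   ipcss = 14              (IP header follows the 14-byte Ethernet header)
 *   ipcso = 14 + 10 = 24    (IP checksum field, 10 bytes into the IP header)
 *   ipcse = 34 - 1 = 33     (last byte of the IP header)
 *   tucss = 34              (TCP header start)
 *   tucso = 34 + 16 = 50    (TCP checksum field offset)
 *   hdr_len = 34 + 20 = 54  (header bytes replicated in every segment)
 */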
2769
2770 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2771                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2772 {
2773         struct e1000_context_desc *context_desc;
2774         struct e1000_buffer *buffer_info;
2775         unsigned int i;
2776         u8 css;
2777         u32 cmd_len = E1000_TXD_CMD_DEXT;
2778
2779         if (skb->ip_summed != CHECKSUM_PARTIAL)
2780                 return false;
2781
2782         switch (skb->protocol) {
2783         case cpu_to_be16(ETH_P_IP):
2784                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2785                         cmd_len |= E1000_TXD_CMD_TCP;
2786                 break;
2787         case cpu_to_be16(ETH_P_IPV6):
2788                 /* XXX not handling all IPV6 headers */
2789                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2790                         cmd_len |= E1000_TXD_CMD_TCP;
2791                 break;
2792         default:
2793                 if (unlikely(net_ratelimit()))
2794                         e_warn(drv, "checksum_partial proto=%x!\n",
2795                                skb->protocol);
2796                 break;
2797         }
2798
2799         css = skb_checksum_start_offset(skb);
2800
2801         i = tx_ring->next_to_use;
2802         buffer_info = &tx_ring->buffer_info[i];
2803         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2804
2805         context_desc->lower_setup.ip_config = 0;
2806         context_desc->upper_setup.tcp_fields.tucss = css;
2807         context_desc->upper_setup.tcp_fields.tucso =
2808                 css + skb->csum_offset;
2809         context_desc->upper_setup.tcp_fields.tucse = 0;
2810         context_desc->tcp_seg_setup.data = 0;
2811         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2812
2813         buffer_info->time_stamp = jiffies;
2814         buffer_info->next_to_watch = i;
2815
2816         if (unlikely(++i == tx_ring->count)) i = 0;
2817         tx_ring->next_to_use = i;
2818
2819         return true;
2820 }
2821
2822 #define E1000_MAX_TXD_PWR       12
2823 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2824
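     /* e1000_tx_map - DMA-map the skb head and all paged fragments into the
      * ring's buffer_info entries, applying the per-controller sizing
      * workarounds.  Returns the number of descriptors consumed, or 0 if a
      * DMA mapping failed (in which case the mappings made so far are unwound).
      */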
2825 static int e1000_tx_map(struct e1000_adapter *adapter,
2826                         struct e1000_tx_ring *tx_ring,
2827                         struct sk_buff *skb, unsigned int first,
2828                         unsigned int max_per_txd, unsigned int nr_frags,
2829                         unsigned int mss)
2830 {
2831         struct e1000_hw *hw = &adapter->hw;
2832         struct pci_dev *pdev = adapter->pdev;
2833         struct e1000_buffer *buffer_info;
2834         unsigned int len = skb_headlen(skb);
2835         unsigned int offset = 0, size, count = 0, i;
2836         unsigned int f, bytecount, segs;
2837
2838         i = tx_ring->next_to_use;
2839
2840         while (len) {
2841                 buffer_info = &tx_ring->buffer_info[i];
2842                 size = min(len, max_per_txd);
2843                 /* Workaround for controller erratum: a descriptor for a
2844                  * non-TSO packet in a linear skb that follows a TSO packet
2845                  * gets written back prematurely, before the data is fully
2846                  * DMA'd to the controller */
2847                 if (!skb->data_len && tx_ring->last_tx_tso &&
2848                     !skb_is_gso(skb)) {
2849                         tx_ring->last_tx_tso = false;
2850                         size -= 4;
2851                 }
2852
2853                 /* Workaround for premature desc write-backs
2854                  * in TSO mode.  Append 4-byte sentinel desc */
2855                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2856                         size -= 4;
2857                 /* Workaround for errata 10: it applies to all controllers
2858                  * in PCI-X mode.  The fix is to make sure that the first
2859                  * descriptor of a packet is smaller than
2860                  * 2048 - 16 - 16 (or 2016) bytes
2861                  */
2862                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2863                                 (size > 2015) && count == 0))
2864                         size = 2015;
2865
2866                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2867                  * terminating buffers within evenly-aligned dwords. */
2868                 if (unlikely(adapter->pcix_82544 &&
2869                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2870                    size > 4))
2871                         size -= 4;
2872
2873                 buffer_info->length = size;
2874                 /* set time_stamp *before* dma to help avoid a possible race */
2875                 buffer_info->time_stamp = jiffies;
2876                 buffer_info->mapped_as_page = false;
2877                 buffer_info->dma = dma_map_single(&pdev->dev,
2878                                                   skb->data + offset,
2879                                                   size, DMA_TO_DEVICE);
2880                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2881                         goto dma_error;
2882                 buffer_info->next_to_watch = i;
2883
2884                 len -= size;
2885                 offset += size;
2886                 count++;
2887                 if (len) {
2888                         i++;
2889                         if (unlikely(i == tx_ring->count))
2890                                 i = 0;
2891                 }
2892         }
2893
2894         for (f = 0; f < nr_frags; f++) {
2895                 const struct skb_frag_struct *frag;
2896
2897                 frag = &skb_shinfo(skb)->frags[f];
2898                 len = skb_frag_size(frag);
2899                 offset = 0;
2900
2901                 while (len) {
2902                         unsigned long bufend;
2903                         i++;
2904                         if (unlikely(i == tx_ring->count))
2905                                 i = 0;
2906
2907                         buffer_info = &tx_ring->buffer_info[i];
2908                         size = min(len, max_per_txd);
2909                         /* Workaround for premature desc write-backs
2910                          * in TSO mode.  Append 4-byte sentinel desc */
2911                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2912                                 size -= 4;
2913                         /* Workaround for potential 82544 hang in PCI-X.
2914                          * Avoid terminating buffers within evenly-aligned
2915                          * dwords. */
2916                         bufend = (unsigned long)
2917                                 page_to_phys(skb_frag_page(frag));
2918                         bufend += offset + size - 1;
2919                         if (unlikely(adapter->pcix_82544 &&
2920                                      !(bufend & 4) &&
2921                                      size > 4))
2922                                 size -= 4;
2923
2924                         buffer_info->length = size;
2925                         buffer_info->time_stamp = jiffies;
2926                         buffer_info->mapped_as_page = true;
2927                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2928                                                 offset, size, DMA_TO_DEVICE);
2929                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2930                                 goto dma_error;
2931                         buffer_info->next_to_watch = i;
2932
2933                         len -= size;
2934                         offset += size;
2935                         count++;
2936                 }
2937         }
2938
2939         segs = skb_shinfo(skb)->gso_segs ?: 1;
2940         /* count the headers that are replicated in each additional segment */
2941         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2942
2943         tx_ring->buffer_info[i].skb = skb;
2944         tx_ring->buffer_info[i].segs = segs;
2945         tx_ring->buffer_info[i].bytecount = bytecount;
2946         tx_ring->buffer_info[first].next_to_watch = i;
2947
2948         return count;
2949
2950 dma_error:
2951         dev_err(&pdev->dev, "TX DMA map failed\n");
2952         buffer_info->dma = 0;
2953         if (count)
2954                 count--;
2955
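             /* unwind, in reverse order, the buffers that were already mapped */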
2956         while (count--) {
2957                 if (i == 0)
2958                         i += tx_ring->count;
2959                 i--;
2960                 buffer_info = &tx_ring->buffer_info[i];
2961                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2962         }
2963
2964         return 0;
2965 }
2966
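     /* e1000_tx_queue - write the data descriptors for an already-mapped skb,
      * apply the command/option bits derived from tx_flags, and advance the
      * hardware tail pointer so the controller starts fetching them.
      */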
2967 static void e1000_tx_queue(struct e1000_adapter *adapter,
2968                            struct e1000_tx_ring *tx_ring, int tx_flags,
2969                            int count)
2970 {
2971         struct e1000_hw *hw = &adapter->hw;
2972         struct e1000_tx_desc *tx_desc = NULL;
2973         struct e1000_buffer *buffer_info;
2974         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2975         unsigned int i;
2976
2977         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2978                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2979                              E1000_TXD_CMD_TSE;
2980                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2981
2982                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2983                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2984         }
2985
2986         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2987                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2988                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2989         }
2990
2991         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2992                 txd_lower |= E1000_TXD_CMD_VLE;
2993                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2994         }
2995
2996         i = tx_ring->next_to_use;
2997
2998         while (count--) {
2999                 buffer_info = &tx_ring->buffer_info[i];
3000                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3001                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3002                 tx_desc->lower.data =
3003                         cpu_to_le32(txd_lower | buffer_info->length);
3004                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3005                 if (unlikely(++i == tx_ring->count)) i = 0;
3006         }
3007
3008         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3009
3010         /* Force memory writes to complete before letting h/w
3011          * know there are new descriptors to fetch.  (Only
3012          * applicable for weak-ordered memory model archs,
3013          * such as IA-64). */
3014         wmb();
3015
3016         tx_ring->next_to_use = i;
3017         writel(i, hw->hw_addr + tx_ring->tdt);
3018         /* we need this if more than one processor can write to our tail
3019          * at a time; it synchronizes IO on IA64/Altix systems */
3020         mmiowb();
3021 }
3022
3023 /**
3024  * e1000_82547_fifo_workaround - avoid 82547 controller hang in half-duplex.
3025  * The workaround is to avoid queuing a large packet that would span
3026  * the internal Tx FIFO ring boundary by notifying the stack to resend
3027  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3028  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3029  * to the beginning of the Tx FIFO.
3030  **/
3031
3032 #define E1000_FIFO_HDR                  0x10
3033 #define E1000_82547_PAD_LEN             0x3E0
3034
3035 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3036                                        struct sk_buff *skb)
3037 {
3038         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3039         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3040
3041         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3042
3043         if (adapter->link_duplex != HALF_DUPLEX)
3044                 goto no_fifo_stall_required;
3045
3046         if (atomic_read(&adapter->tx_fifo_stall))
3047                 return 1;
3048
3049         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3050                 atomic_set(&adapter->tx_fifo_stall, 1);
3051                 return 1;
3052         }
3053
3054 no_fifo_stall_required:
3055         adapter->tx_fifo_head += skb_fifo_len;
3056         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3057                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3058         return 0;
3059 }
3060
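     /* Slow path of e1000_maybe_stop_tx(): stop the queue, then re-check the
      * free descriptor count after a memory barrier in case the cleanup path
      * freed descriptors concurrently; if it did, restart the queue instead
      * of returning -EBUSY.
      */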
3061 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3062 {
3063         struct e1000_adapter *adapter = netdev_priv(netdev);
3064         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3065
3066         netif_stop_queue(netdev);
3067         /* Herbert's original patch had:
3068          *  smp_mb__after_netif_stop_queue();
3069          * but since that doesn't exist yet, just open code it. */
3070         smp_mb();
3071
3072         /* We need to check again in case another CPU has just
3073          * made room available. */
3074         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3075                 return -EBUSY;
3076
3077         /* A reprieve! */
3078         netif_start_queue(netdev);
3079         ++adapter->restart_queue;
3080         return 0;
3081 }
3082
3083 static int e1000_maybe_stop_tx(struct net_device *netdev,
3084                                struct e1000_tx_ring *tx_ring, int size)
3085 {
3086         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3087                 return 0;
3088         return __e1000_maybe_stop_tx(netdev, size);
3089 }
3090
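     /* Worst-case number of descriptors needed to send S bytes when each
      * descriptor holds at most 2^X bytes (always rounds up, possibly by one
      * extra descriptor when S is an exact multiple).
      */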
3091 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
3092 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3093                                     struct net_device *netdev)
3094 {
3095         struct e1000_adapter *adapter = netdev_priv(netdev);
3096         struct e1000_hw *hw = &adapter->hw;
3097         struct e1000_tx_ring *tx_ring;
3098         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3099         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3100         unsigned int tx_flags = 0;
3101         unsigned int len = skb_headlen(skb);
3102         unsigned int nr_frags;
3103         unsigned int mss;
3104         int count = 0;
3105         int tso;
3106         unsigned int f;
3107
3108         /* This goes back to the question of how to logically map a tx queue
3109          * to a flow.  Right now, performance is impacted slightly negatively
3110          * if using multiple tx queues.  If the stack breaks away from a
3111          * single qdisc implementation, we can look at this again. */
3112         tx_ring = adapter->tx_ring;
3113
3114         if (unlikely(skb->len <= 0)) {
3115                 dev_kfree_skb_any(skb);
3116                 return NETDEV_TX_OK;
3117         }
3118
3119         mss = skb_shinfo(skb)->gso_size;
3120         /* The controller does a simple calculation to
3121          * make sure there is enough room in the FIFO before
3122          * initiating the DMA for each buffer.  It assumes each
3123          * buffer spans at most 4 MSS-sized chunks, so to make
3124          * sure we don't overrun the FIFO, cap the max buffer
3125          * length at 4 * mss when mss drops. */
3126         if (mss) {
3127                 u8 hdr_len;
3128                 max_per_txd = min(mss << 2, max_per_txd);
3129                 max_txd_pwr = fls(max_per_txd) - 1;
3130
3131                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3132                 if (skb->data_len && hdr_len == len) {
3133                         switch (hw->mac_type) {
3134                                 unsigned int pull_size;
3135                         case e1000_82544:
3136                                 /* Make sure we have room to chop off 4 bytes,
3137                                  * and that the end alignment will work out to
3138                                  * this hardware's requirements
3139                                  * NOTE: this is a TSO only workaround
3140                                  * if end byte alignment not correct move us
3141                                  * into the next dword */
3142                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3143                                         break;
3144                                 /* otherwise pull a little payload into the linear area */
3145                                 pull_size = min((unsigned int)4, skb->data_len);
3146                                 if (!__pskb_pull_tail(skb, pull_size)) {
3147                                         e_err(drv, "__pskb_pull_tail "
3148                                               "failed.\n");
3149                                         dev_kfree_skb_any(skb);
3150                                         return NETDEV_TX_OK;
3151                                 }
3152                                 len = skb_headlen(skb);
3153                                 break;
3154                         default:
3155                                 /* do nothing */
3156                                 break;
3157                         }
3158                 }
3159         }
3160
3161         /* reserve a descriptor for the offload context */
3162         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3163                 count++;
3164         count++;
3165
3166         /* Controller Erratum workaround */
3167         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3168                 count++;
3169
3170         count += TXD_USE_COUNT(len, max_txd_pwr);
3171
3172         if (adapter->pcix_82544)
3173                 count++;
3174
3175         /* Workaround for errata 10: it applies to all controllers
3176          * in PCI-X mode, so add one more descriptor to the count
3177          */
3178         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3179                         (len > 2015)))
3180                 count++;
3181
3182         nr_frags = skb_shinfo(skb)->nr_frags;
3183         for (f = 0; f < nr_frags; f++)
3184                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3185                                        max_txd_pwr);
3186         if (adapter->pcix_82544)
3187                 count += nr_frags;
3188
3189         /* need: count + 2 desc gap to keep tail from touching
3190          * head, otherwise try next time */
3191         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3192                 return NETDEV_TX_BUSY;
3193
3194         if (unlikely((hw->mac_type == e1000_82547) &&
3195                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3196                 netif_stop_queue(netdev);
3197                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3198                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3199                 return NETDEV_TX_BUSY;
3200         }
3201
3202         if (vlan_tx_tag_present(skb)) {
3203                 tx_flags |= E1000_TX_FLAGS_VLAN;
3204                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3205         }
3206
3207         first = tx_ring->next_to_use;
3208
3209         tso = e1000_tso(adapter, tx_ring, skb);
3210         if (tso < 0) {
3211                 dev_kfree_skb_any(skb);
3212                 return NETDEV_TX_OK;
3213         }
3214
3215         if (likely(tso)) {
3216                 if (likely(hw->mac_type != e1000_82544))
3217                         tx_ring->last_tx_tso = true;
3218                 tx_flags |= E1000_TX_FLAGS_TSO;
3219         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3220                 tx_flags |= E1000_TX_FLAGS_CSUM;
3221
3222         if (likely(skb->protocol == htons(ETH_P_IP)))
3223                 tx_flags |= E1000_TX_FLAGS_IPV4;
3224
3225         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3226                              nr_frags, mss);
3227
3228         if (count) {
3229                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3230                 /* Make sure there is space in the ring for the next send. */
3231                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3232
3233         } else {
3234                 dev_kfree_skb_any(skb);
3235                 tx_ring->buffer_info[first].time_stamp = 0;
3236                 tx_ring->next_to_use = first;
3237         }
3238
3239         return NETDEV_TX_OK;
3240 }
3241
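     /* e1000_regdump - snapshot the main MAC, Tx, Rx and FIFO registers and
      * print them by name as part of the debug dump done in e1000_dump().
      */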
3242 #define NUM_REGS 38 /* 1 based count */
3243 static void e1000_regdump(struct e1000_adapter *adapter)
3244 {
3245         struct e1000_hw *hw = &adapter->hw;
3246         u32 regs[NUM_REGS];
3247         u32 *regs_buff = regs;
3248         int i = 0;
3249
3250         char *reg_name[] = {
3251         "CTRL",  "STATUS",
3252         "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3253         "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3254         "TIDV", "TXDCTL", "TADV", "TARC0",
3255         "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3256         "TXDCTL1", "TARC1",
3257         "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3258         "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3259         "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3260         };
3261
3262         regs_buff[0]  = er32(CTRL);
3263         regs_buff[1]  = er32(STATUS);
3264
3265         regs_buff[2]  = er32(RCTL);
3266         regs_buff[3]  = er32(RDLEN);
3267         regs_buff[4]  = er32(RDH);
3268         regs_buff[5]  = er32(RDT);
3269         regs_buff[6]  = er32(RDTR);
3270
3271         regs_buff[7]  = er32(TCTL);
3272         regs_buff[8]  = er32(TDBAL);
3273         regs_buff[9]  = er32(TDBAH);
3274         regs_buff[10] = er32(TDLEN);
3275         regs_buff[11] = er32(TDH);
3276         regs_buff[12] = er32(TDT);
3277         regs_buff[13] = er32(TIDV);
3278         regs_buff[14] = er32(TXDCTL);
3279         regs_buff[15] = er32(TADV);
3280         regs_buff[16] = er32(TARC0);
3281
3282         regs_buff[17] = er32(TDBAL1);
3283         regs_buff[18] = er32(TDBAH1);
3284         regs_buff[19] = er32(TDLEN1);
3285         regs_buff[20] = er32(TDH1);
3286         regs_buff[21] = er32(TDT1);
3287         regs_buff[22] = er32(TXDCTL1);
3288         regs_buff[23] = er32(TARC1);
3289         regs_buff[24] = er32(CTRL_EXT);
3290         regs_buff[25] = er32(ERT);
3291         regs_buff[26] = er32(RDBAL0);
3292         regs_buff[27] = er32(RDBAH0);
3293         regs_buff[28] = er32(TDFH);
3294         regs_buff[29] = er32(TDFT);
3295         regs_buff[30] = er32(TDFHS);
3296         regs_buff[31] = er32(TDFTS);
3297         regs_buff[32] = er32(TDFPC);
3298         regs_buff[33] = er32(RDFH);
3299         regs_buff[34] = er32(RDFT);
3300         regs_buff[35] = er32(RDFHS);
3301         regs_buff[36] = er32(RDFTS);
3302         regs_buff[37] = er32(RDFPC);
3303
3304         pr_info("Register dump\n");
3305         for (i = 0; i < NUM_REGS; i++) {
3306                 printk(KERN_INFO "%-15s  %08x\n",
3307                        reg_name[i], regs_buff[i]);
3308         }
3309 }
3310
3311 /*
3312  * e1000_dump: Print registers, tx ring and rx ring
3313  */
3314 static void e1000_dump(struct e1000_adapter *adapter)
3315 {
3316         /* this code doesn't handle multiple rings */
3317         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3318         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3319         int i;
3320
3321         if (!netif_msg_hw(adapter))
3322                 return;
3323
3324         /* Print Registers */
3325         e1000_regdump(adapter);
3326
3327         /*
3328          * transmit dump
3329          */
3330         pr_info("TX Desc ring0 dump\n");
3331
3332         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3333          *
3334          * Legacy Transmit Descriptor
3335          *   +--------------------------------------------------------------+
3336          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3337          *   +--------------------------------------------------------------+
3338          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3339          *   +--------------------------------------------------------------+
3340          *   63       48 47        36 35    32 31     24 23    16 15        0
3341          *
3342          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3343          *   63      48 47    40 39       32 31             16 15    8 7      0
3344          *   +----------------------------------------------------------------+
3345          * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
3346          *   +----------------------------------------------------------------+
3347          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3348          *   +----------------------------------------------------------------+
3349          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3350          *
3351          * Extended Data Descriptor (DTYP=0x1)
3352          *   +----------------------------------------------------------------+
3353          * 0 |                     Buffer Address [63:0]                      |
3354          *   +----------------------------------------------------------------+
3355          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3356          *   +----------------------------------------------------------------+
3357          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3358          */
3359         printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ]"
3360                " leng  ntw timestmp         bi->skb\n");
3361         printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ]"
3362                " leng  ntw timestmp         bi->skb\n");
3363
3364         if (!netif_msg_tx_done(adapter))
3365                 goto rx_ring_summary;
3366
3367         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3368                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3369                 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3370                 struct my_u { u64 a; u64 b; };
3371                 struct my_u *u = (struct my_u *)tx_desc;
3372                 printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X "
3373                        "%016llX %p",
3374                        ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3375                        le64_to_cpu(u->a), le64_to_cpu(u->b),
3376                        (u64)buffer_info->dma, buffer_info->length,
3377                        buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
3378                        buffer_info->skb);
3379                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3380                         printk(KERN_CONT " NTC/U\n");
3381                 else if (i == tx_ring->next_to_use)
3382                         printk(KERN_CONT " NTU\n");
3383                 else if (i == tx_ring->next_to_clean)
3384                         printk(KERN_CONT " NTC\n");
3385                 else
3386                         printk(KERN_CONT "\n");
3387
3388
3389                 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
3390                         print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
3391                                         16, 1, phys_to_virt(buffer_info->dma),
3392                                         buffer_info->length, true);
3393         }
3394
3395 rx_ring_summary:
3396         /*
3397          * receive dump
3398          */
3399         pr_info("\nRX Desc ring dump\n");
3400
3401         /* Legacy Receive Descriptor Format
3402          *
3403          * +-----------------------------------------------------+
3404          * |                Buffer Address [63:0]                |
3405          * +-----------------------------------------------------+
3406          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3407          * +-----------------------------------------------------+
3408          * 63       48 47    40 39      32 31         16 15      0
3409          */
3410         printk(KERN_INFO "R[desc]      [address 63:0  ] [vl er S cks ln] "
3411                 "[bi->dma       ] [bi->skb]\n");
3412
3413         if (!netif_msg_rx_status(adapter))
3414                 goto exit;
3415
3416         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3417                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3418                 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3419                 struct my_u { u64 a; u64 b; };
3420                 struct my_u *u = (struct my_u *)rx_desc;
3421                 printk(KERN_INFO "R[0x%03X]     %016llX %016llX %016llX %p",
3422                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3423                         (u64)buffer_info->dma, buffer_info->skb);
3424                 if (i == rx_ring->next_to_use)
3425                         printk(KERN_CONT " NTU\n");
3426                 else if (i == rx_ring->next_to_clean)
3427                         printk(KERN_CONT " NTC\n");
3428                 else
3429                         printk(KERN_CONT "\n");
3430
3431                 if (netif_msg_pktdata(adapter))
3432                         print_hex_dump(KERN_INFO, "",
3433                                 DUMP_PREFIX_ADDRESS, 16, 1,
3434                                 phys_to_virt(buffer_info->dma),
3435                                 buffer_info->length, true);
3436
3437         } /* for */
3438
3439         /* dump the descriptor caches */
3440         /* rx */
3441         printk(KERN_INFO "e1000: Rx descriptor cache in 64bit format\n");
3442         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3443                 printk(KERN_INFO "R%04X: %08X|%08X %08X|%08X\n",
3444                                 i,
3445                                 readl(adapter->hw.hw_addr + i+4),
3446                                 readl(adapter->hw.hw_addr + i),
3447                                 readl(adapter->hw.hw_addr + i+12),
3448                                 readl(adapter->hw.hw_addr + i+8));
3449         }
3450         /* tx */
3451         printk(KERN_INFO "e1000: Tx descriptor cache in 64bit format\n");
3452         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3453                 printk(KERN_INFO "T%04X: %08X|%08X %08X|%08X\n",
3454                                 i,
3455                                 readl(adapter->hw.hw_addr + i+4),
3456                                 readl(adapter->hw.hw_addr + i),
3457                                 readl(adapter->hw.hw_addr + i+12),
3458                                 readl(adapter->hw.hw_addr + i+8));
3459         }
3460 exit:
3461         return;
3462 }
3463
3464 /**
3465  * e1000_tx_timeout - Respond to a Tx Hang
3466  * @netdev: network interface device structure
3467  **/
3468
3469 static void e1000_tx_timeout(struct net_device *netdev)
3470 {
3471         struct e1000_adapter *adapter = netdev_priv(netdev);
3472
3473         /* Do the reset outside of interrupt context */
3474         adapter->tx_timeout_count++;
3475         schedule_work(&adapter->reset_task);
3476 }
3477
3478 static void e1000_reset_task(struct work_struct *work)
3479 {
3480         struct e1000_adapter *adapter =
3481                 container_of(work, struct e1000_adapter, reset_task);
3482
3483         if (test_bit(__E1000_DOWN, &adapter->flags))
3484                 return;
3485         e_err(drv, "Reset adapter\n");
3486         e1000_reinit_safe(adapter);
3487 }
3488
3489 /**
3490  * e1000_get_stats - Get System Network Statistics
3491  * @netdev: network interface device structure
3492  *
3493  * Returns the address of the device statistics structure.
3494  * The statistics are actually updated from the watchdog.
3495  **/
3496
3497 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3498 {
3499         /* only return the current stats */
3500         return &netdev->stats;
3501 }
3502
3503 /**
3504  * e1000_change_mtu - Change the Maximum Transfer Unit
3505  * @netdev: network interface device structure
3506  * @new_mtu: new value for maximum frame size
3507  *
3508  * Returns 0 on success, negative on failure
3509  **/
3510
3511 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3512 {
3513         struct e1000_adapter *adapter = netdev_priv(netdev);
3514         struct e1000_hw *hw = &adapter->hw;
3515         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3516
3517         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3518             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3519                 e_err(probe, "Invalid MTU setting\n");
3520                 return -EINVAL;
3521         }
3522
3523         /* Adapter-specific max frame size limits. */
3524         switch (hw->mac_type) {
3525         case e1000_undefined ... e1000_82542_rev2_1:
3526                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3527                         e_err(probe, "Jumbo Frames not supported.\n");
3528                         return -EINVAL;
3529                 }
3530                 break;
3531         default:
3532                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3533                 break;
3534         }
3535
3536         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3537                 msleep(1);
3538         /* e1000_down has a dependency on max_frame_size */
3539         hw->max_frame_size = max_frame;
3540         if (netif_running(netdev))
3541                 e1000_down(adapter);
3542
3543         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3544          * means we reserve 2 more; this pushes us to allocate from the next
3545          * larger slab size.
3546          * i.e. RXBUFFER_2048 --> size-4096 slab
3547          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3548          * fragmented skbs */
3549
3550         if (max_frame <= E1000_RXBUFFER_2048)
3551                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3552         else
3553 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3554                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3555 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3556                 adapter->rx_buffer_len = PAGE_SIZE;
3557 #endif
3558
3559         /* adjust allocation if LPE protects us, and we aren't using SBP */
3560         if (!hw->tbi_compatibility_on &&
3561             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3562              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3563                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3564
3565         pr_info("%s changing MTU from %d to %d\n",
3566                 netdev->name, netdev->mtu, new_mtu);
3567         netdev->mtu = new_mtu;
3568
3569         if (netif_running(netdev))
3570                 e1000_up(adapter);
3571         else
3572                 e1000_reset(adapter);
3573
3574         clear_bit(__E1000_RESETTING, &adapter->flags);
3575
3576         return 0;
3577 }
3578
3579 /**
3580  * e1000_update_stats - Update the board statistics counters
3581  * @adapter: board private structure
3582  **/
3583
3584 void e1000_update_stats(struct e1000_adapter *adapter)
3585 {
3586         struct net_device *netdev = adapter->netdev;
3587         struct e1000_hw *hw = &adapter->hw;
3588         struct pci_dev *pdev = adapter->pdev;
3589         unsigned long flags;
3590         u16 phy_tmp;
3591
3592 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3593
3594         /*
3595          * Prevent stats update while adapter is being reset, or if the pci
3596          * connection is down.
3597          */
3598         if (adapter->link_speed == 0)
3599                 return;
3600         if (pci_channel_offline(pdev))
3601                 return;
3602
3603         spin_lock_irqsave(&adapter->stats_lock, flags);
3604
3605         /* these counters are modified from e1000_tbi_adjust_stats,
3606          * called from the interrupt context, so they must only
3607          * be written while holding adapter->stats_lock
3608          */
3609
3610         adapter->stats.crcerrs += er32(CRCERRS);
3611         adapter->stats.gprc += er32(GPRC);
3612         adapter->stats.gorcl += er32(GORCL);
3613         adapter->stats.gorch += er32(GORCH);
3614         adapter->stats.bprc += er32(BPRC);
3615         adapter->stats.mprc += er32(MPRC);
3616         adapter->stats.roc += er32(ROC);
3617
3618         adapter->stats.prc64 += er32(PRC64);
3619         adapter->stats.prc127 += er32(PRC127);
3620         adapter->stats.prc255 += er32(PRC255);
3621         adapter->stats.prc511 += er32(PRC511);
3622         adapter->stats.prc1023 += er32(PRC1023);
3623         adapter->stats.prc1522 += er32(PRC1522);
3624
3625         adapter->stats.symerrs += er32(SYMERRS);
3626         adapter->stats.mpc += er32(MPC);
3627         adapter->stats.scc += er32(SCC);
3628         adapter->stats.ecol += er32(ECOL);
3629         adapter->stats.mcc += er32(MCC);
3630         adapter->stats.latecol += er32(LATECOL);
3631         adapter->stats.dc += er32(DC);
3632         adapter->stats.sec += er32(SEC);
3633         adapter->stats.rlec += er32(RLEC);
3634         adapter->stats.xonrxc += er32(XONRXC);
3635         adapter->stats.xontxc += er32(XONTXC);
3636         adapter->stats.xoffrxc += er32(XOFFRXC);
3637         adapter->stats.xofftxc += er32(XOFFTXC);
3638         adapter->stats.fcruc += er32(FCRUC);
3639         adapter->stats.gptc += er32(GPTC);
3640         adapter->stats.gotcl += er32(GOTCL);
3641         adapter->stats.gotch += er32(GOTCH);
3642         adapter->stats.rnbc += er32(RNBC);
3643         adapter->stats.ruc += er32(RUC);
3644         adapter->stats.rfc += er32(RFC);
3645         adapter->stats.rjc += er32(RJC);
3646         adapter->stats.torl += er32(TORL);
3647         adapter->stats.torh += er32(TORH);
3648         adapter->stats.totl += er32(TOTL);
3649         adapter->stats.toth += er32(TOTH);
3650         adapter->stats.tpr += er32(TPR);
3651
3652         adapter->stats.ptc64 += er32(PTC64);
3653         adapter->stats.ptc127 += er32(PTC127);
3654         adapter->stats.ptc255 += er32(PTC255);
3655         adapter->stats.ptc511 += er32(PTC511);
3656         adapter->stats.ptc1023 += er32(PTC1023);
3657         adapter->stats.ptc1522 += er32(PTC1522);
3658
3659         adapter->stats.mptc += er32(MPTC);
3660         adapter->stats.bptc += er32(BPTC);
3661
3662         /* used for adaptive IFS */
3663
3664         hw->tx_packet_delta = er32(TPT);
3665         adapter->stats.tpt += hw->tx_packet_delta;
3666         hw->collision_delta = er32(COLC);
3667         adapter->stats.colc += hw->collision_delta;
3668
3669         if (hw->mac_type >= e1000_82543) {
3670                 adapter->stats.algnerrc += er32(ALGNERRC);
3671                 adapter->stats.rxerrc += er32(RXERRC);
3672                 adapter->stats.tncrs += er32(TNCRS);
3673                 adapter->stats.cexterr += er32(CEXTERR);
3674                 adapter->stats.tsctc += er32(TSCTC);
3675                 adapter->stats.tsctfc += er32(TSCTFC);
3676         }
3677
3678         /* Fill out the OS statistics structure */
3679         netdev->stats.multicast = adapter->stats.mprc;
3680         netdev->stats.collisions = adapter->stats.colc;
3681
3682         /* Rx Errors */
3683
3684         /* RLEC on some newer hardware can be incorrect so build
3685          * our own version based on RUC and ROC */
3686         netdev->stats.rx_errors = adapter->stats.rxerrc +
3687                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3688                 adapter->stats.ruc + adapter->stats.roc +
3689                 adapter->stats.cexterr;
3690         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3691         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3692         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3693         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3694         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3695
3696         /* Tx Errors */
3697         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3698         netdev->stats.tx_errors = adapter->stats.txerrc;
3699         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3700         netdev->stats.tx_window_errors = adapter->stats.latecol;
3701         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3702         if (hw->bad_tx_carr_stats_fd &&
3703             adapter->link_duplex == FULL_DUPLEX) {
3704                 netdev->stats.tx_carrier_errors = 0;
3705                 adapter->stats.tncrs = 0;
3706         }
3707
3708         /* Tx Dropped needs to be maintained elsewhere */
3709
3710         /* Phy Stats */
3711         if (hw->media_type == e1000_media_type_copper) {
3712                 if ((adapter->link_speed == SPEED_1000) &&
3713                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3714                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3715                         adapter->phy_stats.idle_errors += phy_tmp;
3716                 }
3717
3718                 if ((hw->mac_type <= e1000_82546) &&
3719                    (hw->phy_type == e1000_phy_m88) &&
3720                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3721                         adapter->phy_stats.receive_errors += phy_tmp;
3722         }
3723
3724         /* Management Stats */
3725         if (hw->has_smbus) {
3726                 adapter->stats.mgptc += er32(MGTPTC);
3727                 adapter->stats.mgprc += er32(MGTPRC);
3728                 adapter->stats.mgpdc += er32(MGTPDC);
3729         }
3730
3731         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3732 }
3733
3734 /**
3735  * e1000_intr - Interrupt Handler
3736  * @irq: interrupt number
3737  * @data: pointer to a network interface device structure
3738  **/
3739
3740 static irqreturn_t e1000_intr(int irq, void *data)
3741 {
3742         struct net_device *netdev = data;
3743         struct e1000_adapter *adapter = netdev_priv(netdev);
3744         struct e1000_hw *hw = &adapter->hw;
3745         u32 icr = er32(ICR);
3746
3747         if (unlikely((!icr)))
3748                 return IRQ_NONE;  /* Not our interrupt */
3749
3750         /*
3751          * we might have caused the interrupt, but the above
3752          * read cleared it, and just in case the driver is
3753          * down there is nothing to do so return handled
3754          */
3755         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3756                 return IRQ_HANDLED;
3757
3758         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3759                 hw->get_link_status = 1;
3760                 /* guard against interrupt when we're going down */
3761                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3762                         schedule_delayed_work(&adapter->watchdog_task, 1);
3763         }
3764
3765         /* disable interrupts, without the synchronize_irq bit */
3766         ew32(IMC, ~0);
3767         E1000_WRITE_FLUSH();
3768
3769         if (likely(napi_schedule_prep(&adapter->napi))) {
3770                 adapter->total_tx_bytes = 0;
3771                 adapter->total_tx_packets = 0;
3772                 adapter->total_rx_bytes = 0;
3773                 adapter->total_rx_packets = 0;
3774                 __napi_schedule(&adapter->napi);
3775         } else {
3776                 /* this really should not happen! if it does it is basically a
3777                  * bug, but not a hard error, so enable ints and continue */
3778                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3779                         e1000_irq_enable(adapter);
3780         }
3781
3782         return IRQ_HANDLED;
3783 }
3784
3785 /**
3786  * e1000_clean - NAPI Rx polling callback
3787  * @napi: napi struct embedded in our adapter private structure
3788  **/
3789 static int e1000_clean(struct napi_struct *napi, int budget)
3790 {
3791         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3792         int tx_clean_complete = 0, work_done = 0;
3793
3794         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3795
3796         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3797
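             /* if Tx cleanup did not finish, claim the full budget so NAPI
              * keeps polling instead of re-enabling interrupts */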
3798         if (!tx_clean_complete)
3799                 work_done = budget;
3800
3801         /* If budget not fully consumed, exit the polling mode */
3802         if (work_done < budget) {
3803                 if (likely(adapter->itr_setting & 3))
3804                         e1000_set_itr(adapter);
3805                 napi_complete(napi);
3806                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3807                         e1000_irq_enable(adapter);
3808         }
3809
3810         return work_done;
3811 }
3812
3813 /**
3814  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3815  * @adapter: board private structure
3816  **/
3817 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3818                                struct e1000_tx_ring *tx_ring)
3819 {
3820         struct e1000_hw *hw = &adapter->hw;
3821         struct net_device *netdev = adapter->netdev;
3822         struct e1000_tx_desc *tx_desc, *eop_desc;
3823         struct e1000_buffer *buffer_info;
3824         unsigned int i, eop;
3825         unsigned int count = 0;
3826         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3827
3828         i = tx_ring->next_to_clean;
3829         eop = tx_ring->buffer_info[i].next_to_watch;
3830         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3831
3832         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3833                (count < tx_ring->count)) {
3834                 bool cleaned = false;
3835                 rmb();  /* read buffer_info after eop_desc */
3836                 for ( ; !cleaned; count++) {
3837                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3838                         buffer_info = &tx_ring->buffer_info[i];
3839                         cleaned = (i == eop);
3840
3841                         if (cleaned) {
3842                                 total_tx_packets += buffer_info->segs;
3843                                 total_tx_bytes += buffer_info->bytecount;
3844                         }
3845                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3846                         tx_desc->upper.data = 0;
3847
3848                         if (unlikely(++i == tx_ring->count)) i = 0;
3849                 }
3850
3851                 eop = tx_ring->buffer_info[i].next_to_watch;
3852                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3853         }
3854
3855         tx_ring->next_to_clean = i;
3856
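     /* wake the stopped queue only once at least this many descriptors are
      * free again, to avoid bouncing between the stopped and running states */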
3857 #define TX_WAKE_THRESHOLD 32
3858         if (unlikely(count && netif_carrier_ok(netdev) &&
3859                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3860                 /* Make sure that anybody stopping the queue after this
3861                  * sees the new next_to_clean.
3862                  */
3863                 smp_mb();
3864
3865                 if (netif_queue_stopped(netdev) &&
3866                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3867                         netif_wake_queue(netdev);
3868                         ++adapter->restart_queue;
3869                 }
3870         }
3871
3872         if (adapter->detect_tx_hung) {
3873                 /* Detect a transmit hang in hardware; this serializes the
3874                  * check with the clearing of time_stamp and movement of i */
3875                 adapter->detect_tx_hung = false;
3876                 if (tx_ring->buffer_info[eop].time_stamp &&
3877                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3878                                (adapter->tx_timeout_factor * HZ)) &&
3879                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3880
3881                         /* detected Tx unit hang */
3882                         e_err(drv, "Detected Tx Unit Hang\n"
3883                               "  Tx Queue             <%lu>\n"
3884                               "  TDH                  <%x>\n"
3885                               "  TDT                  <%x>\n"
3886                               "  next_to_use          <%x>\n"
3887                               "  next_to_clean        <%x>\n"
3888                               "buffer_info[next_to_clean]\n"
3889                               "  time_stamp           <%lx>\n"
3890                               "  next_to_watch        <%x>\n"
3891                               "  jiffies              <%lx>\n"
3892                               "  next_to_watch.status <%x>\n",
3893                                 (unsigned long)(tx_ring -
3894                                         adapter->tx_ring),
3895                                 readl(hw->hw_addr + tx_ring->tdh),
3896                                 readl(hw->hw_addr + tx_ring->tdt),
3897                                 tx_ring->next_to_use,
3898                                 tx_ring->next_to_clean,
3899                                 tx_ring->buffer_info[eop].time_stamp,
3900                                 eop,
3901                                 jiffies,
3902                                 eop_desc->upper.fields.status);
3903                         e1000_dump(adapter);
3904                         netif_stop_queue(netdev);
3905                 }
3906         }
3907         adapter->total_tx_bytes += total_tx_bytes;
3908         adapter->total_tx_packets += total_tx_packets;
3909         netdev->stats.tx_bytes += total_tx_bytes;
3910         netdev->stats.tx_packets += total_tx_packets;
3911         return count < tx_ring->count;
3912 }
3913
3914 /**
3915  * e1000_rx_checksum - Receive Checksum Offload for 82543
3916  * @adapter:     board private structure
3917  * @status_err:  receive descriptor status and error fields
3918  * @csum:        receive descriptor csum field
3919  * @sk_buff:     socket buffer with received data
3920  **/
3921
3922 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3923                               u32 csum, struct sk_buff *skb)
3924 {
3925         struct e1000_hw *hw = &adapter->hw;
3926         u16 status = (u16)status_err;
3927         u8 errors = (u8)(status_err >> 24);
3928
3929         skb_checksum_none_assert(skb);
3930
3931         /* 82543 or newer only */
3932         if (unlikely(hw->mac_type < e1000_82543)) return;
3933         /* Ignore Checksum bit is set */
3934         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3935         /* TCP/UDP checksum error bit is set */
3936         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3937                 /* let the stack verify checksum errors */
3938                 adapter->hw_csum_err++;
3939                 return;
3940         }
3941         /* TCP/UDP Checksum has not been calculated */
3942         if (!(status & E1000_RXD_STAT_TCPCS))
3943                 return;
3944
3945         /* It must be a TCP or UDP packet with a valid checksum */
3946         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3947                 /* TCP checksum is good */
3948                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3949         }
3950         adapter->hw_csum_good++;
3951 }
3952
3953 /**
3954  * e1000_consume_page - helper function
3955  **/
3956 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3957                                u16 length)
3958 {
3959         bi->page = NULL;
3960         skb->len += length;
3961         skb->data_len += length;
3962         skb->truesize += PAGE_SIZE;
3963 }
3964
3965 /**
3966  * e1000_receive_skb - helper function to handle rx indications
3967  * @adapter: board private structure
3968  * @status: descriptor status field as written by hardware
3969  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3970  * @skb: pointer to sk_buff to be indicated to stack
3971  */
3972 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3973                               __le16 vlan, struct sk_buff *skb)
3974 {
3975         skb->protocol = eth_type_trans(skb, adapter->netdev);
3976
3977         if (status & E1000_RXD_STAT_VP) {
3978                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3979
3980                 __vlan_hwaccel_put_tag(skb, vid);
3981         }
3982         napi_gro_receive(&adapter->napi, skb);
3983 }
3984
3985 /**
3986  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3987  * @adapter: board private structure
3988  * @rx_ring: ring to clean
3989  * @work_done: amount of napi work completed this call
3990  * @work_to_do: max amount of work allowed for this call to do
3991  *
3992  * the return value indicates whether actual cleaning was done, there
3993  * is no guarantee that everything was cleaned
3994  */
3995 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3996                                      struct e1000_rx_ring *rx_ring,
3997                                      int *work_done, int work_to_do)
3998 {
3999         struct e1000_hw *hw = &adapter->hw;
4000         struct net_device *netdev = adapter->netdev;
4001         struct pci_dev *pdev = adapter->pdev;
4002         struct e1000_rx_desc *rx_desc, *next_rxd;
4003         struct e1000_buffer *buffer_info, *next_buffer;
4004         unsigned long irq_flags;
4005         u32 length;
4006         unsigned int i;
4007         int cleaned_count = 0;
4008         bool cleaned = false;
4009         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4010
4011         i = rx_ring->next_to_clean;
4012         rx_desc = E1000_RX_DESC(*rx_ring, i);
4013         buffer_info = &rx_ring->buffer_info[i];
4014
4015         while (rx_desc->status & E1000_RXD_STAT_DD) {
4016                 struct sk_buff *skb;
4017                 u8 status;
4018
4019                 if (*work_done >= work_to_do)
4020                         break;
4021                 (*work_done)++;
4022                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4023
4024                 status = rx_desc->status;
4025                 skb = buffer_info->skb;
4026                 buffer_info->skb = NULL;
4027
4028                 if (++i == rx_ring->count) i = 0;
4029                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4030                 prefetch(next_rxd);
4031
4032                 next_buffer = &rx_ring->buffer_info[i];
4033
4034                 cleaned = true;
4035                 cleaned_count++;
4036                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4037                                buffer_info->length, DMA_FROM_DEVICE);
4038                 buffer_info->dma = 0;
4039
4040                 length = le16_to_cpu(rx_desc->length);
4041
4042                 /* errors is only valid for DD + EOP descriptors */
4043                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4044                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4045                         u8 last_byte = *(skb->data + length - 1);
4046                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4047                                        last_byte)) {
4048                                 spin_lock_irqsave(&adapter->stats_lock,
4049                                                   irq_flags);
4050                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4051                                                        length, skb->data);
4052                                 spin_unlock_irqrestore(&adapter->stats_lock,
4053                                                        irq_flags);
4054                                 length--;
4055                         } else {
4056                                 /* recycle both page and skb */
4057                                 buffer_info->skb = skb;
4058                                 /* an error means any chain goes out the window
4059                                  * too */
4060                                 if (rx_ring->rx_skb_top)
4061                                         dev_kfree_skb(rx_ring->rx_skb_top);
4062                                 rx_ring->rx_skb_top = NULL;
4063                                 goto next_desc;
4064                         }
4065                 }
4066
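     /* rx_skb_top holds the head skb of a packet that spans multiple receive
      * buffers; it persists in the ring structure until the packet's EOP
      * descriptor is seen */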
4067 #define rxtop rx_ring->rx_skb_top
4068                 if (!(status & E1000_RXD_STAT_EOP)) {
4069                         /* this descriptor is only the beginning (or middle) */
4070                         if (!rxtop) {
4071                                 /* this is the beginning of a chain */
4072                                 rxtop = skb;
4073                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4074                                                    0, length);
4075                         } else {
4076                                 /* this is the middle of a chain */
4077                                 skb_fill_page_desc(rxtop,
4078                                     skb_shinfo(rxtop)->nr_frags,
4079                                     buffer_info->page, 0, length);
4080                                 /* re-use the skb, only consumed the page */
4081                                 buffer_info->skb = skb;
4082                         }
4083                         e1000_consume_page(buffer_info, rxtop, length);
4084                         goto next_desc;
4085                 } else {
4086                         if (rxtop) {
4087                                 /* end of the chain */
4088                                 skb_fill_page_desc(rxtop,
4089                                     skb_shinfo(rxtop)->nr_frags,
4090                                     buffer_info->page, 0, length);
4091                                 /* re-use the current skb, we only consumed the
4092                                  * page */
4093                                 buffer_info->skb = skb;
4094                                 skb = rxtop;
4095                                 rxtop = NULL;
4096                                 e1000_consume_page(buffer_info, skb, length);
4097                         } else {
4098                                 /* no chain, got EOP, this buf is the packet;
4099                                  * copybreak to save the put_page/alloc_page */
4100                                 if (length <= copybreak &&
4101                                     skb_tailroom(skb) >= length) {
4102                                         u8 *vaddr;
4103                                         vaddr = kmap_atomic(buffer_info->page,
4104                                                             KM_SKB_DATA_SOFTIRQ);
4105                                         memcpy(skb_tail_pointer(skb), vaddr, length);
4106                                         kunmap_atomic(vaddr,
4107                                                       KM_SKB_DATA_SOFTIRQ);
4108                                         /* re-use the page, so don't erase
4109                                          * buffer_info->page */
4110                                         skb_put(skb, length);
4111                                 } else {
4112                                         skb_fill_page_desc(skb, 0,
4113                                                            buffer_info->page, 0,
4114                                                            length);
4115                                         e1000_consume_page(buffer_info, skb,
4116                                                            length);
4117                                 }
4118                         }
4119                 }
4120
4121                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4122                 e1000_rx_checksum(adapter,
4123                                   (u32)(status) |
4124                                   ((u32)(rx_desc->errors) << 24),
4125                                   le16_to_cpu(rx_desc->csum), skb);
4126
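                     /* strip the 4-byte Ethernet CRC still present at the end
                      * of the frame */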
4127                 pskb_trim(skb, skb->len - 4);
4128
4129                 /* probably a little skewed due to removing CRC */
4130                 total_rx_bytes += skb->len;
4131                 total_rx_packets++;
4132
4133                 /* eth type trans needs skb->data to point to something */
4134                 if (!pskb_may_pull(skb, ETH_HLEN)) {
4135                         e_err(drv, "pskb_may_pull failed.\n");
4136                         dev_kfree_skb(skb);
4137                         goto next_desc;
4138                 }
4139
4140                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4141
4142 next_desc:
4143                 rx_desc->status = 0;
4144
4145                 /* return some buffers to hardware, one at a time is too slow */
4146                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4147                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4148                         cleaned_count = 0;
4149                 }
4150
4151                 /* use prefetched values */
4152                 rx_desc = next_rxd;
4153                 buffer_info = next_buffer;
4154         }
4155         rx_ring->next_to_clean = i;
4156
4157         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4158         if (cleaned_count)
4159                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4160
4161         adapter->total_rx_packets += total_rx_packets;
4162         adapter->total_rx_bytes += total_rx_bytes;
4163         netdev->stats.rx_bytes += total_rx_bytes;
4164         netdev->stats.rx_packets += total_rx_packets;
4165         return cleaned;
4166 }
4167
4168 /*
4169  * this should improve performance for small packets with large amounts
4170  * of reassembly being done in the stack
4171  */
4172 static void e1000_check_copybreak(struct net_device *netdev,
4173                                  struct e1000_buffer *buffer_info,
4174                                  u32 length, struct sk_buff **skb)
4175 {
4176         struct sk_buff *new_skb;
4177
4178         if (length > copybreak)
4179                 return;
4180
4181         new_skb = netdev_alloc_skb_ip_align(netdev, length);
4182         if (!new_skb)
4183                 return;
4184
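        /* the copy includes the NET_IP_ALIGN pad in front of the packet so the
         * IP header keeps its alignment in the replacement skb */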
4185         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4186                                        (*skb)->data - NET_IP_ALIGN,
4187                                        length + NET_IP_ALIGN);
4188         /* save the skb in buffer_info as good */
4189         buffer_info->skb = *skb;
4190         *skb = new_skb;
4191 }
4192
4193 /**
4194  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4195  * @adapter: board private structure
4196  * @rx_ring: ring to clean
4197  * @work_done: amount of napi work completed this call
4198  * @work_to_do: max amount of work allowed for this call to do
4199  */
4200 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4201                                struct e1000_rx_ring *rx_ring,
4202                                int *work_done, int work_to_do)
4203 {
4204         struct e1000_hw *hw = &adapter->hw;
4205         struct net_device *netdev = adapter->netdev;
4206         struct pci_dev *pdev = adapter->pdev;
4207         struct e1000_rx_desc *rx_desc, *next_rxd;
4208         struct e1000_buffer *buffer_info, *next_buffer;
4209         unsigned long flags;
4210         u32 length;
4211         unsigned int i;
4212         int cleaned_count = 0;
4213         bool cleaned = false;
4214         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4215
4216         i = rx_ring->next_to_clean;
4217         rx_desc = E1000_RX_DESC(*rx_ring, i);
4218         buffer_info = &rx_ring->buffer_info[i];
4219
4220         while (rx_desc->status & E1000_RXD_STAT_DD) {
4221                 struct sk_buff *skb;
4222                 u8 status;
4223
4224                 if (*work_done >= work_to_do)
4225                         break;
4226                 (*work_done)++;
4227                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4228
4229                 status = rx_desc->status;
4230                 skb = buffer_info->skb;
4231                 buffer_info->skb = NULL;
4232
4233                 prefetch(skb->data - NET_IP_ALIGN);
4234
4235                 if (++i == rx_ring->count) i = 0;
4236                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4237                 prefetch(next_rxd);
4238
4239                 next_buffer = &rx_ring->buffer_info[i];
4240
4241                 cleaned = true;
4242                 cleaned_count++;
4243                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4244                                  buffer_info->length, DMA_FROM_DEVICE);
4245                 buffer_info->dma = 0;
4246
4247                 length = le16_to_cpu(rx_desc->length);
4248                 /* !EOP means multiple descriptors were used to store a single
4249                  * packet; if that's the case we need to toss it.  In fact, we
4250                  * need to toss every packet with the EOP bit clear and the next
4251                  * frame that _does_ have the EOP bit set, as it is by
4252                  * definition only a frame fragment
4253                  */
4254                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4255                         adapter->discarding = true;
4256
4257                 if (adapter->discarding) {
4258                         /* All receives must fit into a single buffer */
4259                         e_dbg("Receive packet consumed multiple buffers\n");
4260                         /* recycle */
4261                         buffer_info->skb = skb;
4262                         if (status & E1000_RXD_STAT_EOP)
4263                                 adapter->discarding = false;
4264                         goto next_desc;
4265                 }
4266
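                /* TBI workaround: a frame ending in a carrier-extend symbol can
                 * be flagged with a spurious error.  TBI_ACCEPT() detects that
                 * case so the frame is kept, with the extra byte stripped and
                 * the stats adjusted; any other frame error gets the buffer
                 * recycled. */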
4267                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4268                         u8 last_byte = *(skb->data + length - 1);
4269                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4270                                        last_byte)) {
4271                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4272                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4273                                                        length, skb->data);
4274                                 spin_unlock_irqrestore(&adapter->stats_lock,
4275                                                        flags);
4276                                 length--;
4277                         } else {
4278                                 /* recycle */
4279                                 buffer_info->skb = skb;
4280                                 goto next_desc;
4281                         }
4282                 }
4283
4284                 /* adjust length to remove Ethernet CRC; this must be
4285                  * done after the TBI_ACCEPT workaround above */
4286                 length -= 4;
4287
4288                 /* probably a little skewed due to removing CRC */
4289                 total_rx_bytes += length;
4290                 total_rx_packets++;
4291
4292                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4293
4294                 skb_put(skb, length);
4295
4296                 /* Receive Checksum Offload */
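                /* fold the separate 8-bit status and error fields into the
                 * single status/error word e1000_rx_checksum() expects (errors
                 * in bits 31:24) */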
4297                 e1000_rx_checksum(adapter,
4298                                   (u32)(status) |
4299                                   ((u32)(rx_desc->errors) << 24),
4300                                   le16_to_cpu(rx_desc->csum), skb);
4301
4302                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4303
4304 next_desc:
4305                 rx_desc->status = 0;
4306
4307                 /* return some buffers to hardware, one at a time is too slow */
4308                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4309                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4310                         cleaned_count = 0;
4311                 }
4312
4313                 /* use prefetched values */
4314                 rx_desc = next_rxd;
4315                 buffer_info = next_buffer;
4316         }
4317         rx_ring->next_to_clean = i;
4318
4319         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4320         if (cleaned_count)
4321                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4322
4323         adapter->total_rx_packets += total_rx_packets;
4324         adapter->total_rx_bytes += total_rx_bytes;
4325         netdev->stats.rx_bytes += total_rx_bytes;
4326         netdev->stats.rx_packets += total_rx_packets;
4327         return cleaned;
4328 }
4329
4330 /**
4331  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4332  * @adapter: address of board private structure
4333  * @rx_ring: pointer to receive ring structure
4334  * @cleaned_count: number of buffers to allocate this pass
4335  **/
4336
4337 static void
4338 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4339                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4340 {
4341         struct net_device *netdev = adapter->netdev;
4342         struct pci_dev *pdev = adapter->pdev;
4343         struct e1000_rx_desc *rx_desc;
4344         struct e1000_buffer *buffer_info;
4345         struct sk_buff *skb;
4346         unsigned int i;
4347         unsigned int bufsz = 256 - 16; /* for skb_reserve */
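        /* the jumbo path DMAs packet data into pages; this small skb is only a
         * shell that either has page fragments attached to it or, for packets
         * short enough to copybreak, receives a copy in its tailroom */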
4348
4349         i = rx_ring->next_to_use;
4350         buffer_info = &rx_ring->buffer_info[i];
4351
4352         while (cleaned_count--) {
4353                 skb = buffer_info->skb;
4354                 if (skb) {
4355                         skb_trim(skb, 0);
4356                         goto check_page;
4357                 }
4358
4359                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4360                 if (unlikely(!skb)) {
4361                         /* Better luck next round */
4362                         adapter->alloc_rx_buff_failed++;
4363                         break;
4364                 }
4365
4366                 /* Fix for errata 23, can't cross 64kB boundary */
4367                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4368                         struct sk_buff *oldskb = skb;
4369                         e_err(rx_err, "skb align check failed: %u bytes at "
4370                               "%p\n", bufsz, skb->data);
4371                         /* Try again, without freeing the previous */
4372                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4373                         /* Failed allocation, critical failure */
4374                         if (!skb) {
4375                                 dev_kfree_skb(oldskb);
4376                                 adapter->alloc_rx_buff_failed++;
4377                                 break;
4378                         }
4379
4380                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4381                                 /* give up */
4382                                 dev_kfree_skb(skb);
4383                                 dev_kfree_skb(oldskb);
4384                                 break; /* while (cleaned_count--) */
4385                         }
4386
4387                         /* Use new allocation */
4388                         dev_kfree_skb(oldskb);
4389                 }
4390                 buffer_info->skb = skb;
4391                 buffer_info->length = adapter->rx_buffer_len;
4392 check_page:
4393                 /* allocate a new page if necessary */
4394                 if (!buffer_info->page) {
4395                         buffer_info->page = alloc_page(GFP_ATOMIC);
4396                         if (unlikely(!buffer_info->page)) {
4397                                 adapter->alloc_rx_buff_failed++;
4398                                 break;
4399                         }
4400                 }
4401
4402                 if (!buffer_info->dma) {
4403                         buffer_info->dma = dma_map_page(&pdev->dev,
4404                                                         buffer_info->page, 0,
4405                                                         buffer_info->length,
4406                                                         DMA_FROM_DEVICE);
4407                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4408                                 put_page(buffer_info->page);
4409                                 dev_kfree_skb(skb);
4410                                 buffer_info->page = NULL;
4411                                 buffer_info->skb = NULL;
4412                                 buffer_info->dma = 0;
4413                                 adapter->alloc_rx_buff_failed++;
4414                                 break; /* while !buffer_info->skb */
4415                         }
4416                 }
4417
4418                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4419                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4420
4421                 if (unlikely(++i == rx_ring->count))
4422                         i = 0;
4423                 buffer_info = &rx_ring->buffer_info[i];
4424         }
4425
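        /* i is the next descriptor to fill; the tail register is written with
         * the index just behind it (the last descriptor handed back to the
         * hardware), wrapping at the end of the ring */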
4426         if (likely(rx_ring->next_to_use != i)) {
4427                 rx_ring->next_to_use = i;
4428                 if (unlikely(i-- == 0))
4429                         i = (rx_ring->count - 1);
4430
4431                 /* Force memory writes to complete before letting h/w
4432                  * know there are new descriptors to fetch.  (Only
4433                  * applicable for weak-ordered memory model archs,
4434                  * such as IA-64). */
4435                 wmb();
4436                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4437         }
4438 }
4439
4440 /**
4441  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4442  * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
4443  **/
4444
4445 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4446                                    struct e1000_rx_ring *rx_ring,
4447                                    int cleaned_count)
4448 {
4449         struct e1000_hw *hw = &adapter->hw;
4450         struct net_device *netdev = adapter->netdev;
4451         struct pci_dev *pdev = adapter->pdev;
4452         struct e1000_rx_desc *rx_desc;
4453         struct e1000_buffer *buffer_info;
4454         struct sk_buff *skb;
4455         unsigned int i;
4456         unsigned int bufsz = adapter->rx_buffer_len;
4457
4458         i = rx_ring->next_to_use;
4459         buffer_info = &rx_ring->buffer_info[i];
4460
4461         while (cleaned_count--) {
4462                 skb = buffer_info->skb;
4463                 if (skb) {
4464                         skb_trim(skb, 0);
4465                         goto map_skb;
4466                 }
4467
4468                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4469                 if (unlikely(!skb)) {
4470                         /* Better luck next round */
4471                         adapter->alloc_rx_buff_failed++;
4472                         break;
4473                 }
4474
4475                 /* Fix for errata 23, can't cross 64kB boundary */
4476                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4477                         struct sk_buff *oldskb = skb;
4478                         e_err(rx_err, "skb align check failed: %u bytes at "
4479                               "%p\n", bufsz, skb->data);
4480                         /* Try again, without freeing the previous */
4481                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4482                         /* Failed allocation, critical failure */
4483                         if (!skb) {
4484                                 dev_kfree_skb(oldskb);
4485                                 adapter->alloc_rx_buff_failed++;
4486                                 break;
4487                         }
4488
4489                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4490                                 /* give up */
4491                                 dev_kfree_skb(skb);
4492                                 dev_kfree_skb(oldskb);
4493                                 adapter->alloc_rx_buff_failed++;
4494                                 break; /* while !buffer_info->skb */
4495                         }
4496
4497                         /* Use new allocation */
4498                         dev_kfree_skb(oldskb);
4499                 }
4500                 buffer_info->skb = skb;
4501                 buffer_info->length = adapter->rx_buffer_len;
4502 map_skb:
4503                 buffer_info->dma = dma_map_single(&pdev->dev,
4504                                                   skb->data,
4505                                                   buffer_info->length,
4506                                                   DMA_FROM_DEVICE);
4507                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4508                         dev_kfree_skb(skb);
4509                         buffer_info->skb = NULL;
4510                         buffer_info->dma = 0;
4511                         adapter->alloc_rx_buff_failed++;
4512                         break; /* while !buffer_info->skb */
4513                 }
4514
4515                 /*
4516                  * XXX if it was allocated cleanly it will never map to a
4517                  * boundary crossing
4518                  */
4519
4520                 /* Fix for errata 23, can't cross 64kB boundary */
4521                 if (!e1000_check_64k_bound(adapter,
4522                                         (void *)(unsigned long)buffer_info->dma,
4523                                         adapter->rx_buffer_len)) {
4524                         e_err(rx_err, "dma align check failed: %u bytes at "
4525                               "%p\n", adapter->rx_buffer_len,
4526                               (void *)(unsigned long)buffer_info->dma);
4527                         dev_kfree_skb(skb);
4528                         buffer_info->skb = NULL;
4529
4530                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4531                                          adapter->rx_buffer_len,
4532                                          DMA_FROM_DEVICE);
4533                         buffer_info->dma = 0;
4534
4535                         adapter->alloc_rx_buff_failed++;
4536                         break; /* while !buffer_info->skb */
4537                 }
4538                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4539                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4540
4541                 if (unlikely(++i == rx_ring->count))
4542                         i = 0;
4543                 buffer_info = &rx_ring->buffer_info[i];
4544         }
4545
4546         if (likely(rx_ring->next_to_use != i)) {
4547                 rx_ring->next_to_use = i;
4548                 if (unlikely(i-- == 0))
4549                         i = (rx_ring->count - 1);
4550
4551                 /* Force memory writes to complete before letting h/w
4552                  * know there are new descriptors to fetch.  (Only
4553                  * applicable for weak-ordered memory model archs,
4554                  * such as IA-64). */
4555                 wmb();
4556                 writel(i, hw->hw_addr + rx_ring->rdt);
4557         }
4558 }
4559
4560 /**
4561  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4562  * @adapter: board private structure
4563  **/
4564
4565 static void e1000_smartspeed(struct e1000_adapter *adapter)
4566 {
4567         struct e1000_hw *hw = &adapter->hw;
4568         u16 phy_status;
4569         u16 phy_ctrl;
4570
4571         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4572            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4573                 return;
4574
4575         if (adapter->smartspeed == 0) {
4576                 /* If Master/Slave config fault is asserted twice,
4577                  * we assume back-to-back faults */
4578                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4579                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4580                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4581                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4582                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4583                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4584                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4585                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4586                                             phy_ctrl);
4587                         adapter->smartspeed++;
4588                         if (!e1000_phy_setup_autoneg(hw) &&
4589                            !e1000_read_phy_reg(hw, PHY_CTRL,
4590                                                &phy_ctrl)) {
4591                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4592                                              MII_CR_RESTART_AUTO_NEG);
4593                                 e1000_write_phy_reg(hw, PHY_CTRL,
4594                                                     phy_ctrl);
4595                         }
4596                 }
4597                 return;
4598         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4599                 /* If still no link, perhaps using 2/3 pair cable */
4600                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4601                 phy_ctrl |= CR_1000T_MS_ENABLE;
4602                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4603                 if (!e1000_phy_setup_autoneg(hw) &&
4604                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4605                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4606                                      MII_CR_RESTART_AUTO_NEG);
4607                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4608                 }
4609         }
4610         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4611         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4612                 adapter->smartspeed = 0;
4613 }
4614
4615 /**
4616  * e1000_ioctl - handle device-specific ioctl requests
4617  * @netdev: network interface device structure
4618  * @ifr: interface request structure
4619  * @cmd: ioctl command
4620  **/
4621
4622 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4623 {
4624         switch (cmd) {
4625         case SIOCGMIIPHY:
4626         case SIOCGMIIREG:
4627         case SIOCSMIIREG:
4628                 return e1000_mii_ioctl(netdev, ifr, cmd);
4629         default:
4630                 return -EOPNOTSUPP;
4631         }
4632 }
4633
4634 /**
4635  * e1000_mii_ioctl - handle MII ioctl requests
4636  * @netdev: network interface device structure
4637  * @ifr: interface request structure
4638  * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
4639  **/
4640
4641 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4642                            int cmd)
4643 {
4644         struct e1000_adapter *adapter = netdev_priv(netdev);
4645         struct e1000_hw *hw = &adapter->hw;
4646         struct mii_ioctl_data *data = if_mii(ifr);
4647         int retval;
4648         u16 mii_reg;
4649         unsigned long flags;
4650
4651         if (hw->media_type != e1000_media_type_copper)
4652                 return -EOPNOTSUPP;
4653
4654         switch (cmd) {
4655         case SIOCGMIIPHY:
4656                 data->phy_id = hw->phy_addr;
4657                 break;
4658         case SIOCGMIIREG:
4659                 spin_lock_irqsave(&adapter->stats_lock, flags);
4660                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4661                                    &data->val_out)) {
4662                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4663                         return -EIO;
4664                 }
4665                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4666                 break;
4667         case SIOCSMIIREG:
4668                 if (data->reg_num & ~(0x1F))
4669                         return -EFAULT;
4670                 mii_reg = data->val_in;
4671                 spin_lock_irqsave(&adapter->stats_lock, flags);
4672                 if (e1000_write_phy_reg(hw, data->reg_num,
4673                                         mii_reg)) {
4674                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4675                         return -EIO;
4676                 }
4677                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4678                 if (hw->media_type == e1000_media_type_copper) {
4679                         switch (data->reg_num) {
4680                         case PHY_CTRL:
4681                                 if (mii_reg & MII_CR_POWER_DOWN)
4682                                         break;
4683                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4684                                         hw->autoneg = 1;
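                                        /* 0x2F advertises 10/100 half and full
                                         * duplex plus 1000 full (everything
                                         * except 1000 half) */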
4685                                         hw->autoneg_advertised = 0x2F;
4686                                 } else {
4687                                         u32 speed;
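                                        /* decode forced speed/duplex from the
                                         * standard BMCR bits: bit 6 selects
                                         * 1000, bit 13 selects 100, bit 8
                                         * selects full duplex */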
4688                                         if (mii_reg & 0x40)
4689                                                 speed = SPEED_1000;
4690                                         else if (mii_reg & 0x2000)
4691                                                 speed = SPEED_100;
4692                                         else
4693                                                 speed = SPEED_10;
4694                                         retval = e1000_set_spd_dplx(
4695                                                 adapter, speed,
4696                                                 ((mii_reg & 0x100)
4697                                                  ? DUPLEX_FULL :
4698                                                  DUPLEX_HALF));
4699                                         if (retval)
4700                                                 return retval;
4701                                 }
4702                                 if (netif_running(adapter->netdev))
4703                                         e1000_reinit_locked(adapter);
4704                                 else
4705                                         e1000_reset(adapter);
4706                                 break;
4707                         case M88E1000_PHY_SPEC_CTRL:
4708                         case M88E1000_EXT_PHY_SPEC_CTRL:
4709                                 if (e1000_phy_reset(hw))
4710                                         return -EIO;
4711                                 break;
4712                         }
4713                 } else {
4714                         switch (data->reg_num) {
4715                         case PHY_CTRL:
4716                                 if (mii_reg & MII_CR_POWER_DOWN)
4717                                         break;
4718                                 if (netif_running(adapter->netdev))
4719                                         e1000_reinit_locked(adapter);
4720                                 else
4721                                         e1000_reset(adapter);
4722                                 break;
4723                         }
4724                 }
4725                 break;
4726         default:
4727                 return -EOPNOTSUPP;
4728         }
4729         return E1000_SUCCESS;
4730 }
4731
4732 void e1000_pci_set_mwi(struct e1000_hw *hw)
4733 {
4734         struct e1000_adapter *adapter = hw->back;
4735         int ret_val = pci_set_mwi(adapter->pdev);
4736
4737         if (ret_val)
4738                 e_err(probe, "Error in setting MWI\n");
4739 }
4740
4741 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4742 {
4743         struct e1000_adapter *adapter = hw->back;
4744
4745         pci_clear_mwi(adapter->pdev);
4746 }
4747
4748 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4749 {
4750         struct e1000_adapter *adapter = hw->back;
4751         return pcix_get_mmrbc(adapter->pdev);
4752 }
4753
4754 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4755 {
4756         struct e1000_adapter *adapter = hw->back;
4757         pcix_set_mmrbc(adapter->pdev, mmrbc);
4758 }
4759
4760 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4761 {
4762         outl(value, port);
4763 }
4764
4765 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4766 {
4767         u16 vid;
4768
4769         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4770                 return true;
4771         return false;
4772 }
4773
4774 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4775                                      bool filter_on)
4776 {
4777         struct e1000_hw *hw = &adapter->hw;
4778         u32 rctl;
4779
4780         if (!test_bit(__E1000_DOWN, &adapter->flags))
4781                 e1000_irq_disable(adapter);
4782
4783         if (filter_on) {
4784                 /* enable VLAN receive filtering */
4785                 rctl = er32(RCTL);
4786                 rctl &= ~E1000_RCTL_CFIEN;
4787                 if (!(adapter->netdev->flags & IFF_PROMISC))
4788                         rctl |= E1000_RCTL_VFE;
4789                 ew32(RCTL, rctl);
4790                 e1000_update_mng_vlan(adapter);
4791         } else {
4792                 /* disable VLAN receive filtering */
4793                 rctl = er32(RCTL);
4794                 rctl &= ~E1000_RCTL_VFE;
4795                 ew32(RCTL, rctl);
4796         }
4797
4798         if (!test_bit(__E1000_DOWN, &adapter->flags))
4799                 e1000_irq_enable(adapter);
4800 }
4801
4802 static void e1000_vlan_mode(struct net_device *netdev,
4803         netdev_features_t features)
4804 {
4805         struct e1000_adapter *adapter = netdev_priv(netdev);
4806         struct e1000_hw *hw = &adapter->hw;
4807         u32 ctrl;
4808
4809         if (!test_bit(__E1000_DOWN, &adapter->flags))
4810                 e1000_irq_disable(adapter);
4811
4812         ctrl = er32(CTRL);
4813         if (features & NETIF_F_HW_VLAN_RX) {
4814                 /* enable VLAN tag insert/strip */
4815                 ctrl |= E1000_CTRL_VME;
4816         } else {
4817                 /* disable VLAN tag insert/strip */
4818                 ctrl &= ~E1000_CTRL_VME;
4819         }
4820         ew32(CTRL, ctrl);
4821
4822         if (!test_bit(__E1000_DOWN, &adapter->flags))
4823                 e1000_irq_enable(adapter);
4824 }
4825
4826 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4827 {
4828         struct e1000_adapter *adapter = netdev_priv(netdev);
4829         struct e1000_hw *hw = &adapter->hw;
4830         u32 vfta, index;
4831
4832         if ((hw->mng_cookie.status &
4833              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4834             (vid == adapter->mng_vlan_id))
4835                 return 0;
4836
4837         if (!e1000_vlan_used(adapter))
4838                 e1000_vlan_filter_on_off(adapter, true);
4839
4840         /* add VID to filter table */
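        /* the VLAN filter table (VFTA) is 128 32-bit registers; vid[11:5]
         * selects the register and vid[4:0] the bit within it */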
4841         index = (vid >> 5) & 0x7F;
4842         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4843         vfta |= (1 << (vid & 0x1F));
4844         e1000_write_vfta(hw, index, vfta);
4845
4846         set_bit(vid, adapter->active_vlans);
4847
4848         return 0;
4849 }
4850
4851 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4852 {
4853         struct e1000_adapter *adapter = netdev_priv(netdev);
4854         struct e1000_hw *hw = &adapter->hw;
4855         u32 vfta, index;
4856
4857         if (!test_bit(__E1000_DOWN, &adapter->flags))
4858                 e1000_irq_disable(adapter);
4859         if (!test_bit(__E1000_DOWN, &adapter->flags))
4860                 e1000_irq_enable(adapter);
4861
4862         /* remove VID from filter table */
4863         index = (vid >> 5) & 0x7F;
4864         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4865         vfta &= ~(1 << (vid & 0x1F));
4866         e1000_write_vfta(hw, index, vfta);
4867
4868         clear_bit(vid, adapter->active_vlans);
4869
4870         if (!e1000_vlan_used(adapter))
4871                 e1000_vlan_filter_on_off(adapter, false);
4872
4873         return 0;
4874 }
4875
4876 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4877 {
4878         u16 vid;
4879
4880         if (!e1000_vlan_used(adapter))
4881                 return;
4882
4883         e1000_vlan_filter_on_off(adapter, true);
4884         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4885                 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4886 }
4887
4888 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4889 {
4890         struct e1000_hw *hw = &adapter->hw;
4891
4892         hw->autoneg = 0;
4893
4894         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4895          * for the switch() below to work */
4896         if ((spd & 1) || (dplx & ~1))
4897                 goto err_inval;
4898
4899         /* Fiber NICs only allow 1000 Mbps full duplex */
4900         if ((hw->media_type == e1000_media_type_fiber) &&
4901             (spd != SPEED_1000 ||
4902              dplx != DUPLEX_FULL))
4903                 goto err_inval;
4904
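        /* the speed values are even and dplx is 0 or 1 (checked above), so
         * spd + dplx uniquely identifies each speed/duplex combination */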
4905         switch (spd + dplx) {
4906         case SPEED_10 + DUPLEX_HALF:
4907                 hw->forced_speed_duplex = e1000_10_half;
4908                 break;
4909         case SPEED_10 + DUPLEX_FULL:
4910                 hw->forced_speed_duplex = e1000_10_full;
4911                 break;
4912         case SPEED_100 + DUPLEX_HALF:
4913                 hw->forced_speed_duplex = e1000_100_half;
4914                 break;
4915         case SPEED_100 + DUPLEX_FULL:
4916                 hw->forced_speed_duplex = e1000_100_full;
4917                 break;
4918         case SPEED_1000 + DUPLEX_FULL:
4919                 hw->autoneg = 1;
4920                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4921                 break;
4922         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4923         default:
4924                 goto err_inval;
4925         }
4926         return 0;
4927
4928 err_inval:
4929         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4930         return -EINVAL;
4931 }
4932
4933 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4934 {
4935         struct net_device *netdev = pci_get_drvdata(pdev);
4936         struct e1000_adapter *adapter = netdev_priv(netdev);
4937         struct e1000_hw *hw = &adapter->hw;
4938         u32 ctrl, ctrl_ext, rctl, status;
4939         u32 wufc = adapter->wol;
4940 #ifdef CONFIG_PM
4941         int retval = 0;
4942 #endif
4943
4944         netif_device_detach(netdev);
4945
4946         if (netif_running(netdev)) {
4947                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4948                 e1000_down(adapter);
4949         }
4950
4951 #ifdef CONFIG_PM
4952         retval = pci_save_state(pdev);
4953         if (retval)
4954                 return retval;
4955 #endif
4956
4957         status = er32(STATUS);
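        /* if the link is already up, don't arm wake on link status change */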
4958         if (status & E1000_STATUS_LU)
4959                 wufc &= ~E1000_WUFC_LNKC;
4960
4961         if (wufc) {
4962                 e1000_setup_rctl(adapter);
4963                 e1000_set_rx_mode(netdev);
4964
4965                 rctl = er32(RCTL);
4966
4967                 /* turn on all-multi mode if wake on multicast is enabled */
4968                 if (wufc & E1000_WUFC_MC)
4969                         rctl |= E1000_RCTL_MPE;
4970
4971                 /* enable receives in the hardware */
4972                 ew32(RCTL, rctl | E1000_RCTL_EN);
4973
4974                 if (hw->mac_type >= e1000_82540) {
4975                         ctrl = er32(CTRL);
4976                         /* advertise wake from D3Cold */
4977                         #define E1000_CTRL_ADVD3WUC 0x00100000
4978                         /* phy power management enable */
4979                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4980                         ctrl |= E1000_CTRL_ADVD3WUC |
4981                                 E1000_CTRL_EN_PHY_PWR_MGMT;
4982                         ew32(CTRL, ctrl);
4983                 }
4984
4985                 if (hw->media_type == e1000_media_type_fiber ||
4986                     hw->media_type == e1000_media_type_internal_serdes) {
4987                         /* keep the laser running in D3 */
4988                         ctrl_ext = er32(CTRL_EXT);
4989                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4990                         ew32(CTRL_EXT, ctrl_ext);
4991                 }
4992
4993                 ew32(WUC, E1000_WUC_PME_EN);
4994                 ew32(WUFC, wufc);
4995         } else {
4996                 ew32(WUC, 0);
4997                 ew32(WUFC, 0);
4998         }
4999
5000         e1000_release_manageability(adapter);
5001
5002         *enable_wake = !!wufc;
5003
5004         /* make sure adapter isn't asleep if manageability is enabled */
5005         if (adapter->en_mng_pt)
5006                 *enable_wake = true;
5007
5008         if (netif_running(netdev))
5009                 e1000_free_irq(adapter);
5010
5011         pci_disable_device(pdev);
5012
5013         return 0;
5014 }
5015
5016 #ifdef CONFIG_PM
5017 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5018 {
5019         int retval;
5020         bool wake;
5021
5022         retval = __e1000_shutdown(pdev, &wake);
5023         if (retval)
5024                 return retval;
5025
5026         if (wake) {
5027                 pci_prepare_to_sleep(pdev);
5028         } else {
5029                 pci_wake_from_d3(pdev, false);
5030                 pci_set_power_state(pdev, PCI_D3hot);
5031         }
5032
5033         return 0;
5034 }
5035
5036 static int e1000_resume(struct pci_dev *pdev)
5037 {
5038         struct net_device *netdev = pci_get_drvdata(pdev);
5039         struct e1000_adapter *adapter = netdev_priv(netdev);
5040         struct e1000_hw *hw = &adapter->hw;
5041         u32 err;
5042
5043         pci_set_power_state(pdev, PCI_D0);
5044         pci_restore_state(pdev);
5045         pci_save_state(pdev);
5046
5047         if (adapter->need_ioport)
5048                 err = pci_enable_device(pdev);
5049         else
5050                 err = pci_enable_device_mem(pdev);
5051         if (err) {
5052                 pr_err("Cannot enable PCI device from suspend\n");
5053                 return err;
5054         }
5055         pci_set_master(pdev);
5056
5057         pci_enable_wake(pdev, PCI_D3hot, 0);
5058         pci_enable_wake(pdev, PCI_D3cold, 0);
5059
5060         if (netif_running(netdev)) {
5061                 err = e1000_request_irq(adapter);
5062                 if (err)
5063                         return err;
5064         }
5065
5066         e1000_power_up_phy(adapter);
5067         e1000_reset(adapter);
5068         ew32(WUS, ~0);
5069
5070         e1000_init_manageability(adapter);
5071
5072         if (netif_running(netdev))
5073                 e1000_up(adapter);
5074
5075         netif_device_attach(netdev);
5076
5077         return 0;
5078 }
5079 #endif
5080
5081 static void e1000_shutdown(struct pci_dev *pdev)
5082 {
5083         bool wake;
5084
5085         __e1000_shutdown(pdev, &wake);
5086
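        /* arm wake and drop to D3hot only when the system is really powering
         * off; a plain reboot leaves the device as-is */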
5087         if (system_state == SYSTEM_POWER_OFF) {
5088                 pci_wake_from_d3(pdev, wake);
5089                 pci_set_power_state(pdev, PCI_D3hot);
5090         }
5091 }
5092
5093 #ifdef CONFIG_NET_POLL_CONTROLLER
5094 /*
5095  * Polling 'interrupt' - used by things like netconsole to send skbs
5096  * without having to re-enable interrupts. It's not called while
5097  * the interrupt routine is executing.
5098  */
5099 static void e1000_netpoll(struct net_device *netdev)
5100 {
5101         struct e1000_adapter *adapter = netdev_priv(netdev);
5102
5103         disable_irq(adapter->pdev->irq);
5104         e1000_intr(adapter->pdev->irq, netdev);
5105         enable_irq(adapter->pdev->irq);
5106 }
5107 #endif
5108
5109 /**
5110  * e1000_io_error_detected - called when PCI error is detected
5111  * @pdev: Pointer to PCI device
5112  * @state: The current pci connection state
5113  *
5114  * This function is called after a PCI bus error affecting
5115  * this device has been detected.
5116  */
5117 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5118                                                 pci_channel_state_t state)
5119 {
5120         struct net_device *netdev = pci_get_drvdata(pdev);
5121         struct e1000_adapter *adapter = netdev_priv(netdev);
5122
5123         netif_device_detach(netdev);
5124
5125         if (state == pci_channel_io_perm_failure)
5126                 return PCI_ERS_RESULT_DISCONNECT;
5127
5128         if (netif_running(netdev))
5129                 e1000_down(adapter);
5130         pci_disable_device(pdev);
5131
5132         /* Request a slot reset. */
5133         return PCI_ERS_RESULT_NEED_RESET;
5134 }
5135
5136 /**
5137  * e1000_io_slot_reset - called after the pci bus has been reset.
5138  * @pdev: Pointer to PCI device
5139  *
5140  * Restart the card from scratch, as if from a cold-boot. Implementation
5141  * resembles the first-half of the e1000_resume routine.
5142  */
5143 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5144 {
5145         struct net_device *netdev = pci_get_drvdata(pdev);
5146         struct e1000_adapter *adapter = netdev_priv(netdev);
5147         struct e1000_hw *hw = &adapter->hw;
5148         int err;
5149
5150         if (adapter->need_ioport)
5151                 err = pci_enable_device(pdev);
5152         else
5153                 err = pci_enable_device_mem(pdev);
5154         if (err) {
5155                 pr_err("Cannot re-enable PCI device after reset.\n");
5156                 return PCI_ERS_RESULT_DISCONNECT;
5157         }
5158         pci_set_master(pdev);
5159
5160         pci_enable_wake(pdev, PCI_D3hot, 0);
5161         pci_enable_wake(pdev, PCI_D3cold, 0);
5162
5163         e1000_reset(adapter);
5164         ew32(WUS, ~0);
5165
5166         return PCI_ERS_RESULT_RECOVERED;
5167 }
5168
5169 /**
5170  * e1000_io_resume - called when traffic can start flowing again.
5171  * @pdev: Pointer to PCI device
5172  *
5173  * This callback is called when the error recovery driver tells us that
5174  * it's OK to resume normal operation. Implementation resembles the
5175  * second-half of the e1000_resume routine.
5176  */
5177 static void e1000_io_resume(struct pci_dev *pdev)
5178 {
5179         struct net_device *netdev = pci_get_drvdata(pdev);
5180         struct e1000_adapter *adapter = netdev_priv(netdev);
5181
5182         e1000_init_manageability(adapter);
5183
5184         if (netif_running(netdev)) {
5185                 if (e1000_up(adapter)) {
5186                         pr_info("can't bring device back up after reset\n");
5187                         return;
5188                 }
5189         }
5190
5191         netif_device_attach(netdev);
5192 }
5193
5194 /* e1000_main.c */