1 /*******************************************************************************
2
3   
4   Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
5   
6   This program is free software; you can redistribute it and/or modify it 
7   under the terms of the GNU General Public License as published by the Free 
8   Software Foundation; either version 2 of the License, or (at your option) 
9   any later version.
10   
11   This program is distributed in the hope that it will be useful, but WITHOUT 
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
14   more details.
15   
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc., 59 
18   Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19   
20   The full GNU General Public License is included in this distribution in the
21   file called LICENSE.
22   
23   Contact Information:
24   Linux NICS <linux.nics@intel.com>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "ixgb.h"
30
31 /* Change Log
32  * 1.0.96 04/19/05
33  * - Make needlessly global code static -- bunk@stusta.de
34  * - ethtool cleanup -- shemminger@osdl.org
35  * - Support for MODULE_VERSION -- linville@tuxdriver.com
36  * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
37  * 1.0.88 01/05/05
38  * - include fix to the condition that determines when to quit NAPI - Robert Olsson
39  * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
40  * 1.0.84 10/26/04
41  * - reset buffer_info->dma in Tx resource cleanup logic
42  * 1.0.83 10/12/04
43  * - sparse cleanup - shemminger@osdl.org
44  * - fix tx resource cleanup logic
45  */
46
47 char ixgb_driver_name[] = "ixgb";
48 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
49
50 #ifndef CONFIG_IXGB_NAPI
51 #define DRIVERNAPI
52 #else
53 #define DRIVERNAPI "-NAPI"
54 #endif
55 #define DRV_VERSION             "1.0.104-k2"DRIVERNAPI
56 char ixgb_driver_version[] = DRV_VERSION;
57 static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
58
59 /* ixgb_pci_tbl - PCI Device ID Table
60  *
61  * Wildcard entries (PCI_ANY_ID) should come last
62  * Last entry must be all 0s
63  *
64  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
65  *   Class, Class Mask, private data (not used) }
66  */
67 static struct pci_device_id ixgb_pci_tbl[] = {
68         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
69          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
70         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
71          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
72         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
73          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
74         {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,  
75          PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
76
77         /* required last entry */
78         {0,}
79 };
80
81 MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
82
83 /* Local Function Prototypes */
84
85 int ixgb_up(struct ixgb_adapter *adapter);
86 void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
87 void ixgb_reset(struct ixgb_adapter *adapter);
88 int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
89 int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
90 void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
91 void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
92 void ixgb_update_stats(struct ixgb_adapter *adapter);
93
94 static int ixgb_init_module(void);
95 static void ixgb_exit_module(void);
96 static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
97 static void __devexit ixgb_remove(struct pci_dev *pdev);
98 static int ixgb_sw_init(struct ixgb_adapter *adapter);
99 static int ixgb_open(struct net_device *netdev);
100 static int ixgb_close(struct net_device *netdev);
101 static void ixgb_configure_tx(struct ixgb_adapter *adapter);
102 static void ixgb_configure_rx(struct ixgb_adapter *adapter);
103 static void ixgb_setup_rctl(struct ixgb_adapter *adapter);
104 static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter);
105 static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter);
106 static void ixgb_set_multi(struct net_device *netdev);
107 static void ixgb_watchdog(unsigned long data);
108 static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
109 static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
110 static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
111 static int ixgb_set_mac(struct net_device *netdev, void *p);
112 static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
113 static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
114
115 #ifdef CONFIG_IXGB_NAPI
116 static int ixgb_clean(struct net_device *netdev, int *budget);
117 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
118                                    int *work_done, int work_to_do);
119 #else
120 static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
121 #endif
122 static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
123 void ixgb_set_ethtool_ops(struct net_device *netdev);
124 static void ixgb_tx_timeout(struct net_device *dev);
125 static void ixgb_tx_timeout_task(struct net_device *dev);
126 static void ixgb_vlan_rx_register(struct net_device *netdev,
127                                   struct vlan_group *grp);
128 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
129 static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
130 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
131
132 #ifdef CONFIG_NET_POLL_CONTROLLER
133 /* for netdump / net console */
134 static void ixgb_netpoll(struct net_device *dev);
135 #endif
136
137 /* Exported from other modules */
138
139 extern void ixgb_check_options(struct ixgb_adapter *adapter);
140
141 static struct pci_driver ixgb_driver = {
142         .name     = ixgb_driver_name,
143         .id_table = ixgb_pci_tbl,
144         .probe    = ixgb_probe,
145         .remove   = __devexit_p(ixgb_remove),
146 };
147
148 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
149 MODULE_DESCRIPTION("Intel(R) PRO/10GbE Network Driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_VERSION);
152
153 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
154 static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
155 module_param(debug, int, 0);
156 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
157
158 /* some defines for controlling descriptor fetches in h/w */
159 #define RXDCTL_WTHRESH_DEFAULT 16       /* chip writes back at this many or RXT0 */
160 #define RXDCTL_PTHRESH_DEFAULT 0                /* chip considers prefetch below
161                                                  * this */
162 #define RXDCTL_HTHRESH_DEFAULT 0                /* chip will only prefetch if tail
163                                                  * is pushed this many descriptors
164                                                  * from head */
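/* These three defaults are shifted into their RXDCTL fields and written out
 * to the register in ixgb_configure_rx() below. */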
165
166 /**
167  * ixgb_init_module - Driver Registration Routine
168  *
169  * ixgb_init_module is the first routine called when the driver is
170  * loaded. All it does is register with the PCI subsystem.
171  **/
172
173 static int __init
174 ixgb_init_module(void)
175 {
176         printk(KERN_INFO "%s - version %s\n",
177                ixgb_driver_string, ixgb_driver_version);
178
179         printk(KERN_INFO "%s\n", ixgb_copyright);
180
181         return pci_module_init(&ixgb_driver);
182 }
183
184 module_init(ixgb_init_module);
185
186 /**
187  * ixgb_exit_module - Driver Exit Cleanup Routine
188  *
189  * ixgb_exit_module is called just before the driver is removed
190  * from memory.
191  **/
192
193 static void __exit
194 ixgb_exit_module(void)
195 {
196         pci_unregister_driver(&ixgb_driver);
197 }
198
199 module_exit(ixgb_exit_module);
200
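/* irq_sem acts as a disable count: ixgb_sw_init() seeds it to 1,
 * ixgb_irq_disable() bumps it and masks everything via IMC, and
 * ixgb_irq_enable() only re-arms the IMS bits once the count drops back to
 * zero, so nested disable/enable pairs stay balanced. */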
201 /**
202  * ixgb_irq_disable - Mask off interrupt generation on the NIC
203  * @adapter: board private structure
204  **/
205
206 static inline void
207 ixgb_irq_disable(struct ixgb_adapter *adapter)
208 {
209         atomic_inc(&adapter->irq_sem);
210         IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
211         IXGB_WRITE_FLUSH(&adapter->hw);
212         synchronize_irq(adapter->pdev->irq);
213 }
214
215 /**
216  * ixgb_irq_enable - Enable default interrupt generation settings
217  * @adapter: board private structure
218  **/
219
220 static inline void
221 ixgb_irq_enable(struct ixgb_adapter *adapter)
222 {
223         if(atomic_dec_and_test(&adapter->irq_sem)) {
224                 IXGB_WRITE_REG(&adapter->hw, IMS,
225                                IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
226                                IXGB_INT_LSC);
227                 IXGB_WRITE_FLUSH(&adapter->hw);
228         }
229 }
230
231 int
232 ixgb_up(struct ixgb_adapter *adapter)
233 {
234         struct net_device *netdev = adapter->netdev;
235         int err;
236         int max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
237         struct ixgb_hw *hw = &adapter->hw;
238
239         /* hardware has been reset, we need to reload some things */
240
241         ixgb_set_multi(netdev);
242
243         ixgb_restore_vlan(adapter);
244
245         ixgb_configure_tx(adapter);
246         ixgb_setup_rctl(adapter);
247         ixgb_configure_rx(adapter);
248         ixgb_alloc_rx_buffers(adapter);
249
250 #ifdef CONFIG_PCI_MSI
251         {
252         boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) & 
253                                                   IXGB_STATUS_PCIX_MODE) ? TRUE : FALSE;
254         adapter->have_msi = TRUE;
255
256         if (!pcix)
257            adapter->have_msi = FALSE;
258         else if((err = pci_enable_msi(adapter->pdev))) {
259                 DPRINTK(PROBE, ERR,
260                  "Unable to allocate MSI interrupt Error: %d\n", err);
261                 adapter->have_msi = FALSE;
262                 /* proceed to try to request regular interrupt */
263         }
264         }
265
266 #endif
267         if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
268                                   SA_SHIRQ | SA_SAMPLE_RANDOM,
269                                   netdev->name, netdev))) {
270                 DPRINTK(PROBE, ERR,
271                  "Unable to allocate interrupt Error: %d\n", err);
272                 return err;
273         }
274
275         /* disable interrupts and get the hardware into a known state */
276         IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
277
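        /* Reprogram MFS when the wanted size differs from the cached value or
         * from what the MFS register currently holds; CTRL0.JFE is also set
         * once the size exceeds a standard Ethernet frame plus FCS so the MAC
         * will accept jumbo frames. */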
278         if((hw->max_frame_size != max_frame) ||
279                 (hw->max_frame_size !=
280                 (IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
281
282                 hw->max_frame_size = max_frame;
283
284                 IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
285
286                 if(hw->max_frame_size >
287                    IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
288                         uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
289
290                         if(!(ctrl0 & IXGB_CTRL0_JFE)) {
291                                 ctrl0 |= IXGB_CTRL0_JFE;
292                                 IXGB_WRITE_REG(hw, CTRL0, ctrl0);
293                         }
294                 }
295         }
296
297         mod_timer(&adapter->watchdog_timer, jiffies);
298         ixgb_irq_enable(adapter);
299
300 #ifdef CONFIG_IXGB_NAPI
301         netif_poll_enable(netdev);
302 #endif
303         return 0;
304 }
305
306 void
307 ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
308 {
309         struct net_device *netdev = adapter->netdev;
310
311         ixgb_irq_disable(adapter);
312         free_irq(adapter->pdev->irq, netdev);
313 #ifdef CONFIG_PCI_MSI
314         if(adapter->have_msi == TRUE)
315                 pci_disable_msi(adapter->pdev);
316
317 #endif
318         if(kill_watchdog)
319                 del_timer_sync(&adapter->watchdog_timer);
320 #ifdef CONFIG_IXGB_NAPI
321         netif_poll_disable(netdev);
322 #endif
323         adapter->link_speed = 0;
324         adapter->link_duplex = 0;
325         netif_carrier_off(netdev);
326         netif_stop_queue(netdev);
327
328         ixgb_reset(adapter);
329         ixgb_clean_tx_ring(adapter);
330         ixgb_clean_rx_ring(adapter);
331 }
332
333 void
334 ixgb_reset(struct ixgb_adapter *adapter)
335 {
336
337         ixgb_adapter_stop(&adapter->hw);
338         if(!ixgb_init_hw(&adapter->hw))
339                 DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
340 }
341
342 /**
343  * ixgb_probe - Device Initialization Routine
344  * @pdev: PCI device information struct
345  * @ent: entry in ixgb_pci_tbl
346  *
347  * Returns 0 on success, negative on failure
348  *
349  * ixgb_probe initializes an adapter identified by a pci_dev structure.
350  * The OS initialization, configuration of the adapter private structure,
351  * and a hardware reset occur.
352  **/
353
354 static int __devinit
355 ixgb_probe(struct pci_dev *pdev,
356                 const struct pci_device_id *ent)
357 {
358         struct net_device *netdev = NULL;
359         struct ixgb_adapter *adapter;
360         static int cards_found = 0;
361         unsigned long mmio_start;
362         int mmio_len;
363         int pci_using_dac;
364         int i;
365         int err;
366
367         if((err = pci_enable_device(pdev)))
368                 return err;
369
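        /* Prefer 64-bit DMA (DAC); fall back to a 32-bit mask if the platform
         * cannot do it.  pci_using_dac is remembered so NETIF_F_HIGHDMA is
         * only advertised further down when 64-bit addressing actually
         * worked. */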
370         if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
371            !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
372                 pci_using_dac = 1;
373         } else {
374                 if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
375                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
376                         printk(KERN_ERR
377                          "ixgb: No usable DMA configuration, aborting\n");
378                         goto err_dma_mask;
379                 }
380                 pci_using_dac = 0;
381         }
382
383         if((err = pci_request_regions(pdev, ixgb_driver_name)))
384                 goto err_request_regions;
385
386         pci_set_master(pdev);
387
388         netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
389         if(!netdev) {
390                 err = -ENOMEM;
391                 goto err_alloc_etherdev;
392         }
393
394         SET_MODULE_OWNER(netdev);
395         SET_NETDEV_DEV(netdev, &pdev->dev);
396
397         pci_set_drvdata(pdev, netdev);
398         adapter = netdev_priv(netdev);
399         adapter->netdev = netdev;
400         adapter->pdev = pdev;
401         adapter->hw.back = adapter;
402         adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
403
404         mmio_start = pci_resource_start(pdev, BAR_0);
405         mmio_len = pci_resource_len(pdev, BAR_0);
406
407         adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
408         if(!adapter->hw.hw_addr) {
409                 err = -EIO;
410                 goto err_ioremap;
411         }
412
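        /* BAR 0 provides the MMIO register space mapped above; this scan just
         * records the first I/O-port BAR (if any) in hw.io_base. */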
413         for(i = BAR_1; i <= BAR_5; i++) {
414                 if(pci_resource_len(pdev, i) == 0)
415                         continue;
416                 if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
417                         adapter->hw.io_base = pci_resource_start(pdev, i);
418                         break;
419                 }
420         }
421
422         netdev->open = &ixgb_open;
423         netdev->stop = &ixgb_close;
424         netdev->hard_start_xmit = &ixgb_xmit_frame;
425         netdev->get_stats = &ixgb_get_stats;
426         netdev->set_multicast_list = &ixgb_set_multi;
427         netdev->set_mac_address = &ixgb_set_mac;
428         netdev->change_mtu = &ixgb_change_mtu;
429         ixgb_set_ethtool_ops(netdev);
430         netdev->tx_timeout = &ixgb_tx_timeout;
431         netdev->watchdog_timeo = 5 * HZ;
432 #ifdef CONFIG_IXGB_NAPI
433         netdev->poll = &ixgb_clean;
434         netdev->weight = 64;
435 #endif
436         netdev->vlan_rx_register = ixgb_vlan_rx_register;
437         netdev->vlan_rx_add_vid = ixgb_vlan_rx_add_vid;
438         netdev->vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid;
439 #ifdef CONFIG_NET_POLL_CONTROLLER
440         netdev->poll_controller = ixgb_netpoll;
441 #endif
442
443         strcpy(netdev->name, pci_name(pdev));
444         netdev->mem_start = mmio_start;
445         netdev->mem_end = mmio_start + mmio_len;
446         netdev->base_addr = adapter->hw.io_base;
447
448         adapter->bd_number = cards_found;
449         adapter->link_speed = 0;
450         adapter->link_duplex = 0;
451
452         /* setup the private structure */
453
454         if((err = ixgb_sw_init(adapter)))
455                 goto err_sw_init;
456
457         netdev->features = NETIF_F_SG |
458                            NETIF_F_HW_CSUM |
459                            NETIF_F_HW_VLAN_TX |
460                            NETIF_F_HW_VLAN_RX |
461                            NETIF_F_HW_VLAN_FILTER;
462 #ifdef NETIF_F_TSO
463         netdev->features |= NETIF_F_TSO;
464 #endif
465 #ifdef NETIF_F_LLTX
466         netdev->features |= NETIF_F_LLTX;
467 #endif
468
469         if(pci_using_dac)
470                 netdev->features |= NETIF_F_HIGHDMA;
471
472         /* make sure the EEPROM is good */
473
474         if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
475                 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
476                 err = -EIO;
477                 goto err_eeprom;
478         }
479
480         ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
481         memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
482
483         if(!is_valid_ether_addr(netdev->perm_addr)) {
484                 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
485                 err = -EIO;
486                 goto err_eeprom;
487         }
488
489         adapter->part_num = ixgb_get_ee_pba_number(&adapter->hw);
490
491         init_timer(&adapter->watchdog_timer);
492         adapter->watchdog_timer.function = &ixgb_watchdog;
493         adapter->watchdog_timer.data = (unsigned long)adapter;
494
495         INIT_WORK(&adapter->tx_timeout_task,
496                   (void (*)(void *))ixgb_tx_timeout_task, netdev);
497
498         strcpy(netdev->name, "eth%d");
499         if((err = register_netdev(netdev)))
500                 goto err_register;
501
502         /* we're going to reset, so assume we have no link for now */
503
504         netif_carrier_off(netdev);
505         netif_stop_queue(netdev);
506
507         DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
508         ixgb_check_options(adapter);
509         /* reset the hardware with the new settings */
510
511         ixgb_reset(adapter);
512
513         cards_found++;
514         return 0;
515
516 err_register:
517 err_sw_init:
518 err_eeprom:
519         iounmap(adapter->hw.hw_addr);
520 err_ioremap:
521         free_netdev(netdev);
522 err_alloc_etherdev:
523         pci_release_regions(pdev);
524 err_request_regions:
525 err_dma_mask:
526         pci_disable_device(pdev);
527         return err;
528 }
529
530 /**
531  * ixgb_remove - Device Removal Routine
532  * @pdev: PCI device information struct
533  *
534  * ixgb_remove is called by the PCI subsystem to alert the driver
535  * that it should release a PCI device.  This could be caused by a
536  * Hot-Plug event, or because the driver is going to be removed from
537  * memory.
538  **/
539
540 static void __devexit
541 ixgb_remove(struct pci_dev *pdev)
542 {
543         struct net_device *netdev = pci_get_drvdata(pdev);
544         struct ixgb_adapter *adapter = netdev_priv(netdev);
545
546         unregister_netdev(netdev);
547
548         iounmap(adapter->hw.hw_addr);
549         pci_release_regions(pdev);
550
551         free_netdev(netdev);
552 }
553
554 /**
555  * ixgb_sw_init - Initialize general software structures (struct ixgb_adapter)
556  * @adapter: board private structure to initialize
557  *
558  * ixgb_sw_init initializes the Adapter private data structure.
559  * Fields are initialized based on PCI device information and
560  * OS network device settings (MTU size).
561  **/
562
563 static int __devinit
564 ixgb_sw_init(struct ixgb_adapter *adapter)
565 {
566         struct ixgb_hw *hw = &adapter->hw;
567         struct net_device *netdev = adapter->netdev;
568         struct pci_dev *pdev = adapter->pdev;
569
570         /* PCI config space info */
571
572         hw->vendor_id = pdev->vendor;
573         hw->device_id = pdev->device;
574         hw->subsystem_vendor_id = pdev->subsystem_vendor;
575         hw->subsystem_id = pdev->subsystem_device;
576
577         adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
578
579         hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
580
581         if((hw->device_id == IXGB_DEVICE_ID_82597EX)
582            || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
583            || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
584            || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
585                         hw->mac_type = ixgb_82597;
586         else {
587                 /* should never have loaded on this device */
588                 DPRINTK(PROBE, ERR, "unsupported device id\n");
589         }
590
591         /* enable flow control to be programmed */
592         hw->fc.send_xon = 1;
593
594         atomic_set(&adapter->irq_sem, 1);
595         spin_lock_init(&adapter->tx_lock);
596
597         return 0;
598 }
599
600 /**
601  * ixgb_open - Called when a network interface is made active
602  * @netdev: network interface device structure
603  *
604  * Returns 0 on success, negative value on failure
605  *
606  * The open entry point is called when a network interface is made
607  * active by the system (IFF_UP).  At this point all resources needed
608  * for transmit and receive operations are allocated, the interrupt
609  * handler is registered with the OS, the watchdog timer is started,
610  * and the stack is notified that the interface is ready.
611  **/
612
613 static int
614 ixgb_open(struct net_device *netdev)
615 {
616         struct ixgb_adapter *adapter = netdev_priv(netdev);
617         int err;
618
619         /* allocate transmit descriptors */
620
621         if((err = ixgb_setup_tx_resources(adapter)))
622                 goto err_setup_tx;
623
624         /* allocate receive descriptors */
625
626         if((err = ixgb_setup_rx_resources(adapter)))
627                 goto err_setup_rx;
628
629         if((err = ixgb_up(adapter)))
630                 goto err_up;
631
632         return 0;
633
634 err_up:
635         ixgb_free_rx_resources(adapter);
636 err_setup_rx:
637         ixgb_free_tx_resources(adapter);
638 err_setup_tx:
639         ixgb_reset(adapter);
640
641         return err;
642 }
643
644 /**
645  * ixgb_close - Disables a network interface
646  * @netdev: network interface device structure
647  *
648  * Returns 0, this is not allowed to fail
649  *
650  * The close entry point is called when an interface is de-activated
651  * by the OS.  The hardware is still under the driver's control, but
652  * needs to be disabled.  A global MAC reset is issued to stop the
653  * hardware, and all transmit and receive resources are freed.
654  **/
655
656 static int
657 ixgb_close(struct net_device *netdev)
658 {
659         struct ixgb_adapter *adapter = netdev_priv(netdev);
660
661         ixgb_down(adapter, TRUE);
662
663         ixgb_free_tx_resources(adapter);
664         ixgb_free_rx_resources(adapter);
665
666         return 0;
667 }
668
669 /**
670  * ixgb_setup_tx_resources - allocate Tx resources (Descriptors)
671  * @adapter: board private structure
672  *
673  * Return 0 on success, negative on failure
674  **/
675
676 int
677 ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
678 {
679         struct ixgb_desc_ring *txdr = &adapter->tx_ring;
680         struct pci_dev *pdev = adapter->pdev;
681         int size;
682
683         size = sizeof(struct ixgb_buffer) * txdr->count;
684         txdr->buffer_info = vmalloc(size);
685         if(!txdr->buffer_info) {
686                 DPRINTK(PROBE, ERR,
687                  "Unable to allocate transmit descriptor ring memory\n");
688                 return -ENOMEM;
689         }
690         memset(txdr->buffer_info, 0, size);
691
692         /* round up to nearest 4K */
693
694         txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
695         IXGB_ROUNDUP(txdr->size, 4096);
696
697         txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
698         if(!txdr->desc) {
699                 vfree(txdr->buffer_info);
700                 DPRINTK(PROBE, ERR,
701                  "Unable to allocate transmit descriptor memory\n");
702                 return -ENOMEM;
703         }
704         memset(txdr->desc, 0, txdr->size);
705
706         txdr->next_to_use = 0;
707         txdr->next_to_clean = 0;
708
709         return 0;
710 }
711
712 /**
713  * ixgb_configure_tx - Configure 82597 Transmit Unit after Reset.
714  * @adapter: board private structure
715  *
716  * Configure the Tx unit of the MAC after a reset.
717  **/
718
719 static void
720 ixgb_configure_tx(struct ixgb_adapter *adapter)
721 {
722         uint64_t tdba = adapter->tx_ring.dma;
723         uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
724         uint32_t tctl;
725         struct ixgb_hw *hw = &adapter->hw;
726
727         /* Setup the Base and Length of the Tx Descriptor Ring 
728          * tx_ring.dma can be either a 32 or 64 bit value 
729          */
730
731         IXGB_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
732         IXGB_WRITE_REG(hw, TDBAH, (tdba >> 32));
733
734         IXGB_WRITE_REG(hw, TDLEN, tdlen);
735
736         /* Setup the HW Tx Head and Tail descriptor pointers */
737
738         IXGB_WRITE_REG(hw, TDH, 0);
739         IXGB_WRITE_REG(hw, TDT, 0);
740
741         /* don't set up txdctl; it induces performance problems if configured
742          * incorrectly */
743         /* Set the Tx Interrupt Delay register */
744
745         IXGB_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
746
747         /* Program the Transmit Control Register */
748
749         tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
750         IXGB_WRITE_REG(hw, TCTL, tctl);
751
752         /* Setup Transmit Descriptor Settings for this adapter */
753         adapter->tx_cmd_type =
754                 IXGB_TX_DESC_TYPE 
755                 | (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
756 }
757
758 /**
759  * ixgb_setup_rx_resources - allocate Rx resources (Descriptors)
760  * @adapter: board private structure
761  *
762  * Returns 0 on success, negative on failure
763  **/
764
765 int
766 ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
767 {
768         struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
769         struct pci_dev *pdev = adapter->pdev;
770         int size;
771
772         size = sizeof(struct ixgb_buffer) * rxdr->count;
773         rxdr->buffer_info = vmalloc(size);
774         if(!rxdr->buffer_info) {
775                 DPRINTK(PROBE, ERR,
776                  "Unable to allocate receive descriptor ring\n");
777                 return -ENOMEM;
778         }
779         memset(rxdr->buffer_info, 0, size);
780
781         /* Round up to nearest 4K */
782
783         rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
784         IXGB_ROUNDUP(rxdr->size, 4096);
785
786         rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
787
788         if(!rxdr->desc) {
789                 vfree(rxdr->buffer_info);
790                 DPRINTK(PROBE, ERR,
791                  "Unable to allocate receive descriptors\n");
792                 return -ENOMEM;
793         }
794         memset(rxdr->desc, 0, rxdr->size);
795
796         rxdr->next_to_clean = 0;
797         rxdr->next_to_use = 0;
798
799         return 0;
800 }
801
802 /**
803  * ixgb_setup_rctl - configure the receive control register
804  * @adapter: board private structure
805  **/
806
807 static void
808 ixgb_setup_rctl(struct ixgb_adapter *adapter)
809 {
810         uint32_t rctl;
811
812         rctl = IXGB_READ_REG(&adapter->hw, RCTL);
813
814         rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
815
816         rctl |=
817                 IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | 
818                 IXGB_RCTL_RXEN | IXGB_RCTL_CFF | 
819                 (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
820
821         rctl |= IXGB_RCTL_SECRC;
822
823         switch (adapter->rx_buffer_len) {
824         case IXGB_RXBUFFER_2048:
825         default:
826                 rctl |= IXGB_RCTL_BSIZE_2048;
827                 break;
828         case IXGB_RXBUFFER_4096:
829                 rctl |= IXGB_RCTL_BSIZE_4096;
830                 break;
831         case IXGB_RXBUFFER_8192:
832                 rctl |= IXGB_RCTL_BSIZE_8192;
833                 break;
834         case IXGB_RXBUFFER_16384:
835                 rctl |= IXGB_RCTL_BSIZE_16384;
836                 break;
837         }
838
839         IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
840 }
841
842 /**
843  * ixgb_configure_rx - Configure 82597 Receive Unit after Reset.
844  * @adapter: board private structure
845  *
846  * Configure the Rx unit of the MAC after a reset.
847  **/
848
849 static void
850 ixgb_configure_rx(struct ixgb_adapter *adapter)
851 {
852         uint64_t rdba = adapter->rx_ring.dma;
853         uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
854         struct ixgb_hw *hw = &adapter->hw;
855         uint32_t rctl;
856         uint32_t rxcsum;
857         uint32_t rxdctl;
858
859         /* make sure receives are disabled while setting up the descriptors */
860
861         rctl = IXGB_READ_REG(hw, RCTL);
862         IXGB_WRITE_REG(hw, RCTL, rctl & ~IXGB_RCTL_RXEN);
863
864         /* set the Receive Delay Timer Register */
865
866         IXGB_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
867
868         /* Setup the Base and Length of the Rx Descriptor Ring */
869
870         IXGB_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
871         IXGB_WRITE_REG(hw, RDBAH, (rdba >> 32));
872
873         IXGB_WRITE_REG(hw, RDLEN, rdlen);
874
875         /* Setup the HW Rx Head and Tail Descriptor Pointers */
876         IXGB_WRITE_REG(hw, RDH, 0);
877         IXGB_WRITE_REG(hw, RDT, 0);
878
879         /* set up pre-fetching of receive buffers so we get some before we
880          * run out (default hardware behavior is to run out before fetching
881          * more).  This sets up to fetch if HTHRESH rx descriptors are available
882          * and the descriptors in hw cache are below PTHRESH.  This avoids
883          * the hardware behavior of fetching <=512 descriptors in a single
884          * burst that pre-empts all other activity, usually causing fifo
885          * overflows. */
886         /* use WTHRESH to burst write 16 descriptors or burst when RXT0 */
887         rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT |
888                  RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT |
889                  RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
890         IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
891
892         /* Enable Receive Checksum Offload for TCP and UDP */
893         if(adapter->rx_csum == TRUE) {
894                 rxcsum = IXGB_READ_REG(hw, RXCSUM);
895                 rxcsum |= IXGB_RXCSUM_TUOFL;
896                 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
897         }
898
899         /* Enable Receives */
900
901         IXGB_WRITE_REG(hw, RCTL, rctl);
902 }
903
904 /**
905  * ixgb_free_tx_resources - Free Tx Resources
906  * @adapter: board private structure
907  *
908  * Free all transmit software resources
909  **/
910
911 void
912 ixgb_free_tx_resources(struct ixgb_adapter *adapter)
913 {
914         struct pci_dev *pdev = adapter->pdev;
915
916         ixgb_clean_tx_ring(adapter);
917
918         vfree(adapter->tx_ring.buffer_info);
919         adapter->tx_ring.buffer_info = NULL;
920
921         pci_free_consistent(pdev, adapter->tx_ring.size,
922                             adapter->tx_ring.desc, adapter->tx_ring.dma);
923
924         adapter->tx_ring.desc = NULL;
925 }
926
927 static inline void
928 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
929                                         struct ixgb_buffer *buffer_info)
930 {
931         struct pci_dev *pdev = adapter->pdev;
932         if(buffer_info->dma) {
933                 pci_unmap_page(pdev,
934                            buffer_info->dma,
935                            buffer_info->length,
936                            PCI_DMA_TODEVICE);
937                 buffer_info->dma = 0;
938         }
939         if(buffer_info->skb) {
940                 dev_kfree_skb_any(buffer_info->skb);
941                 buffer_info->skb = NULL;
942         }
943 }
944
945 /**
946  * ixgb_clean_tx_ring - Free Tx Buffers
947  * @adapter: board private structure
948  **/
949
950 static void
951 ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
952 {
953         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
954         struct ixgb_buffer *buffer_info;
955         unsigned long size;
956         unsigned int i;
957
958         /* Free all the Tx ring sk_buffs */
959
960         for(i = 0; i < tx_ring->count; i++) {
961                 buffer_info = &tx_ring->buffer_info[i];
962                 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
963         }
964
965         size = sizeof(struct ixgb_buffer) * tx_ring->count;
966         memset(tx_ring->buffer_info, 0, size);
967
968         /* Zero out the descriptor ring */
969
970         memset(tx_ring->desc, 0, tx_ring->size);
971
972         tx_ring->next_to_use = 0;
973         tx_ring->next_to_clean = 0;
974
975         IXGB_WRITE_REG(&adapter->hw, TDH, 0);
976         IXGB_WRITE_REG(&adapter->hw, TDT, 0);
977 }
978
979 /**
980  * ixgb_free_rx_resources - Free Rx Resources
981  * @adapter: board private structure
982  *
983  * Free all receive software resources
984  **/
985
986 void
987 ixgb_free_rx_resources(struct ixgb_adapter *adapter)
988 {
989         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
990         struct pci_dev *pdev = adapter->pdev;
991
992         ixgb_clean_rx_ring(adapter);
993
994         vfree(rx_ring->buffer_info);
995         rx_ring->buffer_info = NULL;
996
997         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
998
999         rx_ring->desc = NULL;
1000 }
1001
1002 /**
1003  * ixgb_clean_rx_ring - Free Rx Buffers
1004  * @adapter: board private structure
1005  **/
1006
1007 static void
1008 ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
1009 {
1010         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1011         struct ixgb_buffer *buffer_info;
1012         struct pci_dev *pdev = adapter->pdev;
1013         unsigned long size;
1014         unsigned int i;
1015
1016         /* Free all the Rx ring sk_buffs */
1017
1018         for(i = 0; i < rx_ring->count; i++) {
1019                 buffer_info = &rx_ring->buffer_info[i];
1020                 if(buffer_info->skb) {
1021
1022                         pci_unmap_single(pdev,
1023                                          buffer_info->dma,
1024                                          buffer_info->length,
1025                                          PCI_DMA_FROMDEVICE);
1026
1027                         dev_kfree_skb(buffer_info->skb);
1028
1029                         buffer_info->skb = NULL;
1030                 }
1031         }
1032
1033         size = sizeof(struct ixgb_buffer) * rx_ring->count;
1034         memset(rx_ring->buffer_info, 0, size);
1035
1036         /* Zero out the descriptor ring */
1037
1038         memset(rx_ring->desc, 0, rx_ring->size);
1039
1040         rx_ring->next_to_clean = 0;
1041         rx_ring->next_to_use = 0;
1042
1043         IXGB_WRITE_REG(&adapter->hw, RDH, 0);
1044         IXGB_WRITE_REG(&adapter->hw, RDT, 0);
1045 }
1046
1047 /**
1048  * ixgb_set_mac - Change the Ethernet Address of the NIC
1049  * @netdev: network interface device structure
1050  * @p: pointer to an address structure
1051  *
1052  * Returns 0 on success, negative on failure
1053  **/
1054
1055 static int
1056 ixgb_set_mac(struct net_device *netdev, void *p)
1057 {
1058         struct ixgb_adapter *adapter = netdev_priv(netdev);
1059         struct sockaddr *addr = p;
1060
1061         if(!is_valid_ether_addr(addr->sa_data))
1062                 return -EADDRNOTAVAIL;
1063
1064         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1065
1066         ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
1067
1068         return 0;
1069 }
1070
1071 /**
1072  * ixgb_set_multi - Multicast and Promiscuous mode set
1073  * @netdev: network interface device structure
1074  *
1075  * The set_multi entry point is called whenever the multicast address
1076  * list or the network interface flags are updated.  This routine is
1077  * responsible for configuring the hardware for proper multicast,
1078  * promiscuous mode, and all-multi behavior.
1079  **/
1080
1081 static void
1082 ixgb_set_multi(struct net_device *netdev)
1083 {
1084         struct ixgb_adapter *adapter = netdev_priv(netdev);
1085         struct ixgb_hw *hw = &adapter->hw;
1086         struct dev_mc_list *mc_ptr;
1087         uint32_t rctl;
1088         int i;
1089
1090         /* Check for Promiscuous and All Multicast modes */
1091
1092         rctl = IXGB_READ_REG(hw, RCTL);
1093
1094         if(netdev->flags & IFF_PROMISC) {
1095                 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1096         } else if(netdev->flags & IFF_ALLMULTI) {
1097                 rctl |= IXGB_RCTL_MPE;
1098                 rctl &= ~IXGB_RCTL_UPE;
1099         } else {
1100                 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1101         }
1102
1103         if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
1104                 rctl |= IXGB_RCTL_MPE;
1105                 IXGB_WRITE_REG(hw, RCTL, rctl);
1106         } else {
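                /* mc_count is known to be <= IXGB_MAX_NUM_MULTICAST_ADDRESSES
                 * in this branch, so the variable-length array below stays
                 * bounded; the whole list is flattened into it and handed to
                 * ixgb_mc_addr_list_update(). */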
1107                 uint8_t mta[netdev->mc_count * IXGB_ETH_LENGTH_OF_ADDRESS];
1108
1109                 IXGB_WRITE_REG(hw, RCTL, rctl);
1110
1111                 for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
1112                         i++, mc_ptr = mc_ptr->next)
1113                         memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
1114                                    mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1115
1116                 ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
1117         }
1118 }
1119
1120 /**
1121  * ixgb_watchdog - Timer Call-back
1122  * @data: pointer to netdev cast into an unsigned long
1123  **/
1124
1125 static void
1126 ixgb_watchdog(unsigned long data)
1127 {
1128         struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
1129         struct net_device *netdev = adapter->netdev;
1130         struct ixgb_desc_ring *txdr = &adapter->tx_ring;
1131
1132         ixgb_check_for_link(&adapter->hw);
1133
1134         if (ixgb_check_for_bad_link(&adapter->hw)) {
1135                 /* force the reset path */
1136                 netif_stop_queue(netdev);
1137         }
1138
1139         if(adapter->hw.link_up) {
1140                 if(!netif_carrier_ok(netdev)) {
1141                         DPRINTK(LINK, INFO,
1142                                 "NIC Link is Up 10000 Mbps Full Duplex\n");
1143                         adapter->link_speed = 10000;
1144                         adapter->link_duplex = FULL_DUPLEX;
1145                         netif_carrier_on(netdev);
1146                         netif_wake_queue(netdev);
1147                 }
1148         } else {
1149                 if(netif_carrier_ok(netdev)) {
1150                         adapter->link_speed = 0;
1151                         adapter->link_duplex = 0;
1152                         DPRINTK(LINK, INFO, "NIC Link is Down\n");
1153                         netif_carrier_off(netdev);
1154                         netif_stop_queue(netdev);
1155
1156                 }
1157         }
1158
1159         ixgb_update_stats(adapter);
1160
1161         if(!netif_carrier_ok(netdev)) {
1162                 if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
1163                         /* We've lost link, so the controller stops DMA,
1164                          * but we've got queued Tx work that's never going
1165                          * to get done, so reset controller to flush Tx.
1166                          * (Do the reset outside of interrupt context). */
1167                         schedule_work(&adapter->tx_timeout_task);
1168                 }
1169         }
1170
1171         /* Force detection of hung controller every watchdog period */
1172         adapter->detect_tx_hung = TRUE;
1173
1174         /* generate an interrupt to force clean up of any stragglers */
1175         IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
1176
1177         /* Reset the timer */
1178         mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
1179 }
1180
1181 #define IXGB_TX_FLAGS_CSUM              0x00000001
1182 #define IXGB_TX_FLAGS_VLAN              0x00000002
1183 #define IXGB_TX_FLAGS_TSO               0x00000004
1184
1185 static inline int
1186 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1187 {
1188 #ifdef NETIF_F_TSO
1189         struct ixgb_context_desc *context_desc;
1190         unsigned int i;
1191         uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
1192         uint16_t ipcse, tucse, mss;
1193         int err;
1194
1195         if(likely(skb_shinfo(skb)->tso_size)) {
1196                 if (skb_header_cloned(skb)) {
1197                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1198                         if (err)
1199                                 return err;
1200                 }
1201
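                /* Prep for hardware TSO: clear tot_len and the IP checksum and
                 * seed the TCP checksum with just the pseudo-header sum
                 * (length 0) so the controller can fill in per-segment lengths
                 * and checksums. */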
1202                 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
1203                 mss = skb_shinfo(skb)->tso_size;
1204                 skb->nh.iph->tot_len = 0;
1205                 skb->nh.iph->check = 0;
1206                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
1207                                                       skb->nh.iph->daddr,
1208                                                       0, IPPROTO_TCP, 0);
1209                 ipcss = skb->nh.raw - skb->data;
1210                 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
1211                 ipcse = skb->h.raw - skb->data - 1;
1212                 tucss = skb->h.raw - skb->data;
1213                 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
1214                 tucse = 0;
1215
1216                 i = adapter->tx_ring.next_to_use;
1217                 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1218
1219                 context_desc->ipcss = ipcss;
1220                 context_desc->ipcso = ipcso;
1221                 context_desc->ipcse = cpu_to_le16(ipcse);
1222                 context_desc->tucss = tucss;
1223                 context_desc->tucso = tucso;
1224                 context_desc->tucse = cpu_to_le16(tucse);
1225                 context_desc->mss = cpu_to_le16(mss);
1226                 context_desc->hdr_len = hdr_len;
1227                 context_desc->status = 0;
1228                 context_desc->cmd_type_len = cpu_to_le32(
1229                                                   IXGB_CONTEXT_DESC_TYPE 
1230                                                 | IXGB_CONTEXT_DESC_CMD_TSE
1231                                                 | IXGB_CONTEXT_DESC_CMD_IP
1232                                                 | IXGB_CONTEXT_DESC_CMD_TCP
1233                                                 | IXGB_CONTEXT_DESC_CMD_IDE
1234                                                 | (skb->len - (hdr_len)));
1235
1236
1237                 if(++i == adapter->tx_ring.count) i = 0;
1238                 adapter->tx_ring.next_to_use = i;
1239
1240                 return 1;
1241         }
1242 #endif
1243
1244         return 0;
1245 }
1246
1247 static inline boolean_t
1248 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1249 {
1250         struct ixgb_context_desc *context_desc;
1251         unsigned int i;
1252         uint8_t css, cso;
1253
1254         if(likely(skb->ip_summed == CHECKSUM_HW)) {
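                /* For CHECKSUM_HW the stack wants checksumming from the
                 * transport header onward, and skb->csum holds the offset
                 * (from h.raw) of the field where the result belongs; that is
                 * exactly what css/cso encode for the context descriptor. */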
1255                 css = skb->h.raw - skb->data;
1256                 cso = (skb->h.raw + skb->csum) - skb->data;
1257
1258                 i = adapter->tx_ring.next_to_use;
1259                 context_desc = IXGB_CONTEXT_DESC(adapter->tx_ring, i);
1260
1261                 context_desc->tucss = css;
1262                 context_desc->tucso = cso;
1263                 context_desc->tucse = 0;
1264                 /* zero out any previously existing data in one instruction */
1265                 *(uint32_t *)&(context_desc->ipcss) = 0;
1266                 context_desc->status = 0;
1267                 context_desc->hdr_len = 0;
1268                 context_desc->mss = 0;
1269                 context_desc->cmd_type_len =
1270                         cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
1271                                     | IXGB_TX_DESC_CMD_IDE);
1272
1273                 if(++i == adapter->tx_ring.count) i = 0;
1274                 adapter->tx_ring.next_to_use = i;
1275
1276                 return TRUE;
1277         }
1278
1279         return FALSE;
1280 }
1281
1282 #define IXGB_MAX_TXD_PWR        14
1283 #define IXGB_MAX_DATA_PER_TXD   (1<<IXGB_MAX_TXD_PWR)
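/* Each descriptor can carry at most 2^14 = 16384 bytes of a buffer, so
 * ixgb_tx_map() below splits anything larger across several descriptors. */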
1284
1285 static inline int
1286 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1287             unsigned int first)
1288 {
1289         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1290         struct ixgb_buffer *buffer_info;
1291         int len = skb->len;
1292         unsigned int offset = 0, size, count = 0, i;
1293         unsigned int mss = skb_shinfo(skb)->tso_size;
1294
1295         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1296         unsigned int f;
1297
1298         len -= skb->data_len;
1299
1300         i = tx_ring->next_to_use;
1301
1302         while(len) {
1303                 buffer_info = &tx_ring->buffer_info[i];
1304                 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1305                 /* Workaround for premature desc write-backs
1306                  * in TSO mode.  Append 4-byte sentinel desc */
1307                 if(unlikely(mss && !nr_frags && size == len && size > 8))
1308                         size -= 4;
1309
1310                 buffer_info->length = size;
1311                 buffer_info->dma =
1312                         pci_map_single(adapter->pdev,
1313                                 skb->data + offset,
1314                                 size,
1315                                 PCI_DMA_TODEVICE);
1316                 buffer_info->time_stamp = jiffies;
1317
1318                 len -= size;
1319                 offset += size;
1320                 count++;
1321                 if(++i == tx_ring->count) i = 0;
1322         }
1323
1324         for(f = 0; f < nr_frags; f++) {
1325                 struct skb_frag_struct *frag;
1326
1327                 frag = &skb_shinfo(skb)->frags[f];
1328                 len = frag->size;
1329                 offset = 0;
1330
1331                 while(len) {
1332                         buffer_info = &tx_ring->buffer_info[i];
1333                         size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1334                         /* Workaround for premature desc write-backs
1335                          * in TSO mode.  Append 4-byte sentinel desc */
1336                         if(unlikely(mss && (f == (nr_frags-1)) && (size == len)
1337                                     && (size > 8)))
1338                                 size -= 4;
1339
1340                         buffer_info->length = size;
1341                         buffer_info->dma =
1342                                 pci_map_page(adapter->pdev,
1343                                         frag->page,
1344                                         frag->page_offset + offset,
1345                                         size,
1346                                         PCI_DMA_TODEVICE);
1347                         buffer_info->time_stamp = jiffies;
1348
1349                         len -= size;
1350                         offset += size;
1351                         count++;
1352                         if(++i == tx_ring->count) i = 0;
1353                 }
1354         }
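        /* Attach the skb to the last descriptor so it is freed only after the
         * whole chain completes, and note that last index in the first
         * buffer's next_to_watch so the Tx cleanup path knows where this
         * packet ends. */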
1355         i = (i == 0) ? tx_ring->count - 1 : i - 1;
1356         tx_ring->buffer_info[i].skb = skb;
1357         tx_ring->buffer_info[first].next_to_watch = i;
1358
1359         return count;
1360 }
1361
1362 static inline void
1363 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1364 {
1365         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1366         struct ixgb_tx_desc *tx_desc = NULL;
1367         struct ixgb_buffer *buffer_info;
1368         uint32_t cmd_type_len = adapter->tx_cmd_type;
1369         uint8_t status = 0;
1370         uint8_t popts = 0;
1371         unsigned int i;
1372
1373         if(tx_flags & IXGB_TX_FLAGS_TSO) {
1374                 cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
1375                 popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
1376         }
1377
1378         if(tx_flags & IXGB_TX_FLAGS_CSUM)
1379                 popts |= IXGB_TX_DESC_POPTS_TXSM;
1380
1381         if(tx_flags & IXGB_TX_FLAGS_VLAN) {
1382                 cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
1383         }
1384
1385         i = tx_ring->next_to_use;
1386
1387         while(count--) {
1388                 buffer_info = &tx_ring->buffer_info[i];
1389                 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1390                 tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
1391                 tx_desc->cmd_type_len =
1392                         cpu_to_le32(cmd_type_len | buffer_info->length);
1393                 tx_desc->status = status;
1394                 tx_desc->popts = popts;
1395                 tx_desc->vlan = cpu_to_le16(vlan_id);
1396
1397                 if(++i == tx_ring->count) i = 0;
1398         }
1399
1400         tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP 
1401                                 | IXGB_TX_DESC_CMD_RS );
1402
1403         /* Force memory writes to complete before letting h/w
1404          * know there are new descriptors to fetch.  (Only
1405          * applicable for weak-ordered memory model archs,
1406          * such as IA-64). */
1407         wmb();
1408
1409         tx_ring->next_to_use = i;
1410         IXGB_WRITE_REG(&adapter->hw, TDT, i);
1411 }
1412
1413 /* Tx Descriptors needed, worst case */
1414 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1415                          (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1416 #define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1417         MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \
1418         /* one more for TSO workaround */ + 1
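/* Worked example, assuming 4 KiB pages and the usual MAX_SKB_FRAGS of 18 for
 * that page size: TXD_USE_COUNT(16384) = 1 and TXD_USE_COUNT(4096) = 1, so
 * DESC_NEEDED = 1 + 18 * 1 + 1 + 1 = 21 descriptors reserved per worst-case
 * transmit. */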
1419
1420 static int
1421 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1422 {
1423         struct ixgb_adapter *adapter = netdev_priv(netdev);
1424         unsigned int first;
1425         unsigned int tx_flags = 0;
1426         unsigned long flags;
1427         int vlan_id = 0;
1428         int tso;
1429
1430         if(skb->len <= 0) {
1431                 dev_kfree_skb_any(skb);
1432                 return 0;
1433         }
1434
1435 #ifdef NETIF_F_LLTX
1436         local_irq_save(flags);
1437         if (!spin_trylock(&adapter->tx_lock)) {
1438                 /* Collision - tell upper layer to requeue */
1439                 local_irq_restore(flags);
1440                 return NETDEV_TX_LOCKED;
1441         }
1442 #else
1443         spin_lock_irqsave(&adapter->tx_lock, flags);
1444 #endif
1445
1446         if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
1447                 netif_stop_queue(netdev);
1448                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1449                 return NETDEV_TX_BUSY;
1450         }
1451
1452 #ifndef NETIF_F_LLTX
1453         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1454 #endif
1455
1456         if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
1457                 tx_flags |= IXGB_TX_FLAGS_VLAN;
1458                 vlan_id = vlan_tx_tag_get(skb);
1459         }
1460
1461         first = adapter->tx_ring.next_to_use;
1462         
1463         tso = ixgb_tso(adapter, skb);
1464         if (tso < 0) {
1465                 dev_kfree_skb_any(skb);
1466 #ifdef NETIF_F_LLTX
1467                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
1468 #endif
1469                 return NETDEV_TX_OK;
1470         }
1471
1472         if (likely(tso))
1473                 tx_flags |= IXGB_TX_FLAGS_TSO;
1474         else if(ixgb_tx_csum(adapter, skb))
1475                 tx_flags |= IXGB_TX_FLAGS_CSUM;
1476
1477         ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
1478                         tx_flags);
1479
1480         netdev->trans_start = jiffies;
1481
1482 #ifdef NETIF_F_LLTX
1483         /* Make sure there is space in the ring for the next send. */
1484         if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
1485                 netif_stop_queue(netdev);
1486
1487         spin_unlock_irqrestore(&adapter->tx_lock, flags);
1488
1489 #endif
1490         return NETDEV_TX_OK;
1491 }
1492
1493 /**
1494  * ixgb_tx_timeout - Respond to a Tx Hang
1495  * @netdev: network interface device structure
1496  **/
1497
1498 static void
1499 ixgb_tx_timeout(struct net_device *netdev)
1500 {
1501         struct ixgb_adapter *adapter = netdev_priv(netdev);
1502
1503         /* Do the reset outside of interrupt context */
1504         schedule_work(&adapter->tx_timeout_task);
1505 }
1506
1507 static void
1508 ixgb_tx_timeout_task(struct net_device *netdev)
1509 {
1510         struct ixgb_adapter *adapter = netdev_priv(netdev);
1511
1512         adapter->tx_timeout_count++;
1513         ixgb_down(adapter, TRUE);
1514         ixgb_up(adapter);
1515 }
1516
1517 /**
1518  * ixgb_get_stats - Get System Network Statistics
1519  * @netdev: network interface device structure
1520  *
1521  * Returns the address of the device statistics structure.
1522  * The statistics are actually updated from the timer callback.
1523  **/
1524
1525 static struct net_device_stats *
1526 ixgb_get_stats(struct net_device *netdev)
1527 {
1528         struct ixgb_adapter *adapter = netdev_priv(netdev);
1529
1530         return &adapter->net_stats;
1531 }
1532
1533 /**
1534  * ixgb_change_mtu - Change the Maximum Transfer Unit
1535  * @netdev: network interface device structure
1536  * @new_mtu: new value for maximum frame size
1537  *
1538  * Returns 0 on success, negative on failure
1539  **/
1540
1541 static int
1542 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1543 {
1544         struct ixgb_adapter *adapter = netdev_priv(netdev);
1545         int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1546         int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
1547
1548
1549         if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1550            || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
1551                 DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
1552                 return -EINVAL;
1553         }
1554
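        /* Pick the smallest supported receive buffer that still holds
         * max_frame; these are the same sizes ixgb_setup_rctl() maps onto the
         * RCTL_BSIZE_* settings. */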
1555         if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
1556            || (max_frame <= IXGB_RXBUFFER_2048)) {
1557                 adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
1558
1559         } else if(max_frame <= IXGB_RXBUFFER_4096) {
1560                 adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
1561
1562         } else if(max_frame <= IXGB_RXBUFFER_8192) {
1563                 adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
1564
1565         } else {
1566                 adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
1567         }
1568
1569         netdev->mtu = new_mtu;
1570
1571         if(old_max_frame != max_frame && netif_running(netdev)) {
1572
1573                 ixgb_down(adapter, TRUE);
1574                 ixgb_up(adapter);
1575         }
1576
1577         return 0;
1578 }
1579
1580 /**
1581  * ixgb_update_stats - Update the board statistics counters.
1582  * @adapter: board private structure
1583  **/
1584
1585 void
1586 ixgb_update_stats(struct ixgb_adapter *adapter)
1587 {
1588         struct net_device *netdev = adapter->netdev;
1589
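        /* When the interface is promiscuous/allmulti, or the multicast list
         * overflows the hardware filter, broadcast frames are counted by the
         * multicast counters as well; read both and subtract the broadcasts
         * so they are not counted twice. */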
1590         if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
1591            (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
1592                 u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
1593                 u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
1594                 u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
1595                 u64 bcast = ((u64)bcast_h << 32) | bcast_l; 
1596
1597                 multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
1598                 /* fix up multicast stats by removing broadcasts */
1599                 if(multi >= bcast)
1600                         multi -= bcast;
1601                 
1602                 adapter->stats.mprcl += (multi & 0xFFFFFFFF);
1603                 adapter->stats.mprch += (multi >> 32);
1604                 adapter->stats.bprcl += bcast_l; 
1605                 adapter->stats.bprch += bcast_h;
1606         } else {
1607                 adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
1608                 adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
1609                 adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
1610                 adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
1611         }
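        /* Accumulate the remaining hardware counters into their 64-bit
         * software copies; each ...L/...H register pair holds the low and
         * high 32 bits of a 64-bit hardware count (see the MPRC handling
         * above). */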
1612         adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
1613         adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
1614         adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
1615         adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
1616         adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
1617         adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
1618         adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
1619         adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
1620         adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
1621         adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
1622         adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
1623         adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
1624         adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
1625         adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
1626         adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
1627         adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
1628         adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);
1629         adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
1630         adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
1631         adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
1632         adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
1633         adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
1634         adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
1635         adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
1636         adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
1637         adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
1638         adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
1639         adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);
1640         adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
1641         adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
1642         adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
1643         adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
1644         adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
1645         adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
1646         adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
1647         adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
1648         adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
1649         adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
1650         adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
1651         adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
1652         adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
1653         adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
1654         adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
1655         adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
1656         adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
1657         adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
1658         adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
1659         adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
1660         adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
1661         adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);
1662         adapter->stats.mcftc += IXGB_READ_REG(&adapter->hw, MCFTC);
1663         adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
1664         adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
1665         adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
1666         adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
1667         adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
1668
1669         /* Fill out the OS statistics structure */
1670
1671         adapter->net_stats.rx_packets = adapter->stats.gprcl;
1672         adapter->net_stats.tx_packets = adapter->stats.gptcl;
1673         adapter->net_stats.rx_bytes = adapter->stats.gorcl;
1674         adapter->net_stats.tx_bytes = adapter->stats.gotcl;
1675         adapter->net_stats.multicast = adapter->stats.mprcl;
1676         adapter->net_stats.collisions = 0;
1677
1678         /* ignore RLEC as it reports errors for padded (< 64 byte) frames
1679          * with a length in the type/len field */
1680         adapter->net_stats.rx_errors =
1681             /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1682             adapter->stats.ruc +
1683             adapter->stats.roc /*+ adapter->stats.rlec */  +
1684             adapter->stats.icbc +
1685             adapter->stats.ecbc + adapter->stats.mpc;
1686
1687         /* see above
1688          * adapter->net_stats.rx_length_errors = adapter->stats.rlec;
1689          */
1690
1691         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
1692         adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
1693         adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
1694         adapter->net_stats.rx_over_errors = adapter->stats.mpc;
1695
1696         adapter->net_stats.tx_errors = 0;
1697         adapter->net_stats.rx_frame_errors = 0;
1698         adapter->net_stats.tx_aborted_errors = 0;
1699         adapter->net_stats.tx_carrier_errors = 0;
1700         adapter->net_stats.tx_fifo_errors = 0;
1701         adapter->net_stats.tx_heartbeat_errors = 0;
1702         adapter->net_stats.tx_window_errors = 0;
1703 }
1704
1705 #define IXGB_MAX_INTR 10
1706 /**
1707  * ixgb_intr - Interrupt Handler
1708  * @irq: interrupt number
1709  * @data: pointer to a network interface device structure
1710  * @regs: CPU registers structure
1711  **/
1712
1713 static irqreturn_t
1714 ixgb_intr(int irq, void *data, struct pt_regs *regs)
1715 {
1716         struct net_device *netdev = data;
1717         struct ixgb_adapter *adapter = netdev_priv(netdev);
1718         struct ixgb_hw *hw = &adapter->hw;
1719         uint32_t icr = IXGB_READ_REG(hw, ICR);
1720 #ifndef CONFIG_IXGB_NAPI
1721         unsigned int i;
1722 #endif
1723
1724         if(unlikely(!icr))
1725                 return IRQ_NONE;  /* Not our interrupt */
1726
1727         if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
1728                 mod_timer(&adapter->watchdog_timer, jiffies);
1729         }
1730
1731 #ifdef CONFIG_IXGB_NAPI
1732         if(netif_rx_schedule_prep(netdev)) {
1733
1734                 /* Disable interrupts and register for poll.  The flush
1735                  * of the posted write is intentionally left out.
1736                  */
1737
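                /* Writing all ones to IMC masks every interrupt source; the
                 * interrupts are re-enabled from ixgb_clean() via
                 * ixgb_irq_enable() once polling has finished. */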
1738                 atomic_inc(&adapter->irq_sem);
1739                 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1740                 __netif_rx_schedule(netdev);
1741         }
1742 #else
1743         /* The single '&' below is intentional: a bitwise, non-short-circuit
1744          * AND makes sure that every pass through this for loop checks both
1745          * the receive and transmit queues for completed descriptors, which
1746          * avoids starvation and helps tx/rx fairness. */
1747         for(i = 0; i < IXGB_MAX_INTR; i++)
1748                 if(!ixgb_clean_rx_irq(adapter) &
1749                    !ixgb_clean_tx_irq(adapter))
1750                         break;
1751 #endif 
1752         return IRQ_HANDLED;
1753 }
1754
1755 #ifdef CONFIG_IXGB_NAPI
1756 /**
1757  * ixgb_clean - NAPI Rx polling callback
1758  * @netdev: network interface device structure
 * @budget: pointer to the remaining packet budget for this poll pass
1759  **/
1760
1761 static int
1762 ixgb_clean(struct net_device *netdev, int *budget)
1763 {
1764         struct ixgb_adapter *adapter = netdev_priv(netdev);
1765         int work_to_do = min(*budget, netdev->quota);
1766         int tx_cleaned;
1767         int work_done = 0;
1768
1769         tx_cleaned = ixgb_clean_tx_irq(adapter);
1770         ixgb_clean_rx_irq(adapter, &work_done, work_to_do);
1771
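        /* Only the Rx work is charged against the global budget and this
         * device's quota; Tx completion cleanup is not budgeted here. */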
1772         *budget -= work_done;
1773         netdev->quota -= work_done;
1774
1775         /* if no Tx and not enough Rx work done, exit the polling mode */
1776         if((!tx_cleaned && (work_done == 0)) || !netif_running(netdev)) {
1777                 netif_rx_complete(netdev);
1778                 ixgb_irq_enable(adapter);
1779                 return 0;
1780         }
1781
1782         return 1;
1783 }
1784 #endif
1785
1786 /**
1787  * ixgb_clean_tx_irq - Reclaim resources after transmit completes
1788  * @adapter: board private structure
1789  **/
1790
1791 static boolean_t
1792 ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1793 {
1794         struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1795         struct net_device *netdev = adapter->netdev;
1796         struct ixgb_tx_desc *tx_desc, *eop_desc;
1797         struct ixgb_buffer *buffer_info;
1798         unsigned int i, eop;
1799         boolean_t cleaned = FALSE;
1800
1801         i = tx_ring->next_to_clean;
1802         eop = tx_ring->buffer_info[i].next_to_watch;
1803         eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1804
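        /* Outer loop: one iteration per completed packet, i.e. while the DD
         * bit is set on the end-of-packet descriptor.  Inner loop: free every
         * descriptor and buffer belonging to that packet, up to and including
         * 'eop'. */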
1805         while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1806
1807                 for(cleaned = FALSE; !cleaned; ) {
1808                         tx_desc = IXGB_TX_DESC(*tx_ring, i);
1809                         buffer_info = &tx_ring->buffer_info[i];
1810
1811                         if (tx_desc->popts
1812                             & (IXGB_TX_DESC_POPTS_TXSM |
1813                                IXGB_TX_DESC_POPTS_IXSM))
1814                                 adapter->hw_csum_tx_good++;
1815
1816                         ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1817
1818                         *(uint32_t *)&(tx_desc->status) = 0;
1819
1820                         cleaned = (i == eop);
1821                         if(++i == tx_ring->count) i = 0;
1822                 }
1823
1824                 eop = tx_ring->buffer_info[i].next_to_watch;
1825                 eop_desc = IXGB_TX_DESC(*tx_ring, eop);
1826         }
1827
1828         tx_ring->next_to_clean = i;
1829
1830         spin_lock(&adapter->tx_lock);
1831         if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
1832            (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
1833
1834                 netif_wake_queue(netdev);
1835         }
1836         spin_unlock(&adapter->tx_lock);
1837
1838         if(adapter->detect_tx_hung) {
1839                 /* Detect a transmit hang in hardware; this serializes the
1840                  * check with the clearing of time_stamp and the movement of i. */
1841                 adapter->detect_tx_hung = FALSE;
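                /* Declare a hang only if the descriptor at 'eop' is still
                 * DMA-mapped, has been pending for more than HZ jiffies, and
                 * the transmitter is not simply paused by link flow control
                 * (the TXOFF bit in the STATUS register is clear). */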
1842                 if (tx_ring->buffer_info[eop].dma &&
1843                    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1844                    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
1845                         IXGB_STATUS_TXOFF)) {
1846                         /* detected Tx unit hang */
1847                         DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
1848                                         "  TDH                  <%x>\n"
1849                                         "  TDT                  <%x>\n"
1850                                         "  next_to_use          <%x>\n"
1851                                         "  next_to_clean        <%x>\n"
1852                                         "buffer_info[next_to_clean]\n"
1853                                         "  time_stamp           <%lx>\n"
1854                                         "  next_to_watch        <%x>\n"
1855                                         "  jiffies              <%lx>\n"
1856                                         "  next_to_watch.status <%x>\n",
1857                                 IXGB_READ_REG(&adapter->hw, TDH),
1858                                 IXGB_READ_REG(&adapter->hw, TDT),
1859                                 tx_ring->next_to_use,
1860                                 tx_ring->next_to_clean,
1861                                 tx_ring->buffer_info[eop].time_stamp,
1862                                 eop,
1863                                 jiffies,
1864                                 eop_desc->status);
1865                         netif_stop_queue(netdev);
1866                 }
1867         }
1868
1869         return cleaned;
1870 }
1871
1872 /**
1873  * ixgb_rx_checksum - Receive Checksum Offload for 82597.
1874  * @adapter: board private structure
1875  * @rx_desc: receive descriptor
1876  * @sk_buff: socket buffer with received data
1877  **/
1878
1879 static inline void
1880 ixgb_rx_checksum(struct ixgb_adapter *adapter,
1881                  struct ixgb_rx_desc *rx_desc,
1882                  struct sk_buff *skb)
1883 {
1884         /* Leave checksum verification to the stack if the Ignore Checksum
1885          * bit is set OR the hardware has not calculated a TCP checksum.
1886          */
1887         if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
1888            (!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
1889                 skb->ip_summed = CHECKSUM_NONE;
1890                 return;
1891         }
1892
1893         /* At this point we know the hardware did the TCP checksum */
1894         /* now look at the TCP checksum error bit */
1895         if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
1896                 /* let the stack verify checksum errors */
1897                 skb->ip_summed = CHECKSUM_NONE;
1898                 adapter->hw_csum_rx_error++;
1899         } else {
1900                 /* TCP checksum is good */
1901                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1902                 adapter->hw_csum_rx_good++;
1903         }
1904 }
1905
1906 /**
1907  * ixgb_clean_rx_irq - Send received data up the network stack
1908  * @adapter: board private structure
1909  **/
1910
1911 static boolean_t
1912 #ifdef CONFIG_IXGB_NAPI
1913 ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1914 #else
1915 ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1916 #endif
1917 {
1918         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
1919         struct net_device *netdev = adapter->netdev;
1920         struct pci_dev *pdev = adapter->pdev;
1921         struct ixgb_rx_desc *rx_desc, *next_rxd;
1922         struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1923         uint32_t length;
1924         unsigned int i, j;
1925         boolean_t cleaned = FALSE;
1926
1927         i = rx_ring->next_to_clean;
1928         rx_desc = IXGB_RX_DESC(*rx_ring, i);
1929         buffer_info = &rx_ring->buffer_info[i];
1930
1931         while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
1932                 struct sk_buff *skb, *next_skb;
1933                 u8 status;
1934
1935 #ifdef CONFIG_IXGB_NAPI
1936                 if(*work_done >= work_to_do)
1937                         break;
1938
1939                 (*work_done)++;
1940 #endif
1941                 status = rx_desc->status;
1942                 skb = buffer_info->skb;
1943
1944                 prefetch(skb->data);
1945
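                /* Advance to the next descriptor and prefetch it, its
                 * buffer_info and its skb, so they are warm in the cache by
                 * the time the next loop iteration needs them. */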
1946                 if(++i == rx_ring->count) i = 0;
1947                 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1948                 prefetch(next_rxd);
1949
1950                 if((j = i + 1) == rx_ring->count) j = 0;
1951                 next2_buffer = &rx_ring->buffer_info[j];
1952                 prefetch(next2_buffer);
1953
1954                 next_buffer = &rx_ring->buffer_info[i];
1955                 next_skb = next_buffer->skb;
1956                 prefetch(next_skb);
1957
1958                 cleaned = TRUE;
1959
1960                 pci_unmap_single(pdev,
1961                                  buffer_info->dma,
1962                                  buffer_info->length,
1963                                  PCI_DMA_FROMDEVICE);
1964
1965                 length = le16_to_cpu(rx_desc->length);
1966
1967                 if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
1968
1969                         /* All receives must fit into a single buffer */
1970
1971                         IXGB_DBG("Receive packet consumed multiple buffers "
1972                                          "length<%x>\n", length);
1973
1974                         dev_kfree_skb_irq(skb);
1975                         goto rxdesc_done;
1976                 }
1977
1978                 if (unlikely(rx_desc->errors
1979                              & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE
1980                                 | IXGB_RX_DESC_ERRORS_P |
1981                                 IXGB_RX_DESC_ERRORS_RXE))) {
1982
1983                         dev_kfree_skb_irq(skb);
1984                         goto rxdesc_done;
1985                 }
1986
1987                 /* Good Receive */
1988                 skb_put(skb, length);
1989
1990                 /* Receive Checksum Offload */
1991                 ixgb_rx_checksum(adapter, rx_desc, skb);
1992
1993                 skb->protocol = eth_type_trans(skb, netdev);
1994 #ifdef CONFIG_IXGB_NAPI
1995                 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
1996                         vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
1997                                 le16_to_cpu(rx_desc->special) &
1998                                         IXGB_RX_DESC_SPECIAL_VLAN_MASK);
1999                 } else {
2000                         netif_receive_skb(skb);
2001                 }
2002 #else /* CONFIG_IXGB_NAPI */
2003                 if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
2004                         vlan_hwaccel_rx(skb, adapter->vlgrp,
2005                                 le16_to_cpu(rx_desc->special) &
2006                                         IXGB_RX_DESC_SPECIAL_VLAN_MASK);
2007                 } else {
2008                         netif_rx(skb);
2009                 }
2010 #endif /* CONFIG_IXGB_NAPI */
2011                 netdev->last_rx = jiffies;
2012
2013 rxdesc_done:
2014                 /* clean up the descriptor; it might be written over by hw */
2015                 rx_desc->status = 0;
2016                 buffer_info->skb = NULL;
2017
2018                 /* use prefetched values */
2019                 rx_desc = next_rxd;
2020                 buffer_info = next_buffer;
2021         }
2022
2023         rx_ring->next_to_clean = i;
2024
2025         ixgb_alloc_rx_buffers(adapter);
2026
2027         return cleaned;
2028 }
2029
2030 /**
2031  * ixgb_alloc_rx_buffers - Replace used receive buffers
2032  * @adapter: address of board private structure
2033  **/
2034
2035 static void
2036 ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
2037 {
2038         struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
2039         struct net_device *netdev = adapter->netdev;
2040         struct pci_dev *pdev = adapter->pdev;
2041         struct ixgb_rx_desc *rx_desc;
2042         struct ixgb_buffer *buffer_info;
2043         struct sk_buff *skb;
2044         unsigned int i;
2045         int num_group_tail_writes;
2046         long cleancount;
2047
2048         i = rx_ring->next_to_use;
2049         buffer_info = &rx_ring->buffer_info[i];
2050         cleancount = IXGB_DESC_UNUSED(rx_ring);
2051
2052         num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
2053
2054         /* leave three descriptors unused */
2055         while(--cleancount > 2) {
2056                 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2057
2058                 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
2059
2060                 if(unlikely(!skb)) {
2061                         /* Better luck next round */
2062                         break;
2063                 }
2064
2065                 /* Make buffer alignment 2 beyond a 16 byte boundary;
2066                  * this will result in a 16 byte aligned IP header after
2067                  * the 14 byte MAC header is removed.
2068                  */
2069                 skb_reserve(skb, NET_IP_ALIGN);
2070
2071                 skb->dev = netdev;
2072
2073                 buffer_info->skb = skb;
2074                 buffer_info->length = adapter->rx_buffer_len;
2075                 buffer_info->dma =
2076                         pci_map_single(pdev,
2077                                    skb->data,
2078                                    adapter->rx_buffer_len,
2079                                    PCI_DMA_FROMDEVICE);
2080
2081                 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
2082                 /* Guarantee the DD bit is not set before the h/w gets the
2083                  * descriptor; this is the rest of the workaround for h/w
2084                  * double writeback. */
2085                 rx_desc->status = 0;
2086
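                /* Bump the RDT tail register only when 'i' is a multiple of
                 * num_group_tail_writes (the mask arithmetic assumes a
                 * power-of-two value), batching MMIO writes instead of
                 * touching the register for every descriptor. */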
2087                 if((i & ~(num_group_tail_writes - 1)) == i) {
2088                         /* Force memory writes to complete before letting h/w
2089                          * know there are new descriptors to fetch.  (Only
2090                          * applicable for weak-ordered memory model archs,
2091                          * such as IA-64). */
2092                         wmb();
2093
2094                         IXGB_WRITE_REG(&adapter->hw, RDT, i);
2095                 }
2096
2097                 if(++i == rx_ring->count) i = 0;
2098                 buffer_info = &rx_ring->buffer_info[i];
2099         }
2100
2101         rx_ring->next_to_use = i;
2102 }
2103
2104 /**
2105  * ixgb_vlan_rx_register - enables or disables vlan tagging/stripping.
2106  *
2107  * @netdev: network interface device structure
2108  * @grp: VLAN group to register, or NULL to disable tagging/stripping
2109  **/
2110 static void
2111 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2112 {
2113         struct ixgb_adapter *adapter = netdev_priv(netdev);
2114         uint32_t ctrl, rctl;
2115
2116         ixgb_irq_disable(adapter);
2117         adapter->vlgrp = grp;
2118
2119         if(grp) {
2120                 /* enable VLAN tag insert/strip */
2121                 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2122                 ctrl |= IXGB_CTRL0_VME;
2123                 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2124
2125                 /* enable VLAN receive filtering */
2126
2127                 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2128                 rctl |= IXGB_RCTL_VFE;
2129                 rctl &= ~IXGB_RCTL_CFIEN;
2130                 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2131         } else {
2132                 /* disable VLAN tag insert/strip */
2133
2134                 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2135                 ctrl &= ~IXGB_CTRL0_VME;
2136                 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2137
2138                 /* disable VLAN filtering */
2139
2140                 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
2141                 rctl &= ~IXGB_RCTL_VFE;
2142                 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2143         }
2144
2145         ixgb_irq_enable(adapter);
2146 }
2147
2148 static void
2149 ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2150 {
2151         struct ixgb_adapter *adapter = netdev_priv(netdev);
2152         uint32_t vfta, index;
2153
2154         /* add VID to filter table */
2155
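        /* The VLAN filter table is an array of 128 32-bit registers: each VID
         * selects one register (vid >> 5) and one bit within it (vid & 0x1F),
         * e.g. VID 100 lands in VFTA[3], bit 4. */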
2156         index = (vid >> 5) & 0x7F;
2157         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2158         vfta |= (1 << (vid & 0x1F));
2159         ixgb_write_vfta(&adapter->hw, index, vfta);
2160 }
2161
2162 static void
2163 ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2164 {
2165         struct ixgb_adapter *adapter = netdev_priv(netdev);
2166         uint32_t vfta, index;
2167
2168         ixgb_irq_disable(adapter);
2169
2170         if(adapter->vlgrp)
2171                 adapter->vlgrp->vlan_devices[vid] = NULL;
2172
2173         ixgb_irq_enable(adapter);
2174
2175         /* remove VID from filter table */
2176
2177         index = (vid >> 5) & 0x7F;
2178         vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2179         vfta &= ~(1 << (vid & 0x1F));
2180         ixgb_write_vfta(&adapter->hw, index, vfta);
2181 }
2182
2183 static void
2184 ixgb_restore_vlan(struct ixgb_adapter *adapter)
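        /* After a reset, re-program the tagging/stripping mode for the
         * current VLAN group (if any) and re-add every VID in the group to
         * the hardware filter table. */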
2185 {
2186         ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2187
2188         if(adapter->vlgrp) {
2189                 uint16_t vid;
2190                 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2191                         if(!adapter->vlgrp->vlan_devices[vid])
2192                                 continue;
2193                         ixgb_vlan_rx_add_vid(adapter->netdev, vid);
2194                 }
2195         }
2196 }
2197
2198 #ifdef CONFIG_NET_POLL_CONTROLLER
2199 /*
2200  * Polling 'interrupt' - used by things like netconsole to send skbs
2201  * without having to re-enable interrupts. It's not called while
2202  * the interrupt routine is executing.
2203  */
2204
2205 static void ixgb_netpoll(struct net_device *dev)
2206 {
2207         struct ixgb_adapter *adapter = netdev_priv(dev);
2208
2209         disable_irq(adapter->pdev->irq);
2210         ixgb_intr(adapter->pdev->irq, dev, NULL);
2211         enable_irq(adapter->pdev->irq);
2212 }
2213 #endif
2214
2215 /* ixgb_main.c */