/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <scsi/fc/fc_fcoe.h>

#include "ixgbe.h"
#include "ixgbe_common.h"

char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";

#define DRV_VERSION "2.0.37-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2009 Intel Corporation.";

static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
};

/* ixgbe_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX),
         board_82598 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM),
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
         board_82599 },

        /* required last entry */
        {0, }
};
MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);

#ifdef CONFIG_IXGBE_DCA
static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
                            void *p);
static struct notifier_block dca_notifier = {
        .notifier_call = ixgbe_notify_dca,
        .next          = NULL,
        .priority      = 0
};
#endif

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
}

static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
{
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
                        ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}

/*
 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @adapter: pointer to adapter struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 */
static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                           u8 queue, u8 msix_vector)
{
        u32 ivar, index;
        struct ixgbe_hw *hw = &adapter->hw;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                if (direction == -1)
                        direction = 0;
                index = (((direction * 64) + queue) >> 2) & 0x1F;
                ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
                ivar &= ~(0xFF << (8 * (queue & 0x3)));
                ivar |= (msix_vector << (8 * (queue & 0x3)));
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((queue & 1) * 8);
                        ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
                        break;
                } else {
                        /* tx or rx causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
                        index = ((16 * (queue & 1)) + (8 * direction));
                        ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
                        ivar &= ~(0xFF << index);
                        ivar |= (msix_vector << index);
                        IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
                        break;
                }
        default:
                break;
        }
}
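
/*
 * Illustrative arithmetic for the IVAR mapping above (an added note, not
 * part of the original source): on 82598, Rx queue 5 (direction 0) gives
 * index = ((0 * 64 + 5) >> 2) & 0x1F = 1 and byte lane (5 & 0x3) = 1, so
 * the vector (with IXGBE_IVAR_ALLOC_VAL set) lands in byte 1 of IVAR(1).
 * On 82599, Tx queue 5 (direction 1) gives index = 16 * (5 & 1) + 8 * 1 =
 * 24, i.e. bits 31:24 of IVAR(5 >> 1) = IVAR(2).
 */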

static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
                                          u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
        }
}

static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
                                             struct ixgbe_tx_buffer
                                             *tx_buffer_info)
{
        tx_buffer_info->dma = 0;
        if (tx_buffer_info->skb) {
                skb_dma_unmap(&adapter->pdev->dev, tx_buffer_info->skb,
                              DMA_TO_DEVICE);
                dev_kfree_skb_any(tx_buffer_info->skb);
                tx_buffer_info->skb = NULL;
        }
        tx_buffer_info->time_stamp = 0;
        /* tx_buffer_info must be completely set up in the transmit path */
}

static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring,
                                       unsigned int eop)
{
        struct ixgbe_hw *hw = &adapter->hw;

        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of eop */
        adapter->detect_tx_hung = false;
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            !(IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)) {
                /* detected Tx unit hang */
                union ixgbe_adv_tx_desc *tx_desc;
                tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
                DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
                        "  Tx Queue             <%d>\n"
                        "  TDH, TDT             <%x>, <%x>\n"
                        "  next_to_use          <%x>\n"
                        "  next_to_clean        <%x>\n"
                        "tx_buffer_info[next_to_clean]\n"
                        "  time_stamp           <%lx>\n"
                        "  jiffies              <%lx>\n",
                        tx_ring->queue_index,
                        IXGBE_READ_REG(hw, tx_ring->head),
                        IXGBE_READ_REG(hw, tx_ring->tail),
                        tx_ring->next_to_use, eop,
                        tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
        }

        return false;
}

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
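
/*
 * Worked example for the descriptor-count macros above (an added note, not
 * part of the original source): IXGBE_MAX_DATA_PER_TXD is 1 << 14 = 16384
 * bytes, so TXD_USE_COUNT(9000) = (9000 >> 14) + 1 = 1 descriptor and
 * TXD_USE_COUNT(40000) = 2 + 1 = 3.  Assuming 4 KiB pages and
 * MAX_SKB_FRAGS == 18 (typical for that configuration), DESC_NEEDED works
 * out to 1 + 18 * 1 + 1 = 20 descriptors in the worst case.
 */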

static void ixgbe_tx_timeout(struct net_device *netdev);

/**
 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: structure containing interrupt and ring information
 * @tx_ring: tx ring to clean
 **/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;

        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
        eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
               (count < tx_ring->work_limit)) {
                bool cleaned = false;
                for ( ; !cleaned; count++) {
                        struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
                        cleaned = (i == eop);
                        skb = tx_buffer_info->skb;

                        if (cleaned && skb) {
                                unsigned int segs, bytecount;
                                unsigned int hlen = skb_headlen(skb);

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
#ifdef IXGBE_FCOE
                                /* adjust for FCoE Sequence Offload */
                                if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
                                    && (skb->protocol == htons(ETH_P_FCOE)) &&
                                    skb_is_gso(skb)) {
                                        hlen = skb_transport_offset(skb) +
                                                sizeof(struct fc_frame_header) +
                                                sizeof(struct fcoe_crc_eof);
                                        segs = DIV_ROUND_UP(skb->len - hlen,
                                                skb_shinfo(skb)->gso_size);
                                }
#endif /* IXGBE_FCOE */
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * hlen) + skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);

                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }

                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
                        ++adapter->restart_queue;
                }
        }

        if (adapter->detect_tx_hung) {
                if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
                        /* schedule immediate reset if we believe we hung */
                        DPRINTK(PROBE, INFO,
                                "tx hang %d detected, resetting adapter\n",
                                adapter->tx_timeout_count + 1);
                        ixgbe_tx_timeout(adapter->netdev);
                }
        }

        /* re-arm the interrupt */
        if (count >= tx_ring->work_limit)
                ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));

        tx_ring->total_bytes += total_bytes;
        tx_ring->total_packets += total_packets;
        tx_ring->stats.packets += total_packets;
        tx_ring->stats.bytes += total_bytes;
        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return (count < tx_ring->work_limit);
}
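
/*
 * Added note on the accounting above (illustrative figures, not from the
 * original source): for a TSO skb the hardware replicates the header for
 * every segment, so with e.g. gso_segs = 45 and a 66-byte header,
 * bytecount = (45 - 1) * 66 + skb->len counts the on-wire bytes rather
 * than the skb length alone.  The wake check uses TX_WAKE_THRESHOLD =
 * DESC_NEEDED * 2, i.e. the queue is only restarted once at least two
 * worst-case packets' worth of descriptors are free again.
 */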

#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *rx_ring)
{
        u32 rxctrl;
        int cpu = get_cpu();
        int q = rx_ring - adapter->rx_ring;

        if (rx_ring->cpu != cpu) {
                rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
                        rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
                        rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
                }
                rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
                rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
                rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
                            IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
                rx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                                struct ixgbe_ring *tx_ring)
{
        u32 txctrl;
        int cpu = get_cpu();
        int q = tx_ring - adapter->tx_ring;

        if (tx_ring->cpu != cpu) {
                txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
                if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
                        txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                        txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                        txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                                   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
                }
                txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
                tx_ring->cpu = cpu;
        }
        put_cpu();
}

static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
                return;

        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i].cpu = -1;
                ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i].cpu = -1;
                ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
        }
}

static int __ixgbe_notify_dca(struct device *dev, void *data)
{
        struct net_device *netdev = dev_get_drvdata(dev);
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        unsigned long event = *(unsigned long *)data;

        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
                        break;
                if (dca_add_requester(dev) == 0) {
                        adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
                        ixgbe_setup_dca(adapter);
                        break;
                }
                /* Fall Through since DCA is disabled. */
        case DCA_PROVIDER_REMOVE:
                if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
                        dca_remove_requester(dev);
                        adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
                }
                break;
        }

        return 0;
}

#endif /* CONFIG_IXGBE_DCA */
/**
 * ixgbe_receive_skb - Send a completed packet up the stack
 * @q_vector: structure containing interrupt and ring information
 * @skb: packet to send up
 * @status: hardware indication of status of receive
 * @ring: rx descriptor ring (for a specific queue) the packet came from
 * @rx_desc: rx descriptor
 **/
static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                              struct sk_buff *skb, u8 status,
                              struct ixgbe_ring *ring,
                              union ixgbe_adv_rx_desc *rx_desc)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct napi_struct *napi = &q_vector->napi;
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);

        skb_record_rx_queue(skb, ring->queue_index);
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
                else
                        napi_gro_receive(napi, skb);
        } else {
                if (adapter->vlgrp && is_vlan && (tag & VLAN_VID_MASK))
                        vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
                else
                        netif_rx(skb);
        }
}

/**
 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
 * @adapter: address of board private structure
 * @rx_desc: rx descriptor carrying the hardware status of the receive
 * @skb: skb currently being received and modified
 **/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
                                     union ixgbe_adv_rx_desc *rx_desc,
                                     struct sk_buff *skb)
{
        u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);

        skb->ip_summed = CHECKSUM_NONE;

        /* Rx csum disabled */
        if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
                return;

        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
                adapter->hw_csum_rx_error++;
                return;
        }

        if (!(status_err & IXGBE_RXD_STAT_L4CS))
                return;

        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
                u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

                /*
                 * 82599 errata, UDP frames with a 0 checksum can be marked as
                 * checksum errors.
                 */
                if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
                    (adapter->hw.mac.type == ixgbe_mac_82599EB))
                        return;

                adapter->hw_csum_rx_error++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        adapter->hw_csum_rx_good++;
}

static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
                                         struct ixgbe_ring *rx_ring, u32 val)
{
        /*
         * Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
}

/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 * @rx_ring: ring to refill
 * @cleaned_count: number of buffers to replace
 **/
static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring,
                                   int cleaned_count)
{
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        unsigned int i;

        i = rx_ring->next_to_use;
        bi = &rx_ring->rx_buffer_info[i];

        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);

                if (!bi->page_dma &&
                    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
                        if (!bi->page) {
                                bi->page = alloc_page(GFP_ATOMIC);
                                if (!bi->page) {
                                        adapter->alloc_rx_page_failed++;
                                        goto no_buffers;
                                }
                                bi->page_offset = 0;
                        } else {
                                /* use a half page if we're re-using */
                                bi->page_offset ^= (PAGE_SIZE / 2);
                        }

                        bi->page_dma = pci_map_page(pdev, bi->page,
                                                    bi->page_offset,
                                                    (PAGE_SIZE / 2),
                                                    PCI_DMA_FROMDEVICE);
                }

                if (!bi->skb) {
                        struct sk_buff *skb;
                        skb = netdev_alloc_skb(adapter->netdev,
                                               (rx_ring->rx_buf_len +
                                                NET_IP_ALIGN));

                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        /*
                         * Make buffer alignment 2 beyond a 16 byte boundary
                         * this will result in a 16 byte aligned IP header after
                         * the 14 byte MAC header is removed
                         */
                        skb_reserve(skb, NET_IP_ALIGN);

                        bi->skb = skb;
                        bi->dma = pci_map_single(pdev, skb->data,
                                                 rx_ring->rx_buf_len,
                                                 PCI_DMA_FROMDEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                bi = &rx_ring->rx_buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i-- == 0)
                        i = (rx_ring->count - 1);

                ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
        }
}
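
/*
 * Added note on the half-page reuse above (illustrative, not from the
 * original source): with PAGE_SIZE = 4096, bi->page_offset toggles between
 * 0 and 2048 via the XOR, so each page is carved into two 2 KiB receive
 * buffers that are DMA-mapped independently.  The companion logic in
 * ixgbe_clean_rx_irq() keeps the page for reuse only while the other half
 * is still exclusively owned (page_count == 1) and the buffer length fits
 * in half a page.
 */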

static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
}

static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
{
        return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
}

static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
{
        return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
                IXGBE_RXDADV_RSCCNT_MASK) >>
                IXGBE_RXDADV_RSCCNT_SHIFT;
}

/**
 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
 * @skb: pointer to the last skb in the rsc queue
 *
 * This function changes a queue full of hw rsc buffers into a completed
 * packet.  It uses the ->prev pointers to find the first packet and then
 * turns it into the frag list owner.
 **/
static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
{
        unsigned int frag_list_size = 0;

        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
        }

        skb_shinfo(skb)->frag_list = skb->next;
        skb->next = NULL;
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
        return skb;
}
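
/*
 * Added walk-through of the transform above (not part of the original
 * source): for an RSC chain A -> B -> C built with ->next/->prev pointers,
 * the function is called with C (the last skb).  The loop walks back over
 * C and B, accumulating their lengths into frag_list_size, until it
 * reaches A.  A's ->next (pointing at B) then becomes its frag_list, and
 * A's len/data_len/truesize are grown by the trailing skbs' bytes so the
 * stack sees one logical packet.
 */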

static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
        unsigned int i, rsc_count = 0;
        u32 len, staterr;
        u16 hdr_info;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
        int ddp_bytes = 0;
#endif /* IXGBE_FCOE */

        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        rx_buffer_info = &rx_ring->rx_buffer_info[i];

        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;

                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                        hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
                        len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
                               IXGBE_RXDADV_HDRBUFLEN_SHIFT;
                        if (hdr_info & IXGBE_RXDADV_SPH)
                                adapter->rx_hdr_split++;
                        if (len > IXGBE_RX_HDR_SIZE)
                                len = IXGBE_RX_HDR_SIZE;
                        upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                } else {
                        len = le16_to_cpu(rx_desc->wb.upper.length);
                }

                cleaned = true;
                skb = rx_buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                rx_buffer_info->skb = NULL;

                if (rx_buffer_info->dma) {
                        pci_unmap_single(pdev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         PCI_DMA_FROMDEVICE);
                        rx_buffer_info->dma = 0;
                        skb_put(skb, len);
                }

                if (upper_len) {
                        pci_unmap_page(pdev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);

                        if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
                            (page_count(rx_buffer_info->page) != 1))
                                rx_buffer_info->page = NULL;
                        else
                                get_page(rx_buffer_info->page);

                        skb->len += upper_len;
                        skb->data_len += upper_len;
                        skb->truesize += upper_len;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;

                next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                cleaned_count++;

                if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
                        rsc_count = ixgbe_get_rsc_count(rx_desc);

                if (rsc_count) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
                        rx_ring->rsc_count += (rsc_count - 1);
                } else {
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }

                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb);
                        rx_ring->stats.packets++;
                        rx_ring->stats.bytes += skb->len;
                } else {
                        if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
                                next_buffer->dma = 0;
                        } else {
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
                        adapter->non_eop_descs++;
                        goto next_desc;
                }

                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                ixgbe_rx_checksum(adapter, rx_desc, skb);

                /* probably a little skewed due to removing CRC */
                total_rx_bytes += skb->len;
                total_rx_packets++;

                skb->protocol = eth_type_trans(skb, adapter->netdev);
#ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
                        if (!ddp_bytes)
                                goto next_desc;
                }
#endif /* IXGBE_FCOE */
                ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                rx_buffer_info = &rx_ring->rx_buffer_info[i];

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);

        if (cleaned_count)
                ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);

#ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;

                mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
                        mss &= ~511;
                total_rx_bytes += ddp_bytes;
                total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
        }
#endif /* IXGBE_FCOE */

        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
        adapter->net_stats.rx_bytes += total_rx_bytes;
        adapter->net_stats.rx_packets += total_rx_packets;

        return cleaned;
}
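
/*
 * Added arithmetic note for the FCoE DDP accounting above (the header
 * sizes are assumptions for illustration): the mss estimate subtracts the
 * FCoE, FC frame and CRC/EOF header sizes from the MTU, then rounds down
 * to a multiple of 512 when large enough.  E.g. with an MTU of 2500 and
 * roughly 46 bytes of headers, mss = 2454 & ~511 = 2048, so 10000 DDPed
 * bytes are counted as DIV_ROUND_UP(10000, 2048) = 5 packets.
 */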

static int ixgbe_clean_rxonly(struct napi_struct *, int);
/**
 * ixgbe_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure
 *
 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
 * interrupts.
 **/
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
        struct ixgbe_q_vector *q_vector;
        int i, j, q_vectors, v_idx, r_idx;
        u32 mask;

        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        /*
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
                q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);

                for (i = 0; i < q_vector->rxr_count; i++) {
                        j = adapter->rx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 0, j, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
                }
                r_idx = find_first_bit(q_vector->txr_idx,
                                       adapter->num_tx_queues);

                for (i = 0; i < q_vector->txr_count; i++) {
                        j = adapter->tx_ring[r_idx].reg_idx;
                        ixgbe_set_ivar(adapter, 1, j, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
                }

                /* if this is a tx only vector halve the interrupt rate */
                if (q_vector->txr_count && !q_vector->rxr_count)
                        q_vector->eitr = (adapter->eitr_param >> 1);
                else if (q_vector->rxr_count)
                        /* rx only */
                        q_vector->eitr = adapter->eitr_param;

                ixgbe_write_eitr(q_vector);
        }

        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
        else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);

        /* set up to autoclear timer, and the vectors */
        mask = IXGBE_EIMS_ENABLE_MASK;
        mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}

enum latency_range {
        lowest_latency = 0,
        low_latency = 1,
        bulk_latency = 2,
        latency_invalid = 255
};

/**
 * ixgbe_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @eitr: eitr setting (ints per sec) used during the last timeslice
 * @itr_setting: current latency class for this queue (see enum latency_range)
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 *      Stores a new ITR value based on packets and byte
 *      counts during the last interrupt.  The advantage of per-interrupt
 *      computation is faster updates and more accurate ITR for the current
 *      traffic pattern.  Constants in this function were computed
 *      based on theoretical maximum wire speed and thresholds were set based
 *      on testing data as well as attempting to minimize response time
 *      while increasing bulk throughput.
 *      This functionality is controlled by the InterruptThrottleRate module
 *      parameter (see ixgbe_param.c).
 **/
static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
                           u32 eitr, u8 itr_setting,
                           int packets, int bytes)
{
        unsigned int retval = itr_setting;
        u32 timepassed_us;
        u64 bytes_perint;

        if (packets == 0)
                goto update_itr_done;

        /* simple throttlerate management
         *    0-20MB/s lowest (100000 ints/s)
         *   20-100MB/s low   (20000 ints/s)
         *  100-1249MB/s bulk (8000 ints/s)
         */
        /* what was last interrupt timeslice? */
        timepassed_us = 1000000/eitr;
        bytes_perint = bytes / timepassed_us; /* bytes/usec */

        switch (itr_setting) {
        case lowest_latency:
                if (bytes_perint > adapter->eitr_low)
                        retval = low_latency;
                break;
        case low_latency:
                if (bytes_perint > adapter->eitr_high)
                        retval = bulk_latency;
                else if (bytes_perint <= adapter->eitr_low)
                        retval = lowest_latency;
                break;
        case bulk_latency:
                if (bytes_perint <= adapter->eitr_high)
                        retval = low_latency;
                break;
        }

update_itr_done:
        return retval;
}
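
/*
 * Added worked example for the classifier above (figures are illustrative,
 * not from the original source): at eitr = 20000 ints/s the last timeslice
 * was 1000000 / 20000 = 50 us, so receiving 5000 bytes in that slice gives
 * bytes_perint = 100 bytes/us, i.e. roughly 100 MB/s.  That value is then
 * compared against adapter->eitr_low/eitr_high to decide whether to move
 * between the lowest/low/bulk latency classes listed in the comment.
 */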

/**
 * ixgbe_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        struct ixgbe_hw *hw = &adapter->hw;
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
        } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
                /*
                 * set the WDIS bit to not clear the timer bits and cause an
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}

static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
{
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
        int i, r_idx;
        struct ixgbe_ring *rx_ring, *tx_ring;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
                                           tx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
                                    q_vector->tx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
                                           rx_ring->total_bytes);
                /* if the result for this queue would decrease interrupt
                 * rate for this vector then use that result */
                q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
                                    q_vector->rx_itr - 1 : ret_itr);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        current_itr = max(q_vector->rx_itr, q_vector->tx_itr);

        switch (current_itr) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = 100000;
                break;
        case low_latency:
                new_itr = 20000; /* aka hwitr = ~200 */
                break;
        case bulk_latency:
        default:
                new_itr = 8000;
                break;
        }

        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
                new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);

                /* save the smoothed value that also gets written to hardware */
                q_vector->eitr = new_itr;

                ixgbe_write_eitr(q_vector);
        }

        return;
}
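
/*
 * Added worked example for the smoothing above (not part of the original
 * source): if the vector is currently at 8000 ints/s and the classifier
 * asks for 100000 ints/s, the update is 8000 * 90/100 + 100000 * 10/100 =
 * 7200 + 10000 = 17200 ints/s, so the rate ramps toward the target over
 * several intervals instead of jumping immediately.
 */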

static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
            (eicr & IXGBE_EICR_GPI_SDP1)) {
                DPRINTK(PROBE, CRIT, "Fan has stopped, replace the adapter\n");
                /* write to clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
        }
}

static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
{
        struct ixgbe_hw *hw = &adapter->hw;

        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
                schedule_work(&adapter->multispeed_fiber_task);
        } else if (eicr & IXGBE_EICR_GPI_SDP2) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
                schedule_work(&adapter->sfp_config_module_task);
        } else {
                /* Interrupt isn't for us... */
                return;
        }
}

static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        adapter->lsc_int++;
        adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
        adapter->link_check_timeout = jiffies;
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
                schedule_work(&adapter->watchdog_task);
        }
}

static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;

        /*
         * Workaround for silicon errata.  Use clear-by-write instead
         * of clear-by-read.  Reading with EICS will return the
         * interrupt causes without clearing, which is later done
         * with the write to EICR.
         */
        eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
        IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        if (hw->mac.type == ixgbe_mac_82598EB)
                ixgbe_check_fan_failure(adapter, eicr);

        if (hw->mac.type == ixgbe_mac_82599EB) {
                ixgbe_check_sfp_event(adapter, eicr);

                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
                        IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
                        /* Disable transmits before FDIR Re-initialization */
                        netif_tx_stop_all_queues(netdev);
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                           &adapter->tx_ring[i];
                                if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
                                                       &tx_ring->reinit_state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
        }
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);

        return IRQ_HANDLED;
}

static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
        }
        /* skip the flush */
}

static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
{
        u32 mask;

        if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
        } else {
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
        }
        /* skip the flush */
}
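
/*
 * Added note on the qmask handling above (not part of the original
 * source): on 82599 the 64-bit queue mask is split across two 32-bit
 * registers, so e.g. qmask = (1ULL << 40) writes 0 to the EX(0) register
 * and bit 8 (40 - 32) to the EX(1) register, while the 82598 only has the
 * single 32-bit EIMS/EIMC register filtered through IXGBE_EIMS_RTX_QUEUE.
 */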

static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring     *tx_ring;
        int i, r_idx;

        if (!q_vector->txr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                tx_ring = &(adapter->tx_ring[r_idx]);
                tx_ring->total_bytes = 0;
                tx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        /* disable interrupts on this vector only */
        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
 * @irq: unused
 * @data: pointer to our q_vector struct for this interrupt vector
 **/
static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring  *rx_ring;
        int r_idx;
        int i;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0;  i < q_vector->rxr_count; i++) {
                rx_ring = &(adapter->rx_ring[r_idx]);
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        if (!q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        rx_ring = &(adapter->rx_ring[r_idx]);
        /* disable interrupts on this vector only */
        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
{
        struct ixgbe_q_vector *q_vector = data;
        struct ixgbe_adapter  *adapter = q_vector->adapter;
        struct ixgbe_ring  *ring;
        int r_idx;
        int i;

        if (!q_vector->txr_count && !q_vector->rxr_count)
                return IRQ_HANDLED;

        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                ring = &(adapter->tx_ring[r_idx]);
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
        }

        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                ring = &(adapter->rx_ring[r_idx]);
                ring->total_bytes = 0;
                ring->total_packets = 0;
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
        }

        /* disable interrupts on this vector only */
        ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
        napi_schedule(&q_vector->napi);

        return IRQ_HANDLED;
}

1331 /**
1332  * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
1333  * @napi: napi struct with our device's info in it
1334  * @budget: amount of work the driver is allowed to do this pass, in packets
1335  *
1336  * This function is optimized for cleaning one queue only on a single
1337  * q_vector!!!
1338  **/
1339 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1340 {
1341         struct ixgbe_q_vector *q_vector =
1342                                container_of(napi, struct ixgbe_q_vector, napi);
1343         struct ixgbe_adapter *adapter = q_vector->adapter;
1344         struct ixgbe_ring *rx_ring = NULL;
1345         int work_done = 0;
1346         long r_idx;
1347
1348         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1349         rx_ring = &(adapter->rx_ring[r_idx]);
1350 #ifdef CONFIG_IXGBE_DCA
1351         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1352                 ixgbe_update_rx_dca(adapter, rx_ring);
1353 #endif
1354
1355         ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1356
1357         /* If all Rx work done, exit the polling mode */
1358         if (work_done < budget) {
1359                 napi_complete(napi);
1360                 if (adapter->itr_setting & 1)
1361                         ixgbe_set_itr_msix(q_vector);
1362                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1363                         ixgbe_irq_enable_queues(adapter,
1364                                                 ((u64)1 << q_vector->v_idx));
1365         }
1366
1367         return work_done;
1368 }
1369
1370 /**
1371  * ixgbe_clean_rxtx_many - msix (aka one shot) rx/tx clean routine
1372  * @napi: napi struct with our device's info in it
1373  * @budget: amount of work the driver is allowed to do this pass, in packets
1374  *
1375  * This function will clean more than one rx/tx queue associated with a
1376  * q_vector.
1377  **/
1378 static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1379 {
1380         struct ixgbe_q_vector *q_vector =
1381                                container_of(napi, struct ixgbe_q_vector, napi);
1382         struct ixgbe_adapter *adapter = q_vector->adapter;
1383         struct ixgbe_ring *ring = NULL;
1384         int work_done = 0, i;
1385         long r_idx;
1386         bool tx_clean_complete = true;
1387
1388         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1389         for (i = 0; i < q_vector->txr_count; i++) {
1390                 ring = &(adapter->tx_ring[r_idx]);
1391 #ifdef CONFIG_IXGBE_DCA
1392                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1393                         ixgbe_update_tx_dca(adapter, ring);
1394 #endif
1395                 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1396                 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1397                                       r_idx + 1);
1398         }
1399
1400         /* attempt to distribute budget to each queue fairly, but don't allow
1401          * the budget to go below 1 because we'll exit polling */
1402         budget /= (q_vector->rxr_count ?: 1);
1403         budget = max(budget, 1);
1404         r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1405         for (i = 0; i < q_vector->rxr_count; i++) {
1406                 ring = &(adapter->rx_ring[r_idx]);
1407 #ifdef CONFIG_IXGBE_DCA
1408                 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1409                         ixgbe_update_rx_dca(adapter, ring);
1410 #endif
1411                 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1412                 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1413                                       r_idx + 1);
1414         }
1415
1418         /* If all Rx work done, exit the polling mode */
1419         if (work_done < budget) {
1420                 napi_complete(napi);
1421                 if (adapter->itr_setting & 1)
1422                         ixgbe_set_itr_msix(q_vector);
1423                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1424                         ixgbe_irq_enable_queues(adapter,
1425                                                 ((u64)1 << q_vector->v_idx));
1426                 return 0;
1427         }
1428
1429         return work_done;
1430 }
1431
1432 /**
1433  * ixgbe_clean_txonly - msix (aka one shot) tx clean routine
1434  * @napi: napi struct with our device's info in it
1435  * @budget: amount of work the driver is allowed to do this pass, in packets
1436  *
1437  * This function is optimized for cleaning one queue only on a single
1438  * q_vector!!!
1439  **/
1440 static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
1441 {
1442         struct ixgbe_q_vector *q_vector =
1443                                container_of(napi, struct ixgbe_q_vector, napi);
1444         struct ixgbe_adapter *adapter = q_vector->adapter;
1445         struct ixgbe_ring *tx_ring = NULL;
1446         int work_done = 0;
1447         long r_idx;
1448
1449         r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1450         tx_ring = &(adapter->tx_ring[r_idx]);
1451 #ifdef CONFIG_IXGBE_DCA
1452         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1453                 ixgbe_update_tx_dca(adapter, tx_ring);
1454 #endif
1455
1456         if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
1457                 work_done = budget;
1458
1459         /* If all Tx work done, exit the polling mode */
1460         if (work_done < budget) {
1461                 napi_complete(napi);
1462                 if (adapter->itr_setting & 1)
1463                         ixgbe_set_itr_msix(q_vector);
1464                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1465                         ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
1466         }
1467
1468         return work_done;
1469 }
1470
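/* map_vector_to_rxq - record that MSI-X vector v_idx services Rx ring r_idx */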
1471 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
1472                                      int r_idx)
1473 {
1474         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1475
1476         set_bit(r_idx, q_vector->rxr_idx);
1477         q_vector->rxr_count++;
1478 }
1479
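/* map_vector_to_txq - record that MSI-X vector v_idx services Tx ring t_idx */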
1480 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1481                                      int t_idx)
1482 {
1483         struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
1484
1485         set_bit(t_idx, q_vector->txr_idx);
1486         q_vector->txr_count++;
1487 }
1488
1489 /**
1490  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1491  * @adapter: board private structure to initialize
1492  * @vectors: allotted vector count for descriptor rings
1493  *
1494  * This function maps descriptor rings to the queue-specific vectors
1495  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
1496  * one vector per ring/queue, but on a constrained vector budget, we
1497  * group the rings as "efficiently" as possible.  You would add new
1498  * mapping configurations in here.
1499  **/
1500 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
1501                                       int vectors)
1502 {
1503         int v_start = 0;
1504         int rxr_idx = 0, txr_idx = 0;
1505         int rxr_remaining = adapter->num_rx_queues;
1506         int txr_remaining = adapter->num_tx_queues;
1507         int i, j;
1508         int rqpv, tqpv;
1509         int err = 0;
1510
1511         /* No mapping required if MSI-X is disabled. */
1512         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1513                 goto out;
1514
1515         /*
1516          * The ideal configuration...
1517          * We have enough vectors to map one per queue.
1518          */
1519         if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1520                 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1521                         map_vector_to_rxq(adapter, v_start, rxr_idx);
1522
1523                 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1524                         map_vector_to_txq(adapter, v_start, txr_idx);
1525
1526                 goto out;
1527         }
1528
1529         /*
1530          * If we don't have enough vectors for a 1-to-1
1531          * mapping, we'll have to group them so there are
1532          * multiple queues per vector.
1533          */
1534         /* Re-adjusting *qpv takes care of the remainder. */
1535         for (i = v_start; i < vectors; i++) {
1536                 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
1537                 for (j = 0; j < rqpv; j++) {
1538                         map_vector_to_rxq(adapter, i, rxr_idx);
1539                         rxr_idx++;
1540                         rxr_remaining--;
1541                 }
1542         }
1543         for (i = v_start; i < vectors; i++) {
1544                 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
1545                 for (j = 0; j < tqpv; j++) {
1546                         map_vector_to_txq(adapter, i, txr_idx);
1547                         txr_idx++;
1548                         txr_remaining--;
1549                 }
1550         }
1551
1552 out:
1553         return err;
1554 }
1555
1556 /**
1557  * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
1558  * @adapter: board private structure
1559  *
1560  * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
1561  * interrupts from the kernel.
1562  **/
1563 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1564 {
1565         struct net_device *netdev = adapter->netdev;
1566         irqreturn_t (*handler)(int, void *);
1567         int i, vector, q_vectors, err;
1568         int ri = 0, ti = 0;
1569
1570         /* Decrement for Other and TCP Timer vectors */
1571         q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1572
1573         /* Map the Tx/Rx rings to the vectors we were allotted. */
1574         err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1575         if (err)
1576                 goto out;
1577
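/*
 * Pick the leanest handler for each vector: Tx-only and Rx-only vectors
 * get dedicated handlers, mixed vectors get the combined one.
 */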
1578 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1579                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1580                          &ixgbe_msix_clean_many)
1581         for (vector = 0; vector < q_vectors; vector++) {
1582                 handler = SET_HANDLER(adapter->q_vector[vector]);
1583
1584                 if (handler == &ixgbe_msix_clean_rx)
1585                         sprintf(adapter->name[vector], "%s-%s-%d",
1586                                 netdev->name, "rx", ri++);
1587                 else if (handler == &ixgbe_msix_clean_tx)
1588                         sprintf(adapter->name[vector], "%s-%s-%d",
1589                                 netdev->name, "tx", ti++);
1590                 else
1591                         sprintf(adapter->name[vector], "%s-%s-%d",
1592                                 netdev->name, "TxRx", vector);
1595
1596                 err = request_irq(adapter->msix_entries[vector].vector,
1597                                   handler, 0, adapter->name[vector],
1598                                   adapter->q_vector[vector]);
1599                 if (err) {
1600                         DPRINTK(PROBE, ERR,
1601                                 "request_irq failed for MSIX interrupt: "
1602                                 "Error %d\n", err);
1603                         goto free_queue_irqs;
1604                 }
1605         }
1606
1607         sprintf(adapter->name[vector], "%s:lsc", netdev->name);
1608         err = request_irq(adapter->msix_entries[vector].vector,
1609                           &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
1610         if (err) {
1611                 DPRINTK(PROBE, ERR,
1612                         "request_irq for msix_lsc failed: %d\n", err);
1613                 goto free_queue_irqs;
1614         }
1615
1616         return 0;
1617
1618 free_queue_irqs:
1619         for (i = vector - 1; i >= 0; i--)
1620                 free_irq(adapter->msix_entries[i].vector,
1621                          adapter->q_vector[i]);
1622         adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
1623         pci_disable_msix(adapter->pdev);
1624         kfree(adapter->msix_entries);
1625         adapter->msix_entries = NULL;
1626 out:
1627         return err;
1628 }
1629
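/**
 * ixgbe_set_itr - update the ITR value for the lone legacy/MSI vector
 * @adapter: board private structure
 *
 * Recomputes the interrupt throttle rate from the traffic seen on
 * tx_ring[0]/rx_ring[0] and, if it changed, smooths and writes the
 * new EITR value.
 **/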
1630 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1631 {
1632         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
1633         u8 current_itr;
1634         u32 new_itr = q_vector->eitr;
1635         struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1636         struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1637
1638         q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
1639                                             q_vector->tx_itr,
1640                                             tx_ring->total_packets,
1641                                             tx_ring->total_bytes);
1642         q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
1643                                             q_vector->rx_itr,
1644                                             rx_ring->total_packets,
1645                                             rx_ring->total_bytes);
1646
1647         current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
1648
1649         switch (current_itr) {
1650         /* counts and packets in update_itr are dependent on these numbers */
1651         case lowest_latency:
1652                 new_itr = 100000;
1653                 break;
1654         case low_latency:
1655                 new_itr = 20000; /* aka hwitr = ~200 */
1656                 break;
1657         case bulk_latency:
1658                 new_itr = 8000;
1659                 break;
1660         default:
1661                 break;
1662         }
1663
1664         if (new_itr != q_vector->eitr) {
1665                 /* do an exponential smoothing: 90% previous, 10% new target */
1666                 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1667
1668                 /* save the algorithm value here, not the smoothed one */
1669                 q_vector->eitr = new_itr;
1670
1671                 ixgbe_write_eitr(q_vector);
1672         }
1675 }
1676
1677 /**
1678  * ixgbe_irq_enable - Enable default interrupt generation settings
1679  * @adapter: board private structure
1680  **/
1681 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
1682 {
1683         u32 mask;
1684
1685         mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1686         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1687                 mask |= IXGBE_EIMS_GPI_SDP1;
1688         if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1689                 mask |= IXGBE_EIMS_ECC;
1690                 mask |= IXGBE_EIMS_GPI_SDP1;
1691                 mask |= IXGBE_EIMS_GPI_SDP2;
1692         }
1693         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
1694             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
1695                 mask |= IXGBE_EIMS_FLOW_DIR;
1696
1697         IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1698         ixgbe_irq_enable_queues(adapter, ~0);
1699         IXGBE_WRITE_FLUSH(&adapter->hw);
1700 }
1701
1702 /**
1703  * ixgbe_intr - legacy mode Interrupt Handler
1704  * @irq: interrupt number
1705  * @data: pointer to a network interface device structure
1706  **/
1707 static irqreturn_t ixgbe_intr(int irq, void *data)
1708 {
1709         struct net_device *netdev = data;
1710         struct ixgbe_adapter *adapter = netdev_priv(netdev);
1711         struct ixgbe_hw *hw = &adapter->hw;
1712         struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
1713         u32 eicr;
1714
1715         /*
1716          * Workaround for silicon errata.  Mask the interrupts
1717          * before the read of EICR.
1718          */
1719         IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
1720
1721         /* for NAPI, we use EIAM to auto-mask the tx/rx interrupt bits on
1722          * read, therefore no explicit interrupt disable is necessary */
1723         eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
1724         if (!eicr) {
1725                 /* shared interrupt alert!
1726                  * make sure interrupts are enabled because the read will
1727                  * have disabled interrupts due to EIAM */
1728                 ixgbe_irq_enable(adapter);
1729                 return IRQ_NONE;        /* Not our interrupt */
1730         }
1731
1732         if (eicr & IXGBE_EICR_LSC)
1733                 ixgbe_check_lsc(adapter);
1734
1735         if (hw->mac.type == ixgbe_mac_82599EB)
1736                 ixgbe_check_sfp_event(adapter, eicr);
1737
1738         ixgbe_check_fan_failure(adapter, eicr);
1739
1740         if (napi_schedule_prep(&(q_vector->napi))) {
1741                 adapter->tx_ring[0].total_packets = 0;
1742                 adapter->tx_ring[0].total_bytes = 0;
1743                 adapter->rx_ring[0].total_packets = 0;
1744                 adapter->rx_ring[0].total_bytes = 0;
1745                 /* would disable interrupts here but EIAM disabled it */
1746                 __napi_schedule(&(q_vector->napi));
1747         }
1748
1749         return IRQ_HANDLED;
1750 }
1751
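/**
 * ixgbe_reset_q_vectors - clear all ring-to-vector mappings
 * @adapter: board private structure
 **/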
1752 static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1753 {
1754         int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1755
1756         for (i = 0; i < q_vectors; i++) {
1757                 struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
1758                 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1759                 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1760                 q_vector->rxr_count = 0;
1761                 q_vector->txr_count = 0;
1762         }
1763 }
1764
1765 /**
1766  * ixgbe_request_irq - initialize interrupts
1767  * @adapter: board private structure
1768  *
1769  * Attempts to configure interrupts using the best available
1770  * capabilities of the hardware and kernel.
1771  **/
1772 static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
1773 {
1774         struct net_device *netdev = adapter->netdev;
1775         int err;
1776
1777         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1778                 err = ixgbe_request_msix_irqs(adapter);
1779         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1780                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
1781                                   netdev->name, netdev);
1782         } else {
1783                 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1784                                   netdev->name, netdev);
1785         }
1786
1787         if (err)
1788                 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
1789
1790         return err;
1791 }
1792
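/**
 * ixgbe_free_irq - free all requested IRQs
 * @adapter: board private structure
 *
 * In MSI-X mode the LSC vector is released first, then each queue
 * vector; legacy/MSI mode has only the single PCI IRQ to free.
 **/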
1793 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
1794 {
1795         struct net_device *netdev = adapter->netdev;
1796
1797         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1798                 int i, q_vectors;
1799
1800                 q_vectors = adapter->num_msix_vectors;
1801
1802                 i = q_vectors - 1;
1803                 free_irq(adapter->msix_entries[i].vector, netdev);
1804
1805                 i--;
1806                 for (; i >= 0; i--) {
1807                         free_irq(adapter->msix_entries[i].vector,
1808                                  adapter->q_vector[i]);
1809                 }
1810
1811                 ixgbe_reset_q_vectors(adapter);
1812         } else {
1813                 free_irq(adapter->pdev->irq, netdev);
1814         }
1815 }
1816
1817 /**
1818  * ixgbe_irq_disable - Mask off interrupt generation on the NIC
1819  * @adapter: board private structure
1820  **/
1821 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
1822 {
1823         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1824                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
1825         } else {
1826                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
1827                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
1828                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
1829         }
1830         IXGBE_WRITE_FLUSH(&adapter->hw);
1831         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1832                 int i;
1833                 for (i = 0; i < adapter->num_msix_vectors; i++)
1834                         synchronize_irq(adapter->msix_entries[i].vector);
1835         } else {
1836                 synchronize_irq(adapter->pdev->irq);
1837         }
1838 }
1839
1840 /**
1841  * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
1842  * @adapter: board private structure
1843  **/
1844 static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
1845 {
1846         struct ixgbe_hw *hw = &adapter->hw;
1847
1848         IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
1849                         EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param));
1850
1851         ixgbe_set_ivar(adapter, 0, 0, 0);
1852         ixgbe_set_ivar(adapter, 1, 0, 0);
1853
1854         map_vector_to_rxq(adapter, 0, 0);
1855         map_vector_to_txq(adapter, 0, 0);
1856
1857         DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
1858 }
1859
1860 /**
1861  * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
1862  * @adapter: board private structure
1863  *
1864  * Configure the Tx unit of the MAC after a reset.
1865  **/
1866 static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
1867 {
1868         u64 tdba;
1869         struct ixgbe_hw *hw = &adapter->hw;
1870         u32 i, j, tdlen, txctrl;
1871
1872         /* Setup the HW Tx Head and Tail descriptor pointers */
1873         for (i = 0; i < adapter->num_tx_queues; i++) {
1874                 struct ixgbe_ring *ring = &adapter->tx_ring[i];
1875                 j = ring->reg_idx;
1876                 tdba = ring->dma;
1877                 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1878                 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
1879                                 (tdba & DMA_BIT_MASK(32)));
1880                 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
1881                 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
1882                 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
1883                 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
1884                 adapter->tx_ring[i].head = IXGBE_TDH(j);
1885                 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1886                 /* Disable Tx Head Writeback RO bit, since this hoses
1887                  * bookkeeping if things aren't delivered in order.
1888                  */
1889                 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
1890                 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1891                 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
1892         }
1893         if (hw->mac.type == ixgbe_mac_82599EB) {
1894                 /* We enable 8 traffic classes, DCB only */
1895                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
1896                         IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
1897                                         IXGBE_MTQC_8TC_8TQ));
1898         }
1899 }
1900
1901 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1902
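/**
 * ixgbe_configure_srrctl - program the split receive control register
 * @adapter: board private structure
 * @rx_ring: Rx ring whose SRRCTL register is being set up
 **/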
1903 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
1904                                    struct ixgbe_ring *rx_ring)
1905 {
1906         u32 srrctl;
1907         int index;
1908         struct ixgbe_ring_feature *feature = adapter->ring_feature;
1909
1910         index = rx_ring->reg_idx;
1911         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
1912                 unsigned long mask;
1913                 mask = (unsigned long) feature[RING_F_RSS].mask;
1914                 index = index & mask;
1915         }
1916         srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
1917
1918         srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
1919         srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
1920
1921         srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1922                   IXGBE_SRRCTL_BSIZEHDR_MASK;
1923
1924         if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1925 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
1926                 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1927 #else
1928                 srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1929 #endif
1930                 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1931         } else {
1932                 srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
1933                           IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1934                 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1935         }
1936
1937         IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
1938 }
1939
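/**
 * ixgbe_setup_mrqc - compute the multiple receive queues command for 82599
 * @adapter: board private structure
 *
 * Returns the MRQC base value matching the enabled RSS/DCB feature;
 * returns 0 on 82598, where the caller sets the RSS enable bit itself.
 **/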
1940 static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
1941 {
1942         u32 mrqc = 0;
1943         int mask;
1944
1945         if (adapter->hw.mac.type != ixgbe_mac_82599EB)
1946                 return mrqc;
1947
1948         mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
1949 #ifdef CONFIG_IXGBE_DCB
1950                                  | IXGBE_FLAG_DCB_ENABLED
1951 #endif
1952                                 );
1953
1954         switch (mask) {
1955         case (IXGBE_FLAG_RSS_ENABLED):
1956                 mrqc = IXGBE_MRQC_RSSEN;
1957                 break;
1958 #ifdef CONFIG_IXGBE_DCB
1959         case (IXGBE_FLAG_DCB_ENABLED):
1960                 mrqc = IXGBE_MRQC_RT8TCEN;
1961                 break;
1962 #endif /* CONFIG_IXGBE_DCB */
1963         default:
1964                 break;
1965         }
1966
1967         return mrqc;
1968 }
1969
1970 /**
1971  * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset
1972  * @adapter: board private structure
1973  *
1974  * Configure the Rx unit of the MAC after a reset.
1975  **/
1976 static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1977 {
1978         u64 rdba;
1979         struct ixgbe_hw *hw = &adapter->hw;
1980         struct ixgbe_ring *rx_ring;
1981         struct net_device *netdev = adapter->netdev;
1982         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1983         int i, j;
1984         u32 rdlen, rxctrl, rxcsum;
1985         static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
1986                           0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
1987                           0x6A3E67EA, 0x14364D17, 0x3BED200D};
1988         u32 fctrl, hlreg0;
1989         u32 reta = 0, mrqc = 0;
1990         u32 rdrxctl;
1991         u32 rscctrl;
1992         int rx_buf_len;
1993
1994         /* Decide whether to use packet split mode or not */
1995         adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1996
1997         /* Set the RX buffer length according to the mode */
1998         if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1999                 rx_buf_len = IXGBE_RX_HDR_SIZE;
2000                 if (hw->mac.type == ixgbe_mac_82599EB) {
2001                         /* PSRTYPE must be initialized in 82599 */
2002                         u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
2003                                       IXGBE_PSRTYPE_UDPHDR |
2004                                       IXGBE_PSRTYPE_IPV4HDR |
2005                                       IXGBE_PSRTYPE_IPV6HDR |
2006                                       IXGBE_PSRTYPE_L2HDR;
2007                         IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
2008                 }
2009         } else {
2010                 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
2011                     (netdev->mtu <= ETH_DATA_LEN))
2012                         rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
2013                 else
2014                         rx_buf_len = ALIGN(max_frame, 1024);
2015         }
2016
2017         fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
2018         fctrl |= IXGBE_FCTRL_BAM;
2019         fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
2020         fctrl |= IXGBE_FCTRL_PMCF;
2021         IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
2022
2023         hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2024         if (adapter->netdev->mtu <= ETH_DATA_LEN)
2025                 hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
2026         else
2027                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2028 #ifdef IXGBE_FCOE
2029         if (netdev->features & NETIF_F_FCOE_MTU)
2030                 hlreg0 |= IXGBE_HLREG0_JUMBOEN;
2031 #endif
2032         IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
2033
2034         rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
2035         /* disable receives while setting up the descriptors */
2036         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2037         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2038
2039         /*
2040          * Setup the HW Rx Head and Tail Descriptor Pointers and
2041          * the Base and Length of the Rx Descriptor Ring
2042          */
2043         for (i = 0; i < adapter->num_rx_queues; i++) {
2044                 rx_ring = &adapter->rx_ring[i];
2045                 rdba = rx_ring->dma;
2046                 j = rx_ring->reg_idx;
2047                 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
2048                 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(j), (rdba >> 32));
2049                 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(j), rdlen);
2050                 IXGBE_WRITE_REG(hw, IXGBE_RDH(j), 0);
2051                 IXGBE_WRITE_REG(hw, IXGBE_RDT(j), 0);
2052                 rx_ring->head = IXGBE_RDH(j);
2053                 rx_ring->tail = IXGBE_RDT(j);
2054                 rx_ring->rx_buf_len = rx_buf_len;
2055
2056                 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2057                         rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
2058
2059 #ifdef IXGBE_FCOE
2060                 if (netdev->features & NETIF_F_FCOE_MTU) {
2061                         struct ixgbe_ring_feature *f;
2062                         f = &adapter->ring_feature[RING_F_FCOE];
2063                         if ((i >= f->mask) && (i < f->mask + f->indices)) {
2064                                 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
2065                                 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2066                                         rx_ring->rx_buf_len =
2067                                                 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2068                         }
2069                 }
2070
2071 #endif /* IXGBE_FCOE */
2072                 ixgbe_configure_srrctl(adapter, rx_ring);
2073         }
2074
2075         if (hw->mac.type == ixgbe_mac_82598EB) {
2076                 /*
2077                  * For VMDq support of different descriptor types or
2078                  * buffer sizes through the use of multiple SRRCTL
2079                  * registers, RDRXCTL.MVMEN must be set to 1
2080                  *
2081                  * Also, the manual doesn't mention it clearly, but DCA hints
2082                  * will only use queue 0's tags unless this bit is set.  The
2083                  * only side effect of setting this bit is that SRRCTL must be
2084                  * fully programmed [0..15].
2085                  */
2086                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2087                 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2088                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2089         }
2090
2091         /* Program MRQC for the distribution of queues */
2092         mrqc = ixgbe_setup_mrqc(adapter);
2093
2094         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
2095                 /* Fill out redirection table */
2096                 for (i = 0, j = 0; i < 128; i++, j++) {
2097                         if (j == adapter->ring_feature[RING_F_RSS].indices)
2098                                 j = 0;
2099                         /* reta is a sliding window of four one-byte
2100                          * entries, each j copied into both nibbles */
2101                         reta = (reta << 8) | (j * 0x11);
2102                         if ((i & 3) == 3)
2103                                 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2104                 }
2105
2106                 /* Fill out hash function seeds */
2107                 for (i = 0; i < 10; i++)
2108                         IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);
2109
2110                 if (hw->mac.type == ixgbe_mac_82598EB)
2111                         mrqc |= IXGBE_MRQC_RSSEN;
2112                 /* Perform hash on these packet types */
2113                 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2114                       | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2115                       | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
2116                       | IXGBE_MRQC_RSS_FIELD_IPV6
2117                       | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
2118                       | IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
2119         }
2120         IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2121
2122         rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2123
2124         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
2125             adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
2126                 /* PCSD disables checksum reporting in the descriptor
2127                  * and enables the RSS hash field instead */
2128                 rxcsum |= IXGBE_RXCSUM_PCSD;
2129         }
2130         if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
2131                 /* Enable IPv4 payload checksum for UDP fragments
2132                  * if PCSD is not set */
2133                 rxcsum |= IXGBE_RXCSUM_IPPCSE;
2134         }
2135
2136         IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2137
2138         if (hw->mac.type == ixgbe_mac_82599EB) {
2139                 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2140                 rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
2141                 rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2142                 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
2143         }
2144
2145         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2146                 /* Enable 82599 HW-RSC */
2147                 for (i = 0; i < adapter->num_rx_queues; i++) {
2148                         rx_ring = &adapter->rx_ring[i];
2149                         j = rx_ring->reg_idx;
2150                         rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
2151                         rscctrl |= IXGBE_RSCCTL_RSCEN;
2152                         /*
2153                          * we must limit the number of descriptors so that the
2154                          * total size of max desc * buf_len is not greater
2155                          * than 65535
2156                          */
2157                         if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
2158 #if (MAX_SKB_FRAGS > 16)
2159                                 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2160 #elif (MAX_SKB_FRAGS > 8)
2161                                 rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2162 #elif (MAX_SKB_FRAGS > 4)
2163                                 rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2164 #else
2165                                 rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
2166 #endif
2167                         } else {
2168                                 if (rx_buf_len < IXGBE_RXBUFFER_4096)
2169                                         rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2170                                 else if (rx_buf_len < IXGBE_RXBUFFER_8192)
2171                                         rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
2172                                 else
2173                                         rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
2174                         }
2175                         IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(j), rscctrl);
2176                 }
2177                 /* Disable RSC for ACK packets */
2178                 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
2179                    (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
2180         }
2181 }
2182
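/**
 * ixgbe_vlan_rx_add_vid - add a VLAN id to the hardware VLAN filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to be added
 **/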
2183 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2184 {
2185         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2186         struct ixgbe_hw *hw = &adapter->hw;
2187
2188         /* add VID to filter table */
2189         hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
2190 }
2191
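/**
 * ixgbe_vlan_rx_kill_vid - remove a VLAN id from the hardware filter table
 * @netdev: network interface device structure
 * @vid: VLAN id to be removed
 **/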
2192 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2193 {
2194         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2195         struct ixgbe_hw *hw = &adapter->hw;
2196
2197         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2198                 ixgbe_irq_disable(adapter);
2199
2200         vlan_group_set_device(adapter->vlgrp, vid, NULL);
2201
2202         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2203                 ixgbe_irq_enable(adapter);
2204
2205         /* remove VID from filter table */
2206         hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
2207 }
2208
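/**
 * ixgbe_vlan_rx_register - enable VLAN tag stripping for a vlan group
 * @netdev: network interface device structure
 * @grp: vlan group from the 8021q layer, or NULL on unregister
 **/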
2209 static void ixgbe_vlan_rx_register(struct net_device *netdev,
2210                                    struct vlan_group *grp)
2211 {
2212         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2213         u32 ctrl;
2214         int i, j;
2215
2216         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2217                 ixgbe_irq_disable(adapter);
2218         adapter->vlgrp = grp;
2219
2220         /*
2221          * For a DCB driver, always enable VLAN tag stripping so we can
2222          * still receive traffic from a DCB-enabled host even if we're
2223          * not in DCB mode.
2224          */
2225         ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
2226         if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2227                 ctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2228                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2229                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2230         } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2231                 ctrl |= IXGBE_VLNCTRL_VFE;
2232                 /* enable VLAN tag insert/strip */
2233                 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
2234                 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2235                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2236                 for (i = 0; i < adapter->num_rx_queues; i++) {
2237                         j = adapter->rx_ring[i].reg_idx;
2238                         ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2239                         ctrl |= IXGBE_RXDCTL_VME;
2240                         IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2241                 }
2242         }
2243         ixgbe_vlan_rx_add_vid(netdev, 0);
2244
2245         if (!test_bit(__IXGBE_DOWN, &adapter->state))
2246                 ixgbe_irq_enable(adapter);
2247 }
2248
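/**
 * ixgbe_restore_vlan - re-register the vlan group and replay all filters
 * @adapter: board private structure
 **/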
2249 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2250 {
2251         ixgbe_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2252
2253         if (adapter->vlgrp) {
2254                 u16 vid;
2255                 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2256                         if (!vlan_group_get_device(adapter->vlgrp, vid))
2257                                 continue;
2258                         ixgbe_vlan_rx_add_vid(adapter->netdev, vid);
2259                 }
2260         }
2261 }
2262
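/**
 * ixgbe_addr_list_itr - multicast address iterator for update_mc_addr_list
 * @hw: pointer to hardware structure (unused)
 * @mc_addr_ptr: in/out cursor into the netdev multicast list
 * @vmdq: VMDq pool index output, always set to 0 here
 *
 * Returns the current address and advances the cursor to the next
 * dev_mc_list entry, or to NULL at the end of the list.
 **/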
2263 static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2264 {
2265         struct dev_mc_list *mc_ptr;
2266         u8 *addr = *mc_addr_ptr;
2267         *vmdq = 0;
2268
2269         mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2270         if (mc_ptr->next)
2271                 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2272         else
2273                 *mc_addr_ptr = NULL;
2274
2275         return addr;
2276 }
2277
2278 /**
2279  * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2280  * @netdev: network interface device structure
2281  *
2282  * The set_rx_method entry point is called whenever the unicast/multicast
2283  * address list or the network interface flags are updated.  This routine is
2284  * responsible for configuring the hardware for proper unicast, multicast and
2285  * promiscuous mode.
2286  **/
2287 static void ixgbe_set_rx_mode(struct net_device *netdev)
2288 {
2289         struct ixgbe_adapter *adapter = netdev_priv(netdev);
2290         struct ixgbe_hw *hw = &adapter->hw;
2291         u32 fctrl, vlnctrl;
2292         u8 *addr_list = NULL;
2293         int addr_count = 0;
2294
2295         /* Check for Promiscuous and All Multicast modes */
2296
2297         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2298         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2299
2300         if (netdev->flags & IFF_PROMISC) {
2301                 hw->addr_ctrl.user_set_promisc = 1;
2302                 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2303                 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2304         } else {
2305                 if (netdev->flags & IFF_ALLMULTI) {
2306                         fctrl |= IXGBE_FCTRL_MPE;
2307                         fctrl &= ~IXGBE_FCTRL_UPE;
2308                 } else {
2309                         fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2310                 }
2311                 vlnctrl |= IXGBE_VLNCTRL_VFE;
2312                 hw->addr_ctrl.user_set_promisc = 0;
2313         }
2314
2315         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2316         IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2317
2318         /* reprogram secondary unicast list */
2319         hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
2320
2321         /* reprogram multicast list */
2322         addr_count = netdev->mc_count;
2323         if (addr_count)
2324                 addr_list = netdev->mc_list->dmi_addr;
2325         hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2326                                         ixgbe_addr_list_itr);
2327 }
2328
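/**
 * ixgbe_napi_enable_all - enable NAPI on every queue vector
 * @adapter: board private structure
 *
 * Vectors that ended up with a single Tx-only or Rx-only ring are
 * switched to the lighter-weight poll routines before being enabled.
 **/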
2329 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
2330 {
2331         int q_idx;
2332         struct ixgbe_q_vector *q_vector;
2333         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2334
2335         /* legacy and MSI only use one vector */
2336         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2337                 q_vectors = 1;
2338
2339         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2340                 struct napi_struct *napi;
2341                 q_vector = adapter->q_vector[q_idx];
2342                 napi = &q_vector->napi;
2343                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2344                         if (!q_vector->rxr_count || !q_vector->txr_count) {
2345                                 if (q_vector->txr_count == 1)
2346                                         napi->poll = &ixgbe_clean_txonly;
2347                                 else if (q_vector->rxr_count == 1)
2348                                         napi->poll = &ixgbe_clean_rxonly;
2349                         }
2350                 }
2351
2352                 napi_enable(napi);
2353         }
2354 }
2355
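/**
 * ixgbe_napi_disable_all - disable NAPI on every queue vector
 * @adapter: board private structure
 **/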
2356 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2357 {
2358         int q_idx;
2359         struct ixgbe_q_vector *q_vector;
2360         int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2361
2362         /* legacy and MSI only use one vector */
2363         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2364                 q_vectors = 1;
2365
2366         for (q_idx = 0; q_idx < q_vectors; q_idx++) {
2367                 q_vector = adapter->q_vector[q_idx];
2368                 napi_disable(&q_vector->napi);
2369         }
2370 }
2371
2372 #ifdef CONFIG_IXGBE_DCB
2373 /*
2374  * ixgbe_configure_dcb - Configure DCB hardware
2375  * @adapter: ixgbe adapter struct
2376  *
2377  * This is called by the driver on open to configure the DCB hardware.
2378  * This is also called by the gennetlink interface when reconfiguring
2379  * the DCB state.
2380  */
2381 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2382 {
2383         struct ixgbe_hw *hw = &adapter->hw;
2384         u32 txdctl, vlnctrl;
2385         int i, j;
2386
2387         ixgbe_dcb_check_config(&adapter->dcb_cfg);
2388         ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
2389         ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
2390
2391         /* reconfigure the hardware */
2392         ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
2393
2394         for (i = 0; i < adapter->num_tx_queues; i++) {
2395                 j = adapter->tx_ring[i].reg_idx;
2396                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2397                 /* PThresh workaround for Tx hang with DFP enabled. */
2398                 txdctl |= 32;
2399                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2400         }
2401         /* Enable VLAN tag insert/strip */
2402         vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2403         if (hw->mac.type == ixgbe_mac_82598EB) {
2404                 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2405                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2406                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2407         } else if (hw->mac.type == ixgbe_mac_82599EB) {
2408                 vlnctrl |= IXGBE_VLNCTRL_VFE;
2409                 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2410                 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2411                 for (i = 0; i < adapter->num_rx_queues; i++) {
2412                         j = adapter->rx_ring[i].reg_idx;
2413                         vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2414                         vlnctrl |= IXGBE_RXDCTL_VME;
2415                         IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2416                 }
2417         }
2418         hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2419 }
2420
2421 #endif
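/**
 * ixgbe_configure - configure the hardware for normal operation
 * @adapter: board private structure
 *
 * Programs receive filtering, VLANs, DCB, FCoE and Flow Director, then
 * sets up the Tx and Rx units and refills the Rx rings with buffers.
 **/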
2422 static void ixgbe_configure(struct ixgbe_adapter *adapter)
2423 {
2424         struct net_device *netdev = adapter->netdev;
2425         struct ixgbe_hw *hw = &adapter->hw;
2426         int i;
2427
2428         ixgbe_set_rx_mode(netdev);
2429
2430         ixgbe_restore_vlan(adapter);
2431 #ifdef CONFIG_IXGBE_DCB
2432         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
2433                 netif_set_gso_max_size(netdev, 32768);
2434                 ixgbe_configure_dcb(adapter);
2435         } else {
2436                 netif_set_gso_max_size(netdev, 65536);
2437         }
2438 #else
2439         netif_set_gso_max_size(netdev, 65536);
2440 #endif
2441
2442 #ifdef IXGBE_FCOE
2443         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
2444                 ixgbe_configure_fcoe(adapter);
2445
2446 #endif /* IXGBE_FCOE */
2447         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2448                 for (i = 0; i < adapter->num_tx_queues; i++)
2449                         adapter->tx_ring[i].atr_sample_rate =
2450                                                        adapter->atr_sample_rate;
2451                 ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
2452         } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
2453                 ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
2454         }
2455
2456         ixgbe_configure_tx(adapter);
2457         ixgbe_configure_rx(adapter);
2458         for (i = 0; i < adapter->num_rx_queues; i++)
2459                 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
2460                                        (adapter->rx_ring[i].count - 1));
2461 }
2462
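/**
 * ixgbe_is_sfp - check whether the PHY is a pluggable SFP+/twinax module
 * @hw: pointer to private hardware struct
 **/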
2463 static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2464 {
2465         switch (hw->phy.type) {
2466         case ixgbe_phy_sfp_avago:
2467         case ixgbe_phy_sfp_ftl:
2468         case ixgbe_phy_sfp_intel:
2469         case ixgbe_phy_sfp_unknown:
2470         case ixgbe_phy_tw_tyco:
2471         case ixgbe_phy_tw_unknown:
2472                 return true;
2473         default:
2474                 return false;
2475         }
2476 }
2477
2478 /**
2479  * ixgbe_sfp_link_config - set up SFP+ link
2480  * @adapter: pointer to private adapter struct
2481  **/
2482 static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
2483 {
2484         struct ixgbe_hw *hw = &adapter->hw;
2485
2486         if (hw->phy.multispeed_fiber) {
2487                 /*
2488                  * In multispeed fiber setups, the device may not have
2489                  * had a physical connection when the driver loaded.
2490                  * If that's the case, the initial link configuration
2491                  * couldn't get the MAC into 10G or 1G mode, so we'll
2492                  * never have a link status change interrupt fire.
2493                  * We need to try and force an autonegotiation
2494                  * session, then bring up link.
2495                  */
2496                 hw->mac.ops.setup_sfp(hw);
2497                 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
2498                         schedule_work(&adapter->multispeed_fiber_task);
2499         } else {
2500                 /*
2501                  * Direct Attach Cu and non-multispeed fiber modules
2502                  * still need to be configured properly prior to
2503                  * attempting link.
2504                  */
2505                 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
2506                         schedule_work(&adapter->sfp_config_module_task);
2507         }
2508 }
2509
2510 /**
2511  * ixgbe_non_sfp_link_config - set up non-SFP+ link
2512  * @hw: pointer to private hardware struct
2513  *
2514  * Returns 0 on success, negative on failure
2515  **/
2516 static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
2517 {
2518         u32 autoneg;
2519         bool link_up = false;
2520         u32 ret = IXGBE_ERR_LINK_SETUP;
2521
2522         if (hw->mac.ops.check_link)
2523                 ret = hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
2524
2525         if (ret)
2526                 goto link_cfg_out;
2527
2528         if (hw->mac.ops.get_link_capabilities)
2529                 ret = hw->mac.ops.get_link_capabilities(hw, &autoneg,
2530                                                         &hw->mac.autoneg);
2531         if (ret)
2532                 goto link_cfg_out;
2533
2534         if (hw->mac.ops.setup_link_speed)
2535                 ret = hw->mac.ops.setup_link_speed(hw, autoneg, true, link_up);
2536 link_cfg_out:
2537         return ret;
2538 }
2539
2540 #define IXGBE_MAX_RX_DESC_POLL 10
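/**
 * ixgbe_rx_desc_queue_enable - poll for RXDCTL.ENABLE, then arm the ring
 * @adapter: board private structure
 * @rxr: index of the Rx ring being enabled
 *
 * Polls up to IXGBE_MAX_RX_DESC_POLL times, sleeping 1 ms between reads,
 * for the hardware to acknowledge the enable bit, then hands the ring's
 * descriptors to the hardware via ixgbe_release_rx_desc.
 **/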
2541 static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2542                                               int rxr)
2543 {
2544         int j = adapter->rx_ring[rxr].reg_idx;
2545         int k;
2546
2547         for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
2548                 if (IXGBE_READ_REG(&adapter->hw,
2549                                    IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
2550                         break;
2551                 else
2552                         msleep(1);
2553         }
2554         if (k >= IXGBE_MAX_RX_DESC_POLL) {
2555                 DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
2556                         "not set within the polling period\n", rxr);
2557         }
2558         ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
2559                               (adapter->rx_ring[rxr].count - 1));
2560 }
2561
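/**
 * ixgbe_up_complete - finish bringing the interface up
 * @adapter: board private structure
 *
 * Enables interrupts, Tx/Rx queues and NAPI, kicks off SFP or copper
 * link setup and starts the watchdog timer.  The rings must already
 * be configured.
 **/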
2562 static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2563 {
2564         struct net_device *netdev = adapter->netdev;
2565         struct ixgbe_hw *hw = &adapter->hw;
2566         int i, j = 0;
2567         int num_rx_rings = adapter->num_rx_queues;
2568         int err;
2569         int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2570         u32 txdctl, rxdctl, mhadd;
2571         u32 dmatxctl;
2572         u32 gpie;
2573
2574         ixgbe_get_hw_control(adapter);
2575
2576         if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
2577             (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
2578                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2579                         gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
2580                                 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
2581                 } else {
2582                         /* MSI only */
2583                         gpie = 0;
2584                 }
2585                 /* XXX: to interrupt immediately for EICS writes, enable this */
2586                 /* gpie |= IXGBE_GPIE_EIMEN; */
2587                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2588         }
2589
2590         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
2591                 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
2592                  * specifically only auto mask tx and rx interrupts */
2593                 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
2594         }
2595
2596         /* Enable fan failure interrupt if media type is copper */
2597         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2598                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2599                 gpie |= IXGBE_SDP1_GPIEN;
2600                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2601         }
2602
2603         if (hw->mac.type == ixgbe_mac_82599EB) {
2604                 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
2605                 gpie |= IXGBE_SDP1_GPIEN;
2606                 gpie |= IXGBE_SDP2_GPIEN;
2607                 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
2608         }
2609
2610 #ifdef IXGBE_FCOE
2611         /* adjust max frame to be able to do baby jumbo for FCoE */
2612         if ((netdev->features & NETIF_F_FCOE_MTU) &&
2613             (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
2614                 max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
2615
2616 #endif /* IXGBE_FCOE */
2617         mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
2618         if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
2619                 mhadd &= ~IXGBE_MHADD_MFS_MASK;
2620                 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
2621
2622                 IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
2623         }
2624
2625         for (i = 0; i < adapter->num_tx_queues; i++) {
2626                 j = adapter->tx_ring[i].reg_idx;
2627                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2628                 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
2629                 txdctl |= (8 << 16);
2630                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2631         }
2632
2633         if (hw->mac.type == ixgbe_mac_82599EB) {
2634                 /* DMATXCTL.EN must be set after all Tx queue config is done */
2635                 dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2636                 dmatxctl |= IXGBE_DMATXCTL_TE;
2637                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
2638         }
2639         for (i = 0; i < adapter->num_tx_queues; i++) {
2640                 j = adapter->tx_ring[i].reg_idx;
2641                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2642                 txdctl |= IXGBE_TXDCTL_ENABLE;
2643                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2644         }
2645
2646         for (i = 0; i < num_rx_rings; i++) {
2647                 j = adapter->rx_ring[i].reg_idx;
2648                 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2649                 /* enable PTHRESH=32 descriptors (half the internal cache)
2650                  * and HTHRESH=0 descriptors (to minimize latency on fetch),
2651                  * this also removes a pesky rx_no_buffer_count increment */
2652                 rxdctl |= 0x0020;
2653                 rxdctl |= IXGBE_RXDCTL_ENABLE;
2654                 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
2655                 if (hw->mac.type == ixgbe_mac_82599EB)
2656                         ixgbe_rx_desc_queue_enable(adapter, i);
2657         }
2658         /* enable all receives */
2659         rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2660         if (hw->mac.type == ixgbe_mac_82598EB)
2661                 rxdctl |= (IXGBE_RXCTRL_DMBYPS | IXGBE_RXCTRL_RXEN);
2662         else
2663                 rxdctl |= IXGBE_RXCTRL_RXEN;
2664         hw->mac.ops.enable_rx_dma(hw, rxdctl);
2665
2666         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2667                 ixgbe_configure_msix(adapter);
2668         else
2669                 ixgbe_configure_msi_and_legacy(adapter);
2670
2671         clear_bit(__IXGBE_DOWN, &adapter->state);
2672         ixgbe_napi_enable_all(adapter);
2673
2674         /* clear any pending interrupts, may auto mask */
2675         IXGBE_READ_REG(hw, IXGBE_EICR);
2676
2677         ixgbe_irq_enable(adapter);
2678
2679         /*
2680          * If this adapter has a fan, check to see if we had a failure
2681          * before we enabled the interrupt.
2682          */
2683         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
2684                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
2685                 if (esdp & IXGBE_ESDP_SDP1)
2686                         DPRINTK(DRV, CRIT,
2687                                 "Fan has stopped, replace the adapter\n");
2688         }
2689
2690         /*
2691          * For hot-pluggable SFP+ devices, a new SFP+ module may have
2692          * arrived before interrupts were enabled but after probe.  Such
2693          * devices wouldn't have their type identified yet. We need to
2694          * kick off the SFP+ module setup first, then try to bring up link.
2695          * If we're not hot-pluggable SFP+, we just need to configure link
2696          * and bring it up.
2697          */
2698         if (hw->phy.type == ixgbe_phy_unknown) {
2699                 err = hw->phy.ops.identify(hw);
2700                 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
2701                         /*
2702                          * Take the device down and schedule the SFP config
2703                          * work item, which will unregister_netdev and log it.
2704                          */
2705                         ixgbe_down(adapter);
2706                         schedule_work(&adapter->sfp_config_module_task);
2707                         return err;
2708                 }
2709         }
2710
2711         if (ixgbe_is_sfp(hw)) {
2712                 ixgbe_sfp_link_config(adapter);
2713         } else {
2714                 err = ixgbe_non_sfp_link_config(hw);
2715                 if (err)
2716                         DPRINTK(PROBE, ERR, "link_config FAILED %d\n", err);
2717         }
2718
2719         for (i = 0; i < adapter->num_tx_queues; i++)
2720                 set_bit(__IXGBE_FDIR_INIT_DONE,
2721                         &(adapter->tx_ring[i].reinit_state));
2722
2723         /* enable transmits */
2724         netif_tx_start_all_queues(netdev);
2725
2726         /* bring the link up in the watchdog; this could race with our first
2727          * link-up interrupt but shouldn't be a problem */
2728         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
2729         adapter->link_check_timeout = jiffies;
2730         mod_timer(&adapter->watchdog_timer, jiffies);
2731         return 0;
2732 }
2733
2734 void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
2735 {
2736         WARN_ON(in_interrupt());
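             /* crude serialization: sleep until any reset already in progress
              * has finished before starting our own */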
2737         while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
2738                 msleep(1);
2739         ixgbe_down(adapter);
2740         ixgbe_up(adapter);
2741         clear_bit(__IXGBE_RESETTING, &adapter->state);
2742 }
2743
2744 int ixgbe_up(struct ixgbe_adapter *adapter)
2745 {
2746         /* hardware has been reset, we need to reload some things */
2747         ixgbe_configure(adapter);
2748
2749         return ixgbe_up_complete(adapter);
2750 }
2751
2752 void ixgbe_reset(struct ixgbe_adapter *adapter)
2753 {
2754         struct ixgbe_hw *hw = &adapter->hw;
2755         int err;
2756
2757         err = hw->mac.ops.init_hw(hw);
2758         switch (err) {
2759         case 0:
2760         case IXGBE_ERR_SFP_NOT_PRESENT:
2761                 break;
2762         case IXGBE_ERR_MASTER_REQUESTS_PENDING:
2763                 dev_err(&adapter->pdev->dev, "master disable timed out\n");
2764                 break;
2765         case IXGBE_ERR_EEPROM_VERSION:
2766                 /* We are running on a pre-production device, log a warning */
2767                 dev_warn(&adapter->pdev->dev, "This device is a pre-production "
2768                          "adapter/LOM.  Please be aware there may be issues "
2769                          "associated with your hardware.  If you are "
2770                          "experiencing problems please contact your Intel or "
2771                          "hardware representative who provided you with this "
2772                          "hardware.\n");
2773                 break;
2774         default:
2775                 dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
2776         }
2777
2778         /* reprogram the RAR[0] in case user changed it. */
2779         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
2780 }
2781
2782 /**
2783  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
2784  * @adapter: board private structure
2785  * @rx_ring: ring to free buffers from
2786  **/
2787 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
2788                                 struct ixgbe_ring *rx_ring)
2789 {
2790         struct pci_dev *pdev = adapter->pdev;
2791         unsigned long size;
2792         unsigned int i;
2793
2794         /* Free all the Rx ring sk_buffs */
2795
2796         for (i = 0; i < rx_ring->count; i++) {
2797                 struct ixgbe_rx_buffer *rx_buffer_info;
2798
2799                 rx_buffer_info = &rx_ring->rx_buffer_info[i];
2800                 if (rx_buffer_info->dma) {
2801                         pci_unmap_single(pdev, rx_buffer_info->dma,
2802                                          rx_ring->rx_buf_len,
2803                                          PCI_DMA_FROMDEVICE);
2804                         rx_buffer_info->dma = 0;
2805                 }
2806                 if (rx_buffer_info->skb) {
2807                         struct sk_buff *skb = rx_buffer_info->skb;
2808                         rx_buffer_info->skb = NULL;
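                             /* the skb may head a chain linked via skb->prev
                              * (e.g. when hardware RSC coalesced several
                              * receives), so free the entire chain */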
2809                         do {
2810                                 struct sk_buff *this = skb;
2811                                 skb = skb->prev;
2812                                 dev_kfree_skb(this);
2813                         } while (skb);
2814                 }
2815                 if (!rx_buffer_info->page)
2816                         continue;
2817                 if (rx_buffer_info->page_dma) {
2818                         pci_unmap_page(pdev, rx_buffer_info->page_dma,
2819                                        PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
2820                         rx_buffer_info->page_dma = 0;
2821                 }
2822                 put_page(rx_buffer_info->page);
2823                 rx_buffer_info->page = NULL;
2824                 rx_buffer_info->page_offset = 0;
2825         }
2826
2827         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
2828         memset(rx_ring->rx_buffer_info, 0, size);
2829
2830         /* Zero out the descriptor ring */
2831         memset(rx_ring->desc, 0, rx_ring->size);
2832
2833         rx_ring->next_to_clean = 0;
2834         rx_ring->next_to_use = 0;
2835
2836         if (rx_ring->head)
2837                 writel(0, adapter->hw.hw_addr + rx_ring->head);
2838         if (rx_ring->tail)
2839                 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2840 }
2841
2842 /**
2843  * ixgbe_clean_tx_ring - Free Tx Buffers
2844  * @adapter: board private structure
2845  * @tx_ring: ring to be cleaned
2846  **/
2847 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
2848                                 struct ixgbe_ring *tx_ring)
2849 {
2850         struct ixgbe_tx_buffer *tx_buffer_info;
2851         unsigned long size;
2852         unsigned int i;
2853
2854         /* Free all the Tx ring sk_buffs */
2855
2856         for (i = 0; i < tx_ring->count; i++) {
2857                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2858                 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2859         }
2860
2861         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
2862         memset(tx_ring->tx_buffer_info, 0, size);
2863
2864         /* Zero out the descriptor ring */
2865         memset(tx_ring->desc, 0, tx_ring->size);
2866
2867         tx_ring->next_to_use = 0;
2868         tx_ring->next_to_clean = 0;
2869
2870         if (tx_ring->head)
2871                 writel(0, adapter->hw.hw_addr + tx_ring->head);
2872         if (tx_ring->tail)
2873                 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2874 }
2875
2876 /**
2877  * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
2878  * @adapter: board private structure
2879  **/
2880 static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
2881 {
2882         int i;
2883
2884         for (i = 0; i < adapter->num_rx_queues; i++)
2885                 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2886 }
2887
2888 /**
2889  * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
2890  * @adapter: board private structure
2891  **/
2892 static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
2893 {
2894         int i;
2895
2896         for (i = 0; i < adapter->num_tx_queues; i++)
2897                 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2898 }
2899
2900 void ixgbe_down(struct ixgbe_adapter *adapter)
2901 {
2902         struct net_device *netdev = adapter->netdev;
2903         struct ixgbe_hw *hw = &adapter->hw;
2904         u32 rxctrl;
2905         u32 txdctl;
2906         int i, j;
2907
2908         /* signal that we are down to the interrupt handler */
2909         set_bit(__IXGBE_DOWN, &adapter->state);
2910
2911         /* disable receives */
2912         rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2913         IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
2914
2915         netif_tx_disable(netdev);
2916
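             /* flush the posted RXCTRL write and give in-flight receives a
              * chance to drain */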
2917         IXGBE_WRITE_FLUSH(hw);
2918         msleep(10);
2919
2920         netif_tx_stop_all_queues(netdev);
2921
2922         ixgbe_irq_disable(adapter);
2923
2924         ixgbe_napi_disable_all(adapter);
2925
2926         del_timer_sync(&adapter->watchdog_timer);
2927         cancel_work_sync(&adapter->watchdog_task);
2928
2929         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2930             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
2931                 cancel_work_sync(&adapter->fdir_reinit_task);
2932
2933         /* disable transmits in the hardware now that interrupts are off */
2934         for (i = 0; i < adapter->num_tx_queues; i++) {
2935                 j = adapter->tx_ring[i].reg_idx;
2936                 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2937                 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
2938                                 (txdctl & ~IXGBE_TXDCTL_ENABLE));
2939         }
2940         /* Disable the Tx DMA engine on 82599 */
2941         if (hw->mac.type == ixgbe_mac_82599EB)
2942                 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
2943                                 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
2944                                  ~IXGBE_DMATXCTL_TE));
2945
2946         netif_carrier_off(netdev);
2947
2948         if (!pci_channel_offline(adapter->pdev))
2949                 ixgbe_reset(adapter);
2950         ixgbe_clean_all_tx_rings(adapter);
2951         ixgbe_clean_all_rx_rings(adapter);
2952
2953 #ifdef CONFIG_IXGBE_DCA
2954         /* since we reset the hardware, DCA settings were cleared */
2955         ixgbe_setup_dca(adapter);
2956 #endif
2957 }
2958
2959 /**
2960  * ixgbe_poll - NAPI Rx polling callback
2961  * @napi: structure for representing this polling device
2962  * @budget: how many packets the driver is allowed to clean
2963  *
2964  * This is the NAPI poll routine used for legacy and MSI interrupt modes
2965  **/
2966 static int ixgbe_poll(struct napi_struct *napi, int budget)
2967 {
2968         struct ixgbe_q_vector *q_vector =
2969                                 container_of(napi, struct ixgbe_q_vector, napi);
2970         struct ixgbe_adapter *adapter = q_vector->adapter;
2971         int tx_clean_complete, work_done = 0;
2972
2973 #ifdef CONFIG_IXGBE_DCA
2974         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2975                 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2976                 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2977         }
2978 #endif
2979
2980         tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
2981         ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
2982
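             /* Tx cleanup incomplete: claim the full budget so NAPI polls again */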
2983         if (!tx_clean_complete)
2984                 work_done = budget;
2985
2986         /* If budget not fully consumed, exit the polling mode */
2987         if (work_done < budget) {
2988                 napi_complete(napi);
2989                 if (adapter->itr_setting & 1)
2990                         ixgbe_set_itr(adapter);
2991                 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2992                         ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
2993         }
2994         return work_done;
2995 }
2996
2997 /**
2998  * ixgbe_tx_timeout - Respond to a Tx Hang
2999  * @netdev: network interface device structure
3000  **/
3001 static void ixgbe_tx_timeout(struct net_device *netdev)
3002 {
3003         struct ixgbe_adapter *adapter = netdev_priv(netdev);
3004
3005         /* Do the reset outside of interrupt context */
3006         schedule_work(&adapter->reset_task);
3007 }
3008
3009 static void ixgbe_reset_task(struct work_struct *work)
3010 {
3011         struct ixgbe_adapter *adapter;
3012         adapter = container_of(work, struct ixgbe_adapter, reset_task);
3013
3014         /* If we're already down or resetting, just bail */
3015         if (test_bit(__IXGBE_DOWN, &adapter->state) ||
3016             test_bit(__IXGBE_RESETTING, &adapter->state))
3017                 return;
3018
3019         adapter->tx_timeout_count++;
3020
3021         ixgbe_reinit_locked(adapter);
3022 }
3023
3024 #ifdef CONFIG_IXGBE_DCB
3025 static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
3026 {
3027         bool ret = false;
3028         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
3029
3030         if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
3031                 return ret;
3032
3033         f->mask = 0x7 << 3;
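             /* DCB queue counts are symmetric: one rx and one tx queue per index */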
3034         adapter->num_rx_queues = f->indices;
3035         adapter->num_tx_queues = f->indices;
3036         ret = true;
3037
3038         return ret;
3039 }
3040 #endif
3041
3042 /**
3043  * ixgbe_set_rss_queues: Allocate queues for RSS
3044  * @adapter: board private structure to initialize
3045  *
3046  * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
3047  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
3048  *
3049  **/
3050 static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
3051 {
3052         bool ret = false;
3053         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
3054
3055         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3056                 f->mask = 0xF;
3057                 adapter->num_rx_queues = f->indices;
3058                 adapter->num_tx_queues = f->indices;
3059                 ret = true;
3060         } else {
3061                 ret = false;
3062         }
3063
3064         return ret;
3065 }
3066
3067 /**
3068  * ixgbe_set_fdir_queues: Allocate queues for Flow Director
3069  * @adapter: board private structure to initialize
3070  *
3071  * Flow Director is an advanced Rx filter, attempting to get Rx flows back
3072  * to the original CPU that initiated the Tx session.  This runs in addition
3073  * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
3074  * Rx load across CPUs using RSS.
3075  *
3076  **/
3077 static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
3078 {
3079         bool ret = false;
3080         struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
3081
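             /* never ask for more Flow Director queues than there are online CPUs */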
3082         f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
3083         f_fdir->mask = 0;
3084
3085         /* Flow Director must have RSS enabled */
3086         if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3087             ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3088              (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3089                 adapter->num_tx_queues = f_fdir->indices;
3090                 adapter->num_rx_queues = f_fdir->indices;
3091                 ret = true;
3092         } else {
3093                 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3094                 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3095         }
3096         return ret;
3097 }
3098
3099 #ifdef IXGBE_FCOE
3100 /**
3101  * ixgbe_set_fcoe_queues: Allocate queues for Fibre Channel over Ethernet (FCoE)
3102  * @adapter: board private structure to initialize
3103  *
3104  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
3105  * Since FCoE can take any 8 rx queues out of the max number of rx queues,
3106  * the ring feature mask is not used as a mask for FCoE; instead, it holds
3107  * the index of the first rx queue used by FCoE.
3108  *
3109  **/
3110 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3111 {
3112         bool ret = false;
3113         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3114
3115         f->indices = min((int)num_online_cpus(), f->indices);
3116         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3117 #ifdef CONFIG_IXGBE_DCB
3118                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3119                         DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3120                         ixgbe_set_dcb_queues(adapter);
3121                 }
3122 #endif
3123                 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3124                         DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3125                         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3126                             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3127                                 ixgbe_set_fdir_queues(adapter);
3128                         else
3129                                 ixgbe_set_rss_queues(adapter);
3130                 }
3131                 /* adding FCoE rx rings to the end */
3132                 f->mask = adapter->num_rx_queues;
3133                 adapter->num_rx_queues += f->indices;
3134                 if (adapter->num_tx_queues == 0)
3135                         adapter->num_tx_queues = f->indices;
3136
3137                 ret = true;
3138         }
3139
3140         return ret;
3141 }
3142
3143 #endif /* IXGBE_FCOE */
3144 /**
3145  * ixgbe_set_num_queues: Allocate queues for device, feature dependent
3146  * @adapter: board private structure to initialize
3147  *
3148  * This is the top level queue allocation routine.  The order here is very
3149  * important: we start with the largest combination of features that can be
3150  * enabled at once and end with the smallest set of features.  This way large
3151  * combinations can be allocated if they're turned on, and smaller
3152  * combinations are the fallthrough conditions.
3153  *
3154  **/
3155 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
3156 {
3157 #ifdef IXGBE_FCOE
3158         if (ixgbe_set_fcoe_queues(adapter))
3159                 goto done;
3160
3161 #endif /* IXGBE_FCOE */
3162 #ifdef CONFIG_IXGBE_DCB
3163         if (ixgbe_set_dcb_queues(adapter))
3164                 goto done;
3165
3166 #endif
3167         if (ixgbe_set_fdir_queues(adapter))
3168                 goto done;
3169
3170         if (ixgbe_set_rss_queues(adapter))
3171                 goto done;
3172
3173         /* fallback to base case */
3174         adapter->num_rx_queues = 1;
3175         adapter->num_tx_queues = 1;
3176
3177 done:
3178         /* Notify the stack of the (possibly) reduced Tx Queue count. */
3179         adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
3180 }
3181
3182 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
3183                                        int vectors)
3184 {
3185         int err, vector_threshold;
3186
3187         /* We'll want at least 3 (vector_threshold):
3188          * 1) TxQ[0] Cleanup
3189          * 2) RxQ[0] Cleanup
3190          * 3) Other (Link Status Change, etc.)
3191          * 4) TCP Timer (optional)
3192          */
3193         vector_threshold = MIN_MSIX_COUNT;
3194
3195         /* The more we get, the more we will assign to Tx/Rx Cleanup
3196          * for the separate queues...where Rx Cleanup >= Tx Cleanup.
3197          * Right now, we simply care about how many we'll get; we'll
3198          * set them up later while requesting irq's.
3199          */
3200         while (vectors >= vector_threshold) {
3201                 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
3202                                       vectors);
3203                 if (!err) /* Success in acquiring all requested vectors. */
3204                         break;
3205                 else if (err < 0)
3206                         vectors = 0; /* Nasty failure, quit now */
3207                 else /* err == number of vectors we should try again with */
3208                         vectors = err;
3209         }
3210
3211         if (vectors < vector_threshold) {
3212                 /* Can't allocate enough MSI-X interrupts?  Oh well.
3213                  * This just means we'll go with either a single MSI
3214                  * vector or fall back to legacy interrupts.
3215                  */
3216                 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
3217                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3218                 kfree(adapter->msix_entries);
3219                 adapter->msix_entries = NULL;
3220         } else {
3221                 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
3222                 /*
3223                  * Adjust for only the vectors we'll use, which is the minimum
3224                  * of max_msix_q_vectors + NON_Q_VECTORS and the number of
3225                  * vectors we were allocated.
3226                  */
3227                 adapter->num_msix_vectors = min(vectors,
3228                                    adapter->max_msix_q_vectors + NON_Q_VECTORS);
3229         }
3230 }
3231
3232 /**
3233  * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
3234  * @adapter: board private structure to initialize
3235  *
3236  * Cache the descriptor ring offsets for RSS to the assigned rings.
3237  *
3238  **/
3239 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
3240 {
3241         int i;
3242         bool ret = false;
3243
3244         if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3245                 for (i = 0; i < adapter->num_rx_queues; i++)
3246                         adapter->rx_ring[i].reg_idx = i;
3247                 for (i = 0; i < adapter->num_tx_queues; i++)
3248                         adapter->tx_ring[i].reg_idx = i;
3249                 ret = true;
3250         } else {
3251                 ret = false;
3252         }
3253
3254         return ret;
3255 }
3256
3257 #ifdef CONFIG_IXGBE_DCB
3258 /**
3259  * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
3260  * @adapter: board private structure to initialize
3261  *
3262  * Cache the descriptor ring offsets for DCB to the assigned rings.
3263  *
3264  **/
3265 static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
3266 {
3267         int i;
3268         bool ret = false;
3269         int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
3270
3271         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3272                 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
3273                         /* the number of queues is assumed to be symmetric */
3274                         for (i = 0; i < dcb_i; i++) {
3275                                 adapter->rx_ring[i].reg_idx = i << 3;
3276                                 adapter->tx_ring[i].reg_idx = i << 2;
3277                         }
3278                         ret = true;
3279                 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
3280                         if (dcb_i == 8) {
3281                                 /*
3282                                  * Tx TC0 starts at: descriptor queue 0
3283                                  * Tx TC1 starts at: descriptor queue 32
3284                                  * Tx TC2 starts at: descriptor queue 64
3285                                  * Tx TC3 starts at: descriptor queue 80
3286                                  * Tx TC4 starts at: descriptor queue 96
3287                                  * Tx TC5 starts at: descriptor queue 104
3288                                  * Tx TC6 starts at: descriptor queue 112
3289                                  * Tx TC7 starts at: descriptor queue 120
3290                                  *
3291                                  * Rx TC0-TC7 are offset by 16 queues each
3292                                  */
3293                                 for (i = 0; i < 3; i++) {
3294                                         adapter->tx_ring[i].reg_idx = i << 5;
3295                                         adapter->rx_ring[i].reg_idx = i << 4;
3296                                 }
3297                                 for ( ; i < 5; i++) {
3298                                         adapter->tx_ring[i].reg_idx =
3299                                                                  ((i + 2) << 4);
3300                                         adapter->rx_ring[i].reg_idx = i << 4;
3301                                 }
3302                                 for ( ; i < dcb_i; i++) {
3303                                         adapter->tx_ring[i].reg_idx =
3304                                                                  ((i + 8) << 3);
3305                                         adapter->rx_ring[i].reg_idx = i << 4;
3306                                 }
3307
3308                                 ret = true;
3309                         } else if (dcb_i == 4) {
3310                                 /*
3311                                  * Tx TC0 starts at: descriptor queue 0
3312                                  * Tx TC1 starts at: descriptor queue 64
3313                                  * Tx TC2 starts at: descriptor queue 96
3314                                  * Tx TC3 starts at: descriptor queue 112
3315                                  *
3316                                  * Rx TC0-TC3 are offset by 32 queues each
3317                                  */
3318                                 adapter->tx_ring[0].reg_idx = 0;
3319                                 adapter->tx_ring[1].reg_idx = 64;
3320                                 adapter->tx_ring[2].reg_idx = 96;
3321                                 adapter->tx_ring[3].reg_idx = 112;
3322                                 for (i = 0 ; i < dcb_i; i++)
3323                                         adapter->rx_ring[i].reg_idx = i << 5;
3324
3325                                 ret = true;
3326                         } else {
3327                                 ret = false;
3328                         }
3329                 } else {
3330                         ret = false;
3331                 }
3332         } else {
3333                 ret = false;
3334         }
3335
3336         return ret;
3337 }
3338 #endif
3339
3340 /**
3341  * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
3342  * @adapter: board private structure to initialize
3343  *
3344  * Cache the descriptor ring offsets for Flow Director to the assigned rings.
3345  *
3346  **/
3347 static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
3348 {
3349         int i;
3350         bool ret = false;
3351
3352         if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
3353             ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3354              (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
3355                 for (i = 0; i < adapter->num_rx_queues; i++)
3356                         adapter->rx_ring[i].reg_idx = i;
3357                 for (i = 0; i < adapter->num_tx_queues; i++)
3358                         adapter->tx_ring[i].reg_idx = i;
3359                 ret = true;
3360         }
3361
3362         return ret;
3363 }
3364
3365 #ifdef IXGBE_FCOE
3366 /**
3367  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
3368  * @adapter: board private structure to initialize
3369  *
3370  * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
3371  *
3372  */
3373 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
3374 {
3375         int i, fcoe_i = 0;
3376         bool ret = false;
3377         struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
3378
3379         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
3380 #ifdef CONFIG_IXGBE_DCB
3381                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3382                         ixgbe_cache_ring_dcb(adapter);
3383                         fcoe_i = adapter->rx_ring[0].reg_idx + 1;
3384                 }
3385 #endif /* CONFIG_IXGBE_DCB */
3386                 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3387                         if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3388                             (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3389                                 ixgbe_cache_ring_fdir(adapter);
3390                         else
3391                                 ixgbe_cache_ring_rss(adapter);
3392
3393                         fcoe_i = f->mask;
3394                 }
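                     /* f->mask holds the index of the first FCoE rx queue
                      * (set in ixgbe_set_fcoe_queues) */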
3395                 for (i = 0; i < f->indices; i++, fcoe_i++)
3396                         adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
3397                 ret = true;
3398         }
3399         return ret;
3400 }
3401
3402 #endif /* IXGBE_FCOE */
3403 /**
3404  * ixgbe_cache_ring_register - Descriptor ring to register mapping
3405  * @adapter: board private structure to initialize
3406  *
3407  * Once we know the feature-set enabled for the device, we'll cache
3408  * the register offset the descriptor ring is assigned to.
3409  *
3410  * Note: the order of the various feature calls is important.  It must start
3411  * with the "most" features enabled at the same time, then trickle down to
3412  * the least amount of features turned on at once.
3413  **/
3414 static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
3415 {
3416         /* start with default case */
3417         adapter->rx_ring[0].reg_idx = 0;
3418         adapter->tx_ring[0].reg_idx = 0;
3419
3420 #ifdef IXGBE_FCOE
3421         if (ixgbe_cache_ring_fcoe(adapter))
3422                 return;
3423
3424 #endif /* IXGBE_FCOE */
3425 #ifdef CONFIG_IXGBE_DCB
3426         if (ixgbe_cache_ring_dcb(adapter))
3427                 return;
3428
3429 #endif
3430         if (ixgbe_cache_ring_fdir(adapter))
3431                 return;
3432
3433         if (ixgbe_cache_ring_rss(adapter))
3434                 return;
3435 }
3436
3437 /**
3438  * ixgbe_alloc_queues - Allocate memory for all rings
3439  * @adapter: board private structure to initialize
3440  *
3441  * We allocate one ring per queue at run-time since we don't know the
3442  * number of queues at compile-time.  Once the rings exist, the
3443  * descriptor-ring-to-register mapping is cached for later use.
3444  **/
3445 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
3446 {
3447         int i;
3448
3449         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
3450                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
3451         if (!adapter->tx_ring)
3452                 goto err_tx_ring_allocation;
3453
3454         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
3455                                    sizeof(struct ixgbe_ring), GFP_KERNEL);
3456         if (!adapter->rx_ring)
3457                 goto err_rx_ring_allocation;
3458
3459         for (i = 0; i < adapter->num_tx_queues; i++) {
3460                 adapter->tx_ring[i].count = adapter->tx_ring_count;
3461                 adapter->tx_ring[i].queue_index = i;
3462         }
3463
3464         for (i = 0; i < adapter->num_rx_queues; i++) {
3465                 adapter->rx_ring[i].count = adapter->rx_ring_count;
3466                 adapter->rx_ring[i].queue_index = i;
3467         }
3468
3469         ixgbe_cache_ring_register(adapter);
3470
3471         return 0;
3472
3473 err_rx_ring_allocation:
3474         kfree(adapter->tx_ring);
3475 err_tx_ring_allocation:
3476         return -ENOMEM;
3477 }
3478
3479 /**
3480  * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
3481  * @adapter: board private structure to initialize
3482  *
3483  * Attempt to configure the interrupts using the best available
3484  * capabilities of the hardware and the kernel.
3485  **/
3486 static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3487 {
3488         struct ixgbe_hw *hw = &adapter->hw;
3489         int err = 0;
3490         int vector, v_budget;
3491
3492         /*
3493          * It's easy to be greedy for MSI-X vectors, but it really
3494          * doesn't do us much good if we have a lot more vectors
3495          * than CPUs.  So let's be conservative and only ask for
3496          * (roughly) twice the number of vectors as there are CPUs.
3497          */
3498         v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3499                        (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
3500
3501         /*
3502          * At the same time, hardware can only support a maximum of
3503          * hw.mac->max_msix_vectors vectors.  With features
3504          * such as RSS and VMDq, we can easily surpass the number of Rx and Tx
3505          * descriptor queues supported by our device.  Thus, we cap it off in
3506          * those rare cases where the cpu count also exceeds our vector limit.
3507          */
3508         v_budget = min(v_budget, (int)hw->mac.max_msix_vectors);
3509
3510         /* A failure in MSI-X entry allocation isn't fatal, but it does
3511          * mean we disable MSI-X capabilities of the adapter. */
3512         adapter->msix_entries = kcalloc(v_budget,
3513                                         sizeof(struct msix_entry), GFP_KERNEL);
3514         if (adapter->msix_entries) {
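                     /* identity-map the MSI-X table entries; on success the
                      * kernel fills in each entry's vector number */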
3515                 for (vector = 0; vector < v_budget; vector++)
3516                         adapter->msix_entries[vector].entry = vector;
3517
3518                 ixgbe_acquire_msix_vectors(adapter, v_budget);
3519
3520                 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3521                         goto out;
3522         }
3523
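             /* MSI-X is unavailable: turn off every feature that depends on it
              * and recompute the queue count (typically falling back to one
              * queue) */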
3524         adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
3525         adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
3526         adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
3527         adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
3528         adapter->atr_sample_rate = 0;
3529         ixgbe_set_num_queues(adapter);
3530
3531         err = pci_enable_msi(adapter->pdev);
3532         if (!err) {
3533                 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
3534         } else {
3535                 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
3536                         "falling back to legacy.  Error: %d\n", err);
3537                 /* reset err */
3538                 err = 0;
3539         }
3540
3541 out:
3542         return err;
3543 }
3544
3545 /**
3546  * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
3547  * @adapter: board private structure to initialize
3548  *
3549  * We allocate one q_vector per queue interrupt.  If allocation fails we
3550  * return -ENOMEM.
3551  **/
3552 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
3553 {
3554         int q_idx, num_q_vectors;
3555         struct ixgbe_q_vector *q_vector;
3556         int napi_vectors;
3557         int (*poll)(struct napi_struct *, int);
3558
3559         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3560                 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3561                 napi_vectors = adapter->num_rx_queues;
3562                 poll = &ixgbe_clean_rxtx_many;
3563         } else {
3564                 num_q_vectors = 1;
3565                 napi_vectors = 1;
3566                 poll = &ixgbe_poll;
3567         }
3568
3569         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3570                 q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
3571                 if (!q_vector)
3572                         goto err_out;
3573                 q_vector->adapter = adapter;
3574                 q_vector->eitr = adapter->eitr_param;
3575                 q_vector->v_idx = q_idx;
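                     /* register the poll routine with NAPI; 64 is the customary
                      * NAPI weight */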
3576                 netif_napi_add(adapter->netdev, &q_vector->napi, poll, 64);
3577                 adapter->q_vector[q_idx] = q_vector;
3578         }
3579
3580         return 0;
3581
3582 err_out:
3583         while (q_idx) {
3584                 q_idx--;
3585                 q_vector = adapter->q_vector[q_idx];
3586                 netif_napi_del(&q_vector->napi);
3587                 kfree(q_vector);
3588                 adapter->q_vector[q_idx] = NULL;
3589         }
3590         return -ENOMEM;
3591 }
3592
3593 /**
3594  * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
3595  * @adapter: board private structure to initialize
3596  *
3597  * This function frees the memory allocated to the q_vectors.  In addition if
3598  * NAPI is enabled it will delete any references to the NAPI struct prior
3599  * to freeing the q_vector.
3600  **/
3601 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
3602 {
3603         int q_idx, num_q_vectors;
3604
3605         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
3606                 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3607         else
3608                 num_q_vectors = 1;
3609
3610         for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
3611                 struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
3612                 adapter->q_vector[q_idx] = NULL;
3613                 netif_napi_del(&q_vector->napi);
3614                 kfree(q_vector);
3615         }
3616 }
3617
3618 static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
3619 {
3620         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3621                 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
3622                 pci_disable_msix(adapter->pdev);
3623                 kfree(adapter->msix_entries);
3624                 adapter->msix_entries = NULL;
3625         } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
3626                 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
3627                 pci_disable_msi(adapter->pdev);
3628         }
3630 }
3631
3632 /**
3633  * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
3634  * @adapter: board private structure to initialize
3635  *
3636  * We determine which interrupt scheme to use based on...
3637  * - Kernel support (MSI, MSI-X)
3638  *   - which can be user-defined (via MODULE_PARAM)
3639  * - Hardware queue count (num_*_queues)
3640  *   - defined by miscellaneous hardware support/features (RSS, etc.)
3641  **/
3642 int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
3643 {
3644         int err;
3645
3646         /* Number of supported queues */
3647         ixgbe_set_num_queues(adapter);
3648
3649         err = ixgbe_set_interrupt_capability(adapter);
3650         if (err) {
3651                 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
3652                 goto err_set_interrupt;
3653         }
3654
3655         err = ixgbe_alloc_q_vectors(adapter);
3656         if (err) {
3657                 DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
3658                         "vectors\n");
3659                 goto err_alloc_q_vectors;
3660         }
3661
3662         err = ixgbe_alloc_queues(adapter);
3663         if (err) {
3664                 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
3665                 goto err_alloc_queues;
3666         }
3667
3668         DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
3669                 "Tx Queue count = %u\n",
3670                 (adapter->num_rx_queues > 1) ? "Enabled" :
3671                 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
3672
3673         set_bit(__IXGBE_DOWN, &adapter->state);
3674
3675         return 0;
3676
3677 err_alloc_queues:
3678         ixgbe_free_q_vectors(adapter);
3679 err_alloc_q_vectors:
3680         ixgbe_reset_interrupt_capability(adapter);
3681 err_set_interrupt:
3682         return err;
3683 }
3684
3685 /**
3686  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
3687  * @adapter: board private structure to clear interrupt scheme on
3688  *
3689  * We go through and clear interrupt specific resources and reset the structure
3690  * to pre-load conditions
3691  **/
3692 void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
3693 {
3694         kfree(adapter->tx_ring);
3695         kfree(adapter->rx_ring);
3696         adapter->tx_ring = NULL;
3697         adapter->rx_ring = NULL;
3698
3699         ixgbe_free_q_vectors(adapter);
3700         ixgbe_reset_interrupt_capability(adapter);
3701 }
3702
3703 /**
3704  * ixgbe_sfp_timer - timer that kicks off the search for a missing module
3705  * @data: pointer to our adapter struct
3706  **/
3707 static void ixgbe_sfp_timer(unsigned long data)
3708 {
3709         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
3710
3711         /*
3712          * Do the actual detection outside of interrupt context, due to
3713          * the delays that SFP+ detection requires
3714          */
3715         schedule_work(&adapter->sfp_task);
3716 }
3717
3718 /**
3719  * ixgbe_sfp_task - worker thread to find a missing module
3720  * @work: pointer to work_struct containing our data
3721  **/
3722 static void ixgbe_sfp_task(struct work_struct *work)
3723 {
3724         struct ixgbe_adapter *adapter = container_of(work,
3725                                                      struct ixgbe_adapter,
3726                                                      sfp_task);
3727         struct ixgbe_hw *hw = &adapter->hw;
3728
3729         if ((hw->phy.type == ixgbe_phy_nl) &&
3730             (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
3731                 s32 ret = hw->phy.ops.identify_sfp(hw);
3732                 if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
3733                         goto reschedule;
3734                 ret = hw->phy.ops.reset(hw);
3735                 if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3736                         dev_err(&adapter->pdev->dev, "failed to initialize "
3737                                 "because an unsupported SFP+ module type "
3738                                 "was detected.\n"
3739                                 "Reload the driver after installing a "
3740                                 "supported module.\n");
3741                         unregister_netdev(adapter->netdev);
3742                 } else {
3743                         DPRINTK(PROBE, INFO, "detected SFP+: %d\n",
3744                                 hw->phy.sfp_type);
3745                 }
3746                 /* don't need this routine any more */
3747                 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3748         }
3749         return;
3750 reschedule:
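             /* module still not found: poll again in roughly two seconds */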
3751         if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
3752                 mod_timer(&adapter->sfp_timer,
3753                           round_jiffies(jiffies + (2 * HZ)));
3754 }
3755
3756 /**
3757  * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
3758  * @adapter: board private structure to initialize
3759  *
3760  * ixgbe_sw_init initializes the Adapter private data structure.
3761  * Fields are initialized based on PCI device information and
3762  * OS network device settings (MTU size).
3763  **/
3764 static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
3765 {
3766         struct ixgbe_hw *hw = &adapter->hw;
3767         struct pci_dev *pdev = adapter->pdev;
3768         unsigned int rss;
3769 #ifdef CONFIG_IXGBE_DCB
3770         int j;
3771         struct tc_configuration *tc;
3772 #endif
3773
3774         /* PCI config space info */
3775
3776         hw->vendor_id = pdev->vendor;
3777         hw->device_id = pdev->device;
3778         hw->revision_id = pdev->revision;
3779         hw->subsystem_vendor_id = pdev->subsystem_vendor;
3780         hw->subsystem_device_id = pdev->subsystem_device;
3781
3782         /* Set capability flags */
3783         rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
3784         adapter->ring_feature[RING_F_RSS].indices = rss;
3785         adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
3786         adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
3787         if (hw->mac.type == ixgbe_mac_82598EB) {
3788                 if (hw->device_id == IXGBE_DEV_ID_82598AT)
3789                         adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
3790                 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
3791         } else if (hw->mac.type == ixgbe_mac_82599EB) {
3792                 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
3793                 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
3794                 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
3795                 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
3796                 adapter->ring_feature[RING_F_FDIR].indices =
3797                                                          IXGBE_MAX_FDIR_INDICES;
3798                 adapter->atr_sample_rate = 20;
3799                 adapter->fdir_pballoc = 0;
3800 #ifdef IXGBE_FCOE
3801                 adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
3802                 adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
3803                 adapter->ring_feature[RING_F_FCOE].indices = 0;
3804 #endif /* IXGBE_FCOE */
3805         }
3806
3807 #ifdef CONFIG_IXGBE_DCB
3808         /* Configure DCB traffic classes */
3809         for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
3810                 tc = &adapter->dcb_cfg.tc_config[j];
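                     /* alternate 12%/13% so the eight traffic classes sum to
                      * 100% of the bandwidth group */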
3811                 tc->path[DCB_TX_CONFIG].bwg_id = 0;
3812                 tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
3813                 tc->path[DCB_RX_CONFIG].bwg_id = 0;
3814                 tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
3815                 tc->dcb_pfc = pfc_disabled;
3816         }
3817         adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
3818         adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
3819         adapter->dcb_cfg.rx_pba_cfg = pba_equal;
3820         adapter->dcb_cfg.pfc_mode_enable = false;
3821         adapter->dcb_cfg.round_robin_enable = false;
3822         adapter->dcb_set_bitmap = 0x00;
3823         ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
3824                            adapter->ring_feature[RING_F_DCB].indices);
3825
3826 #endif
3827
3828         /* default flow control settings */
3829         hw->fc.requested_mode = ixgbe_fc_full;
3830         hw->fc.current_mode = ixgbe_fc_full;    /* init for ethtool output */
3831 #ifdef CONFIG_DCB
3832         adapter->last_lfc_mode = hw->fc.current_mode;
3833 #endif
3834         hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
3835         hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
3836         hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
3837         hw->fc.send_xon = true;
3838         hw->fc.disable_fc_autoneg = false;
3839
3840         /* enable itr by default in dynamic mode */
3841         adapter->itr_setting = 1;
3842         adapter->eitr_param = 20000;
3843
3844         /* set defaults for eitr in MegaBytes */
3845         adapter->eitr_low = 10;
3846         adapter->eitr_high = 20;
3847
3848         /* set default ring sizes */
3849         adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
3850         adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
3851
3852         /* initialize eeprom parameters */
3853         if (ixgbe_init_eeprom_params_generic(hw)) {
3854                 dev_err(&pdev->dev, "EEPROM initialization failed\n");
3855                 return -EIO;
3856         }
3857
3858         /* enable rx csum by default */
3859         adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3860
3861         set_bit(__IXGBE_DOWN, &adapter->state);
3862
3863         return 0;
3864 }
3865
3866 /**
3867  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
3868  * @adapter: board private structure
3869  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
3870  *
3871  * Return 0 on success, negative on failure
3872  **/
3873 int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
3874                              struct ixgbe_ring *tx_ring)
3875 {
3876         struct pci_dev *pdev = adapter->pdev;
3877         int size;
3878
3879         size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
3880         tx_ring->tx_buffer_info = vmalloc(size);
3881         if (!tx_ring->tx_buffer_info)
3882                 goto err;
3883         memset(tx_ring->tx_buffer_info, 0, size);
3884
3885         /* round up to nearest 4K */
3886         tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
3887         tx_ring->size = ALIGN(tx_ring->size, 4096);
3888
3889         tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
3890                                              &tx_ring->dma);
3891         if (!tx_ring->desc)
3892                 goto err;
3893
3894         tx_ring->next_to_use = 0;
3895         tx_ring->next_to_clean = 0;
3896         tx_ring->work_limit = tx_ring->count;
3897         return 0;
3898
3899 err:
3900         vfree(tx_ring->tx_buffer_info);
3901         tx_ring->tx_buffer_info = NULL;
3902         DPRINTK(PROBE, ERR, "Unable to allocate memory for the transmit "
3903                             "descriptor ring\n");
3904         return -ENOMEM;
3905 }
3906
3907 /**
3908  * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
3909  * @adapter: board private structure
3910  *
3911  * If this function returns with an error, then it's possible one or
3912  * more of the rings is populated (while the rest are not).  It is the
3913  * caller's duty to clean those orphaned rings.
3914  *
3915  * Return 0 on success, negative on failure
3916  **/
3917 static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
3918 {
3919         int i, err = 0;
3920
3921         for (i = 0; i < adapter->num_tx_queues; i++) {
3922                 err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
3923                 if (!err)
3924                         continue;
3925                 DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
3926                 break;
3927         }
3928
3929         return err;
3930 }
3931
3932 /**
3933  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
3934  * @adapter: board private structure
3935  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
3936  *
3937  * Returns 0 on success, negative on failure
3938  **/
3939 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
3940                              struct ixgbe_ring *rx_ring)
3941 {
3942         struct pci_dev *pdev = adapter->pdev;
3943         int size;
3944
3945         size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
3946         rx_ring->rx_buffer_info = vmalloc(size);
3947         if (!rx_ring->rx_buffer_info) {
3948                 DPRINTK(PROBE, ERR,
3949                         "vmalloc allocation failed for the rx desc ring\n");
3950                 goto alloc_failed;
3951         }
3952         memset(rx_ring->rx_buffer_info, 0, size);
3953
3954         /* Round up to nearest 4K */
3955         rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
3956         rx_ring->size = ALIGN(rx_ring->size, 4096);
3957
3958         rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma);
3959
3960         if (!rx_ring->desc) {
3961                 DPRINTK(PROBE, ERR,
3962                         "Memory allocation failed for the rx desc ring\n");
3963                 vfree(rx_ring->rx_buffer_info);
3964                 goto alloc_failed;
3965         }
3966
3967         rx_ring->next_to_clean = 0;
3968         rx_ring->next_to_use = 0;
3969
3970         return 0;
3971
3972 alloc_failed:
3973         return -ENOMEM;
3974 }
3975
3976 /**
3977  * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
3978  * @adapter: board private structure
3979  *
3980  * If this function returns with an error, then it's possible one or
3981  * more of the rings is populated (while the rest are not).  It is the
3982  * caller's duty to clean those orphaned rings.
3983  *
3984  * Return 0 on success, negative on failure
3985  **/
3987 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
3988 {
3989         int i, err = 0;
3990
3991         for (i = 0; i < adapter->num_rx_queues; i++) {
3992                 err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
3993                 if (!err)
3994                         continue;
3995                 DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
3996                 break;
3997         }
3998
3999         return err;
4000 }
4001
4002 /**
4003  * ixgbe_free_tx_resources - Free Tx Resources per Queue
4004  * @adapter: board private structure
4005  * @tx_ring: Tx descriptor ring for a specific queue
4006  *
4007  * Free all transmit software resources
4008  **/
4009 void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4010                              struct ixgbe_ring *tx_ring)
4011 {
4012         struct pci_dev *pdev = adapter->pdev;
4013
4014         ixgbe_clean_tx_ring(adapter, tx_ring);
4015
4016         vfree(tx_ring->tx_buffer_info);
4017         tx_ring->tx_buffer_info = NULL;
4018
4019         pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
4020
4021         tx_ring->desc = NULL;
4022 }
4023
4024 /**
4025  * ixgbe_free_all_tx_resources - Free Tx Resources for All Queues
4026  * @adapter: board private structure
4027  *
4028  * Free all transmit software resources
4029  **/
4030 static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
4031 {
4032         int i;
4033
4034         for (i = 0; i < adapter->num_tx_queues; i++)
4035                 if (adapter->tx_ring[i].desc)
4036                         ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
4037 }
4038
4039 /**
4040  * ixgbe_free_rx_resources - Free Rx Resources
4041  * @adapter: board private structure
4042  * @rx_ring: ring to clean the resources from
4043  *
4044  * Free all receive software resources
4045  **/
4046 void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4047                              struct ixgbe_ring *rx_ring)
4048 {
4049         struct pci_dev *pdev = adapter->pdev;
4050
4051         ixgbe_clean_rx_ring(adapter, rx_ring);
4052
4053         vfree(rx_ring->rx_buffer_info);
4054         rx_ring->rx_buffer_info = NULL;
4055
4056         pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
4057
4058         rx_ring->desc = NULL;
4059 }
4060
4061 /**
4062  * ixgbe_free_all_rx_resources - Free Rx Resources for All Queues
4063  * @adapter: board private structure
4064  *
4065  * Free all receive software resources
4066  **/
4067 static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
4068 {
4069         int i;
4070
4071         for (i = 0; i < adapter->num_rx_queues; i++)
4072                 if (adapter->rx_ring[i].desc)
4073                         ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
4074 }
4075
4076 /**
4077  * ixgbe_change_mtu - Change the Maximum Transfer Unit
4078  * @netdev: network interface device structure
4079  * @new_mtu: new value for maximum frame size
4080  *
4081  * Returns 0 on success, negative on failure
4082  **/
4083 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
4084 {
4085         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4086         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
4087
4088         /* MTU < 68 is an error and causes problems on some kernels */
4089         if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
4090                 return -EINVAL;
4091
4092         DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
4093                 netdev->mtu, new_mtu);
4094         /* must set new MTU before calling down or up */
4095         netdev->mtu = new_mtu;
4096
4097         if (netif_running(netdev))
4098                 ixgbe_reinit_locked(adapter);
4099
4100         return 0;
4101 }
4102
4103 /**
4104  * ixgbe_open - Called when a network interface is made active
4105  * @netdev: network interface device structure
4106  *
4107  * Returns 0 on success, negative value on failure
4108  *
4109  * The open entry point is called when a network interface is made
4110  * active by the system (IFF_UP).  At this point all resources needed
4111  * for transmit and receive operations are allocated, the interrupt
4112  * handler is registered with the OS, the watchdog timer is started,
4113  * and the stack is notified that the interface is ready.
4114  **/
4115 static int ixgbe_open(struct net_device *netdev)
4116 {
4117         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4118         int err;
4119
4120         /* disallow open during test */
4121         if (test_bit(__IXGBE_TESTING, &adapter->state))
4122                 return -EBUSY;
4123
4124         netif_carrier_off(netdev);
4125
4126         /* allocate transmit descriptors */
4127         err = ixgbe_setup_all_tx_resources(adapter);
4128         if (err)
4129                 goto err_setup_tx;
4130
4131         /* allocate receive descriptors */
4132         err = ixgbe_setup_all_rx_resources(adapter);
4133         if (err)
4134                 goto err_setup_rx;
4135
4136         ixgbe_configure(adapter);
4137
4138         err = ixgbe_request_irq(adapter);
4139         if (err)
4140                 goto err_req_irq;
4141
4142         err = ixgbe_up_complete(adapter);
4143         if (err)
4144                 goto err_up;
4145
4146         netif_tx_start_all_queues(netdev);
4147
4148         return 0;
4149
4150 err_up:
4151         ixgbe_release_hw_control(adapter);
4152         ixgbe_free_irq(adapter);
4153 err_req_irq:
4154 err_setup_rx:
4155         ixgbe_free_all_rx_resources(adapter);
4156 err_setup_tx:
4157         ixgbe_free_all_tx_resources(adapter);
4158         ixgbe_reset(adapter);
4159
4160         return err;
4161 }
4162
4163 /**
4164  * ixgbe_close - Disables a network interface
4165  * @netdev: network interface device structure
4166  *
4167  * Returns 0, this is not allowed to fail
4168  *
4169  * The close entry point is called when an interface is de-activated
4170  * by the OS.  The hardware is still under the driver's control, but
4171  * needs to be disabled.  A global MAC reset is issued to stop the
4172  * hardware, and all transmit and receive resources are freed.
4173  **/
4174 static int ixgbe_close(struct net_device *netdev)
4175 {
4176         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4177
4178         ixgbe_down(adapter);
4179         ixgbe_free_irq(adapter);
4180
4181         ixgbe_free_all_tx_resources(adapter);
4182         ixgbe_free_all_rx_resources(adapter);
4183
4184         ixgbe_release_hw_control(adapter);
4185
4186         return 0;
4187 }
4188
4189 #ifdef CONFIG_PM
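/**
 * ixgbe_resume - restore the device after a system sleep transition
 * @pdev: PCI device information struct
 *
 * Returns the adapter to D0, restores its PCI config space, rebuilds
 * the interrupt scheme, resets the hardware, and reopens the interface
 * if it was running when the system was suspended.
 **/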
4190 static int ixgbe_resume(struct pci_dev *pdev)
4191 {
4192         struct net_device *netdev = pci_get_drvdata(pdev);
4193         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4194         int err;
4195
4196         pci_set_power_state(pdev, PCI_D0);
4197         pci_restore_state(pdev);
4198
4199         err = pci_enable_device_mem(pdev);
4200         if (err) {
4201                 printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
4202                                 "suspend\n");
4203                 return err;
4204         }
4205         pci_set_master(pdev);
4206
4207         pci_wake_from_d3(pdev, false);
4208
4209         err = ixgbe_init_interrupt_scheme(adapter);
4210         if (err) {
4211                 printk(KERN_ERR "ixgbe: Cannot initialize interrupts for "
4212                                 "device\n");
4213                 return err;
4214         }
4215
4216         ixgbe_reset(adapter);
4217
4218         IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
4219
4220         if (netif_running(netdev)) {
4221                 err = ixgbe_open(adapter->netdev);
4222                 if (err)
4223                         return err;
4224         }
4225
4226         netif_device_attach(netdev);
4227
4228         return 0;
4229 }
4230 #endif /* CONFIG_PM */
4231
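/*
 * __ixgbe_shutdown - common teardown for suspend and shutdown
 *
 * Quiesces the interface and frees its Tx/Rx resources, then programs
 * the wake-up filter registers from adapter->wol.  On return,
 * *enable_wake tells the caller whether Wake-on-LAN was armed.
 */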
4232 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
4233 {
4234         struct net_device *netdev = pci_get_drvdata(pdev);
4235         struct ixgbe_adapter *adapter = netdev_priv(netdev);
4236         struct ixgbe_hw *hw = &adapter->hw;
4237         u32 ctrl, fctrl;
4238         u32 wufc = adapter->wol;
4239 #ifdef CONFIG_PM
4240         int retval = 0;
4241 #endif
4242
4243         netif_device_detach(netdev);
4244
4245         if (netif_running(netdev)) {
4246                 ixgbe_down(adapter);
4247                 ixgbe_free_irq(adapter);
4248                 ixgbe_free_all_tx_resources(adapter);
4249                 ixgbe_free_all_rx_resources(adapter);
4250         }
4251         ixgbe_clear_interrupt_scheme(adapter);
4252
4253 #ifdef CONFIG_PM
4254         retval = pci_save_state(pdev);
4255         if (retval)
4256                 return retval;
4257
4258 #endif
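        /*
         * If any wake-up filter bits are set, keep the receiver configured
         * (including all-multi when waking on multicast) so matching
         * packets can still be recognized in D3; otherwise clear the
         * wake-up control registers entirely.
         */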
4259         if (wufc) {
4260                 ixgbe_set_rx_mode(netdev);
4261
4262                 /* turn on all-multi mode if wake on multicast is enabled */
4263                 if (wufc & IXGBE_WUFC_MC) {
4264                         fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4265                         fctrl |= IXGBE_FCTRL_MPE;
4266                         IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
4267                 }
4268
4269                 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
4270                 ctrl |= IXGBE_CTRL_GIO_DIS;
4271                 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
4272
4273                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, wufc);
4274         } else {
4275                 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
4276                 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
4277         }
4278
4279         if (wufc && hw->mac.type == ixgbe_mac_82599EB)
4280                 pci_wake_from_d3(pdev, true);
4281         else
4282                 pci_wake_from_d3(pdev, false);
4283
4284         *enable_wake = !!wufc;
4285
4286         ixgbe_release_hw_control(adapter);
4287
4288         pci_disable_device(pdev);
4289
4290         return 0;
4291 }
4292
4293 #ifdef CONFIG_PM
4294 static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state)
4295 {
4296         int retval;
4297         bool wake;
4298
4299         retval = __ixgbe_shutdown(pdev, &wake);
4300         if (retval)
4301                 return retval;
4302
4303         if (wake) {
4304                 pci_prepare_to_sleep(pdev);
4305         } else {
4306                 pci_wake_from_d3(pdev, false);
4307                 pci_set_power_state(pdev, PCI_D3hot);
4308         }
4309
4310         return 0;
4311 }
4312 #endif /* CONFIG_PM */
4313
4314 static void ixgbe_shutdown(struct pci_dev *pdev)
4315 {
4316         bool wake;
4317
4318         __ixgbe_shutdown(pdev, &wake);
4319
4320         if (system_state == SYSTEM_POWER_OFF) {
4321                 pci_wake_from_d3(pdev, wake);
4322                 pci_set_power_state(pdev, PCI_D3hot);
4323         }
4324 }
4325
4326 /**
4327  * ixgbe_update_stats - Update the board statistics counters.
4328  * @adapter: board private structure
4329  **/
4330 void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4331 {
4332         struct ixgbe_hw *hw = &adapter->hw;
4333         u64 total_mpc = 0;
4334         u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
4335
4336         if (hw->mac.type == ixgbe_mac_82599EB) {
4337                 u64 rsc_count = 0;
4338                 for (i = 0; i < 16; i++)
4339                         adapter->hw_rx_no_dma_resources +=
4340                                              IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4341                 for (i = 0; i < adapter->num_rx_queues; i++)
4342                         rsc_count += adapter->rx_ring[i].rsc_count;
4343                 adapter->rsc_count = rsc_count;
4344         }
4345
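        /*
         * Most of the statistics registers read below are clear-on-read,
         * so each read returns the delta since the previous update and is
         * accumulated into the software counters.
         */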
4346         adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
4347         for (i = 0; i < 8; i++) {
4348                 /* for packet buffers not used, the register should read 0 */
4349                 mpc = IXGBE_READ_REG(hw, IXGBE_MPC(i));
4350                 missed_rx += mpc;
4351                 adapter->stats.mpc[i] += mpc;
4352                 total_mpc += adapter->stats.mpc[i];
4353                 if (hw->mac.type == ixgbe_mac_82598EB)
4354                         adapter->stats.rnbc[i] += IXGBE_READ_REG(hw, IXGBE_RNBC(i));
4355                 adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
4356                 adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
4357                 adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
4358                 adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
4359                 if (hw->mac.type == ixgbe_mac_82599EB) {
4360                         adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4361                                                             IXGBE_PXONRXCNT(i));
4362                         adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4363                                                            IXGBE_PXOFFRXCNT(i));
4364                         adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4365                 } else {
4366                         adapter->stats.pxonrxc[i] += IXGBE_READ_REG(hw,
4367                                                               IXGBE_PXONRXC(i));
4368                         adapter->stats.pxoffrxc[i] += IXGBE_READ_REG(hw,
4369                                                              IXGBE_PXOFFRXC(i));
4370                 }
4371                 adapter->stats.pxontxc[i] += IXGBE_READ_REG(hw,
4372                                                             IXGBE_PXONTXC(i));
4373                 adapter->stats.pxofftxc[i] += IXGBE_READ_REG(hw,
4374                                                              IXGBE_PXOFFTXC(i));
4375         }
4376         adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
4377         /* work around hardware counting issue */
4378         adapter->stats.gprc -= missed_rx;
4379
4380         /* 82598 hardware only has a 32-bit counter in the high register */
4381         if (hw->mac.type == ixgbe_mac_82599EB) {
4382                 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
4383                 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
4384                 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
4385                 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
4386                 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL);
4387                 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
4388                 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
4389                 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
4390                 adapter->stats.fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
4391                 adapter->stats.fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
4392 #ifdef IXGBE_FCOE
4393                 adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
4394                 adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
4395                 adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
4396                 adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
4397                 adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
4398                 adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
4399 #endif /* IXGBE_FCOE */
4400         } else {
4401                 adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
4402                 adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
4403                 adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
4404                 adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
4405                 adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
4406         }
4407         bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
4408         adapter->stats.bprc += bprc;
4409         adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
4410         if (hw->mac.type == ixgbe_mac_82598EB)
4411                 adapter->stats.mprc -= bprc;
4412         adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
4413         adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
4414         adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
4415         adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
4416         adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
4417         adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
4418         adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
4419         adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
4420         lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
4421         adapter->stats.lxontxc += lxon;
4422         lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
4423         adapter->stats.lxofftxc += lxoff;
4424         adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
4425         adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
4426         adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
4427         /*
4428          * 82598 errata - tx of flow control packets is included in tx counters
4429          */
4430         xon_off_tot = lxon + lxoff;
4431         adapter->stats.gptc -= xon_off_tot;
4432         adapter->stats.mptc -= xon_off_tot;
4433         adapter->stats.gotc -= (xon_off_tot * (ETH_ZLEN + ETH_FCS_LEN));
4435         adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
4436         adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
4437         adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
4438         adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
4439         adapter->stats.ptc64 -= xon_off_tot;
4440         adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
4441         adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
4442         adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
4443         adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
4444         adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
4445         adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4446
4447         /* Fill out the OS statistics structure */
4448         adapter->net_stats.multicast = adapter->stats.mprc;
4449
4450         /* Rx Errors */
4451         adapter->net_stats.rx_errors = adapter->stats.crcerrs +
4452                                        adapter->stats.rlec;
4453         adapter->net_stats.rx_dropped = 0;
4454         adapter->net_stats.rx_length_errors = adapter->stats.rlec;
4455         adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
4456         adapter->net_stats.rx_missed_errors = total_mpc;
4457 }
4458
4459 /**
4460  * ixgbe_watchdog - Timer Call-back
4461  * @data: pointer to adapter cast into an unsigned long
4462  **/
4463 static void ixgbe_watchdog(unsigned long data)
4464 {
4465         struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
4466         struct ixgbe_hw *hw = &adapter->hw;
4467         u64 eics = 0;
4468         int i;
4469
4470         /*
4471          * Do the watchdog outside of interrupt context due to the lovely
4472          * delays that some of the newer hardware requires
4473          */
4474
4475         if (test_bit(__IXGBE_DOWN, &adapter->state))
4476                 goto watchdog_short_circuit;
4477
4478         if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
4479                 /*
4480                  * for legacy and MSI interrupts don't set any bits
4481                  * that are enabled for EIAM, because this operation
4482                  * would set *both* EIMS and EICS for any bit in EIAM
4483                  */
4484                 IXGBE_WRITE_REG(hw, IXGBE_EICS,
4485                         (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
4486                 goto watchdog_reschedule;
4487         }
4488
4489         /* get one bit for every active tx/rx interrupt vector */
4490         for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
4491                 struct ixgbe_q_vector *qv = adapter->q_vector[i];
4492                 if (qv->rxr_count || qv->txr_count)
4493                         eics |= ((u64)1 << i);
4494         }
4495
4496         /* Cause software interrupt to ensure rx rings are cleaned */
4497         ixgbe_irq_rearm_queues(adapter, eics);
4498
4499 watchdog_reschedule:
4500         /* Reset the timer */
4501         mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
4502
4503 watchdog_short_circuit:
4504         schedule_work(&adapter->watchdog_task);
4505 }
4506
4507 /**
4508  * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
4509  * @work: pointer to work_struct containing our data
4510  **/
4511 static void ixgbe_multispeed_fiber_task(struct work_struct *work)
4512 {
4513         struct ixgbe_adapter *adapter = container_of(work,
4514                                                      struct ixgbe_adapter,
4515                                                      multispeed_fiber_task);
4516         struct ixgbe_hw *hw = &adapter->hw;
4517         u32 autoneg;
4518
4519         adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
4520         autoneg = hw->phy.autoneg_advertised;
4521         if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
4522                 hw->mac.ops.get_link_capabilities(hw, &autoneg,
4523                                                   &hw->mac.autoneg);
4524         if (hw->mac.ops.setup_link_speed)
4525                 hw->mac.ops.setup_link_speed(hw, autoneg, true, true);
4526         adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
4527         adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
4528 }
4529
4530 /**
4531  * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
4532  * @work: pointer to work_struct containing our data
4533  **/
4534 static void ixgbe_sfp_config_module_task(struct work_struct *work)
4535 {
4536         struct ixgbe_adapter *adapter = container_of(work,
4537                                                      struct ixgbe_adapter,
4538                                                      sfp_config_module_task);
4539         struct ixgbe_hw *hw = &adapter->hw;
4540         u32 err;
4541
4542         adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
4543
4544         /* Time for electrical oscillations to settle down */
4545         msleep(100);
4546         err = hw->phy.ops.identify_sfp(hw);
4547
4548         if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
4549                 dev_err(&adapter->pdev->dev, "failed to initialize because "
4550                         "an unsupported SFP+ module type was detected.\n"
4551                         "Reload the driver after installing a supported "
4552                         "module.\n");
4553                 unregister_netdev(adapter->netdev);
4554                 return;
4555         }
4556         hw->mac.ops.setup_sfp(hw);
4557
4558         if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
4559                 /* This will also work for DA Twinax connections */
4560                 schedule_work(&adapter->multispeed_fiber_task);
4561         adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
4562 }
4563
4564 /**
4565  * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
4566  * @work: pointer to work_struct containing our data
4567  **/
4568 static void ixgbe_fdir_reinit_task(struct work_struct *work)
4569 {
4570         struct ixgbe_adapter *adapter = container_of(work,
4571                                                      struct ixgbe_adapter,
4572                                                      fdir_reinit_task);
4573         struct ixgbe_hw *hw = &adapter->hw;
4574         int i;
4575
4576         if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
4577                 for (i = 0; i < adapter->num_tx_queues; i++)
4578                         set_bit(__IXGBE_FDIR_INIT_DONE,
4579                                 &(adapter->tx_ring[i].reinit_state));
4580         } else {
4581                 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
4582                         "ignoring FDIR ATR filters\n");
4583         }
4584         /* Done FDIR Re-initialization, enable transmits */
4585         netif_tx_start_all_queues(adapter->netdev);
4586 }
4587
4588 /**
4589  * ixgbe_watchdog_task - worker thread to bring link up
4590  * @work: pointer to work_struct containing our data
4591  **/
4592 static void ixgbe_watchdog_task(struct work_struct *work)
4593 {
4594         struct ixgbe_adapter *adapter = container_of(work,
4595                                                      struct ixgbe_adapter,
4596                                                      watchdog_task);
4597         struct net_device *netdev = adapter->netdev;
4598         struct ixgbe_hw *hw = &adapter->hw;
4599         u32 link_speed = adapter->link_speed;
4600         bool link_up = adapter->link_up;
4601         int i;
4602         struct ixgbe_ring *tx_ring;
4603         int some_tx_pending = 0;
4604
4605         adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
4606
4607         if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
4608                 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
4609                 if (link_up) {
4610 #ifdef CONFIG_DCB
4611                         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4612                                 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
4613                                         hw->mac.ops.fc_enable(hw, i);
4614                         } else {
4615                                 hw->mac.ops.fc_enable(hw, 0);
4616                         }
4617 #else
4618                         hw->mac.ops.fc_enable(hw, 0);
4619 #endif
4620                 }
4621
4622                 if (link_up ||
4623                     time_after(jiffies, (adapter->link_check_timeout +
4624                                          IXGBE_TRY_LINK_TIMEOUT))) {
4625                         adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
4626                         IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
4627                 }
4628                 adapter->link_up = link_up;
4629                 adapter->link_speed = link_speed;
4630         }
4631
4632         if (link_up) {
4633                 if (!netif_carrier_ok(netdev)) {
4634                         bool flow_rx, flow_tx;
4635
4636                         if (hw->mac.type == ixgbe_mac_82599EB) {
4637                                 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
4638                                 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
4639                                 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
4640                                 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
4641                         } else {
4642                                 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
4643                                 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
4644                                 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
4645                                 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
4646                         }
4647
4648                         printk(KERN_INFO "ixgbe: %s NIC Link is Up %s, "
4649                                "Flow Control: %s\n",
4650                                netdev->name,
4651                                (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
4652                                 "10 Gbps" :
4653                                 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
4654                                  "1 Gbps" : "unknown speed")),
4655                                ((flow_rx && flow_tx) ? "RX/TX" :
4656                                 (flow_rx ? "RX" :
4657                                 (flow_tx ? "TX" : "None"))));
4658
4659                         netif_carrier_on(netdev);
4660                 } else {
4661                         /* Force detection of hung controller */
4662                         adapter->detect_tx_hung = true;
4663                 }
4664         } else {
4665                 adapter->link_up = false;
4666                 adapter->link_speed = 0;
4667                 if (netif_carrier_ok(netdev)) {
4668                         printk(KERN_INFO "ixgbe: %s NIC Link is Down\n",
4669                                netdev->name);
4670                         netif_carrier_off(netdev);
4671                 }
4672         }
4673
4674         if (!netif_carrier_ok(netdev)) {
4675                 for (i = 0; i < adapter->num_tx_queues; i++) {
4676                         tx_ring = &adapter->tx_ring[i];
4677                         if (tx_ring->next_to_use != tx_ring->next_to_clean) {
4678                                 some_tx_pending = 1;
4679                                 break;
4680                         }
4681                 }
4682
4683                 if (some_tx_pending) {
4684                         /* We've lost link, so the controller stops DMA,
4685                          * but we've got queued Tx work that's never going
4686                          * to get done, so reset controller to flush Tx.
4687                          * (Do the reset outside of interrupt context).
4688                          */
4689                         schedule_work(&adapter->reset_task);
4690                 }
4691         }
4692
4693         ixgbe_update_stats(adapter);
4694         adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
4695 }
4696
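/**
 * ixgbe_tso - set up a TSO context descriptor if the skb needs one
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated for this packet
 * @hdr_len: returns the total header length covered by the offload
 *
 * Returns true if a context descriptor was queued, false for non-GSO
 * packets, or a negative errno if a cloned header could not be expanded.
 **/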
4697 static int ixgbe_tso(struct ixgbe_adapter *adapter,
4698                      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
4699                      u32 tx_flags, u8 *hdr_len)
4700 {
4701         struct ixgbe_adv_tx_context_desc *context_desc;
4702         unsigned int i;
4703         int err;
4704         struct ixgbe_tx_buffer *tx_buffer_info;
4705         u32 vlan_macip_lens = 0, type_tucmd_mlhl;
4706         u32 mss_l4len_idx, l4len;
4707
4708         if (skb_is_gso(skb)) {
4709                 if (skb_header_cloned(skb)) {
4710                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
4711                         if (err)
4712                                 return err;
4713                 }
4714                 l4len = tcp_hdrlen(skb);
4715                 *hdr_len += l4len;
4716
4717                 if (skb->protocol == htons(ETH_P_IP)) {
4718                         struct iphdr *iph = ip_hdr(skb);
4719                         iph->tot_len = 0;
4720                         iph->check = 0;
4721                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4722                                                                  iph->daddr, 0,
4723                                                                  IPPROTO_TCP,
4724                                                                  0);
4725                         adapter->hw_tso_ctxt++;
4726                 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
4727                         ipv6_hdr(skb)->payload_len = 0;
4728                         tcp_hdr(skb)->check =
4729                             ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4730                                              &ipv6_hdr(skb)->daddr,
4731                                              0, IPPROTO_TCP, 0);
4732                         adapter->hw_tso6_ctxt++;
4733                 }
4734
4735                 i = tx_ring->next_to_use;
4736
4737                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4738                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4739
4740                 /* VLAN MACLEN IPLEN */
4741                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4742                         vlan_macip_lens |=
4743                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4744                 vlan_macip_lens |= ((skb_network_offset(skb)) <<
4745                                     IXGBE_ADVTXD_MACLEN_SHIFT);
4746                 *hdr_len += skb_network_offset(skb);
4747                 vlan_macip_lens |=
4748                     (skb_transport_header(skb) - skb_network_header(skb));
4749                 *hdr_len +=
4750                     (skb_transport_header(skb) - skb_network_header(skb));
4751                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4752                 context_desc->seqnum_seed = 0;
4753
4754                 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
4755                 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
4756                                    IXGBE_ADVTXD_DTYP_CTXT);
4757
4758                 if (skb->protocol == htons(ETH_P_IP))
4759                         type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
4760                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
4761                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4762
4763                 /* MSS L4LEN IDX */
4764                 mss_l4len_idx =
4765                     (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
4766                 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
4767                 /* use index 1 for TSO */
4768                 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
4769                 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
4770
4771                 tx_buffer_info->time_stamp = jiffies;
4772                 tx_buffer_info->next_to_watch = i;
4773
4774                 i++;
4775                 if (i == tx_ring->count)
4776                         i = 0;
4777                 tx_ring->next_to_use = i;
4778
4779                 return true;
4780         }
4781         return false;
4782 }
4783
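/**
 * ixgbe_tx_csum - set up a checksum offload context descriptor
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated for this packet
 *
 * Returns true if a context descriptor was queued, i.e. the packet
 * needs a partial checksum or carries a VLAN tag; false otherwise.
 **/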
4784 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
4785                           struct ixgbe_ring *tx_ring,
4786                           struct sk_buff *skb, u32 tx_flags)
4787 {
4788         struct ixgbe_adv_tx_context_desc *context_desc;
4789         unsigned int i;
4790         struct ixgbe_tx_buffer *tx_buffer_info;
4791         u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
4792
4793         if (skb->ip_summed == CHECKSUM_PARTIAL ||
4794             (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
4795                 i = tx_ring->next_to_use;
4796                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4797                 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
4798
4799                 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4800                         vlan_macip_lens |=
4801                             (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
4802                 vlan_macip_lens |= (skb_network_offset(skb) <<
4803                                     IXGBE_ADVTXD_MACLEN_SHIFT);
4804                 if (skb->ip_summed == CHECKSUM_PARTIAL)
4805                         vlan_macip_lens |= (skb_transport_header(skb) -
4806                                             skb_network_header(skb));
4807
4808                 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
4809                 context_desc->seqnum_seed = 0;
4810
4811                 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
4812                                     IXGBE_ADVTXD_DTYP_CTXT);
4813
4814                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4815                         switch (skb->protocol) {
4816                         case cpu_to_be16(ETH_P_IP):
4817                                 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
4818                                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4819                                         type_tucmd_mlhl |=
4820                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
4821                                 else if (ip_hdr(skb)->protocol == IPPROTO_SCTP)
4822                                         type_tucmd_mlhl |=
4823                                                 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
4824                                 break;
4825                         case cpu_to_be16(ETH_P_IPV6):
4826                                 /* XXX what about other V6 headers?? */
4827                                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
4828                                         type_tucmd_mlhl |=
4829                                                 IXGBE_ADVTXD_TUCMD_L4T_TCP;
4830                                 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_SCTP)
4831                                         type_tucmd_mlhl |=
4832                                                 IXGBE_ADVTXD_TUCMD_L4T_SCTP;
4833                                 break;
4834                         default:
4835                                 if (unlikely(net_ratelimit())) {
4836                                         DPRINTK(PROBE, WARNING,
4837                                          "partial checksum but proto=%x!\n",
4838                                          skb->protocol);
4839                                 }
4840                                 break;
4841                         }
4842                 }
4843
4844                 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
4845                 /* use index zero for tx checksum offload */
4846                 context_desc->mss_l4len_idx = 0;
4847
4848                 tx_buffer_info->time_stamp = jiffies;
4849                 tx_buffer_info->next_to_watch = i;
4850
4851                 adapter->hw_csum_tx_good++;
4852                 i++;
4853                 if (i == tx_ring->count)
4854                         i = 0;
4855                 tx_ring->next_to_use = i;
4856
4857                 return true;
4858         }
4859
4860         return false;
4861 }
4862
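/**
 * ixgbe_tx_map - record the DMA mappings of an skb in the ring
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @skb: packet to transmit
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated for this packet
 * @first: index of the first descriptor used by this packet
 *
 * Splits the linear data and each page fragment into chunks of at most
 * IXGBE_MAX_DATA_PER_TXD bytes, one tx_buffer_info entry per chunk.
 * Returns the number of entries consumed, or 0 if DMA mapping failed.
 **/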
4863 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
4864                         struct ixgbe_ring *tx_ring,
4865                         struct sk_buff *skb, u32 tx_flags,
4866                         unsigned int first)
4867 {
4868         struct ixgbe_tx_buffer *tx_buffer_info;
4869         unsigned int len;
4870         unsigned int total = skb->len;
4871         unsigned int offset = 0, size, count = 0, i;
4872         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
4873         unsigned int f;
4874         dma_addr_t *map;
4875
4876         i = tx_ring->next_to_use;
4877
4878         if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
4879                 dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
4880                 return 0;
4881         }
4882
4883         map = skb_shinfo(skb)->dma_maps;
4884
4885         if (tx_flags & IXGBE_TX_FLAGS_FCOE)
4886                 /* excluding fcoe_crc_eof for FCoE */
4887                 total -= sizeof(struct fcoe_crc_eof);
4888
4889         len = min(skb_headlen(skb), total);
4890         while (len) {
4891                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4892                 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4893
4894                 tx_buffer_info->length = size;
4895                 tx_buffer_info->dma = skb_shinfo(skb)->dma_head + offset;
4896                 tx_buffer_info->time_stamp = jiffies;
4897                 tx_buffer_info->next_to_watch = i;
4898
4899                 len -= size;
4900                 total -= size;
4901                 offset += size;
4902                 count++;
4903
4904                 if (len) {
4905                         i++;
4906                         if (i == tx_ring->count)
4907                                 i = 0;
4908                 }
4909         }
4910
4911         for (f = 0; f < nr_frags; f++) {
4912                 struct skb_frag_struct *frag;
4913
4914                 frag = &skb_shinfo(skb)->frags[f];
4915                 len = min((unsigned int)frag->size, total);
4916                 offset = 0;
4917
4918                 while (len) {
4919                         i++;
4920                         if (i == tx_ring->count)
4921                                 i = 0;
4922
4923                         tx_buffer_info = &tx_ring->tx_buffer_info[i];
4924                         size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
4925
4926                         tx_buffer_info->length = size;
4927                         tx_buffer_info->dma = map[f] + offset;
4928                         tx_buffer_info->time_stamp = jiffies;
4929                         tx_buffer_info->next_to_watch = i;
4930
4931                         len -= size;
4932                         total -= size;
4933                         offset += size;
4934                         count++;
4935                 }
4936                 if (total == 0)
4937                         break;
4938         }
4939
4940         tx_ring->tx_buffer_info[i].skb = skb;
4941         tx_ring->tx_buffer_info[first].next_to_watch = i;
4942
4943         return count;
4944 }
4945
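/**
 * ixgbe_tx_queue - write the hardware descriptors and notify the device
 * @adapter: board private structure
 * @tx_ring: ring on which the packet will be sent
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated for this packet
 * @count: number of descriptors to write
 * @paylen: total packet length
 * @hdr_len: header length already accounted for by TSO/FSO
 *
 * Builds the command and offload status words once, stamps them into
 * each descriptor, and finally bumps the ring tail so the hardware
 * starts fetching.
 **/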
4946 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
4947                            struct ixgbe_ring *tx_ring,
4948                            int tx_flags, int count, u32 paylen, u8 hdr_len)
4949 {
4950         union ixgbe_adv_tx_desc *tx_desc = NULL;
4951         struct ixgbe_tx_buffer *tx_buffer_info;
4952         u32 olinfo_status = 0, cmd_type_len = 0;
4953         unsigned int i;
4954         u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
4955
4956         cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
4957
4958         cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
4959
4960         if (tx_flags & IXGBE_TX_FLAGS_VLAN)
4961                 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
4962
4963         if (tx_flags & IXGBE_TX_FLAGS_TSO) {
4964                 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
4965
4966                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
4967                                  IXGBE_ADVTXD_POPTS_SHIFT;
4968
4969                 /* use index 1 context for tso */
4970                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
4971                 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
4972                         olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
4973                                          IXGBE_ADVTXD_POPTS_SHIFT;
4974
4975         } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
4976                 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
4977                                  IXGBE_ADVTXD_POPTS_SHIFT;
4978
4979         if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
4980                 olinfo_status |= IXGBE_ADVTXD_CC;
4981                 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
4982                 if (tx_flags & IXGBE_TX_FLAGS_FSO)
4983                         cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
4984         }
4985
4986         olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
4987
4988         i = tx_ring->next_to_use;
4989         while (count--) {
4990                 tx_buffer_info = &tx_ring->tx_buffer_info[i];
4991                 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
4992                 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
4993                 tx_desc->read.cmd_type_len =
4994                         cpu_to_le32(cmd_type_len | tx_buffer_info->length);
4995                 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
4996                 i++;
4997                 if (i == tx_ring->count)
4998                         i = 0;
4999         }
5000
5001         tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
5002
5003         /*
5004          * Force memory writes to complete before letting h/w
5005          * know there are new descriptors to fetch.  (Only
5006          * applicable for weak-ordered memory model archs,
5007          * such as IA-64).
5008          */
5009         wmb();
5010
5011         tx_ring->next_to_use = i;
5012         writel(i, adapter->hw.hw_addr + tx_ring->tail);
5013 }
5014
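/**
 * ixgbe_atr - sample a transmitted flow into the 82599 Flow Director
 * @adapter: board private structure
 * @skb: packet being transmitted
 * @queue: Tx queue the packet is sent on
 * @tx_flags: IXGBE_TX_FLAGS_* accumulated for this packet
 *
 * Builds a signature filter from the packet's addresses and ports so
 * that receive traffic for the same flow is steered to @queue.
 **/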
5015 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
5016                       int queue, u32 tx_flags)
5017 {
5018         /* Right now, we support IPv4 only */
5019         struct ixgbe_atr_input atr_input;
5020         struct tcphdr *th;
5021         struct udphdr *uh;
5022         struct iphdr *iph = ip_hdr(skb);
5023         struct ethhdr *eth = (struct ethhdr *)skb->data;
5024         u16 vlan_id, src_port, dst_port, flex_bytes;
5025         u32 src_ipv4_addr, dst_ipv4_addr;
5026         u8 l4type = 0;
5027
5028         /* check if we're UDP or TCP */
5029         if (iph->protocol == IPPROTO_TCP) {
5030                 th = tcp_hdr(skb);
5031                 src_port = th->source;
5032                 dst_port = th->dest;
5033                 l4type |= IXGBE_ATR_L4TYPE_TCP;
5034                 /* l4type IPv4 type is 0, no need to assign */
5035         } else if (iph->protocol == IPPROTO_UDP) {
5036                 uh = udp_hdr(skb);
5037                 src_port = uh->source;
5038                 dst_port = uh->dest;
5039                 l4type |= IXGBE_ATR_L4TYPE_UDP;
5040                 /* l4type IPv4 type is 0, no need to assign */
5041         } else {
5042                 /* Unsupported L4 header, just bail here */
5043                 return;
5044         }
5045
5046         memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
5047
5048         vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
5049                    IXGBE_TX_FLAGS_VLAN_SHIFT;
5050         src_ipv4_addr = iph->saddr;
5051         dst_ipv4_addr = iph->daddr;
5052         flex_bytes = eth->h_proto;
5053
5054         ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
5055         ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
5056         ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
5057         ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
5058         ixgbe_atr_set_l4type_82599(&atr_input, l4type);
5059         /* src and dst are inverted, think how the receiver sees them */
5060         ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
5061         ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
5062
5063         /* This assumes the Rx queue and Tx queue are bound to the same CPU */
5064         ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
5065 }
5066
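/*
 * Slow path of ixgbe_maybe_stop_tx: stop the subqueue first, then
 * re-check the free-descriptor count under a memory barrier in case
 * the Tx cleanup path made room in the meantime.
 */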
5067 static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
5068                                  struct ixgbe_ring *tx_ring, int size)
5069 {
5070         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5071
5072         netif_stop_subqueue(netdev, tx_ring->queue_index);
5073         /* Herbert's original patch had:
5074          *  smp_mb__after_netif_stop_queue();
5075          * but since that doesn't exist yet, just open code it. */
5076         smp_mb();
5077
5078         /* We need to check again in case another CPU has just
5079          * made room available. */
5080         if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
5081                 return -EBUSY;
5082
5083         /* A reprieve! - use start_queue because it doesn't call schedule */
5084         netif_start_subqueue(netdev, tx_ring->queue_index);
5085         ++adapter->restart_queue;
5086         return 0;
5087 }
5088
5089 static int ixgbe_maybe_stop_tx(struct net_device *netdev,
5090                               struct ixgbe_ring *tx_ring, int size)
5091 {
5092         if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
5093                 return 0;
5094         return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
5095 }
5096
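/*
 * ixgbe_select_queue - pick a Tx queue for this skb
 *
 * With Flow Director hashing enabled the sending CPU's id is used, so
 * the ATR filter added at transmit time steers receives back to the
 * same CPU; with DCB the queue follows the VLAN priority; otherwise
 * fall back to the stack's default Tx hash.
 */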
5097 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
5098 {
5099         struct ixgbe_adapter *adapter = netdev_priv(dev);
5100
5101         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
5102                 return smp_processor_id();
5103
5104         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5105                 return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
5106
5107         return skb_tx_hash(dev, skb);
5108 }
5109
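/**
 * ixgbe_xmit_frame - transmit entry point (ndo_start_xmit)
 * @skb: packet to transmit
 * @netdev: network interface device structure
 *
 * Worst-case descriptor usage is estimated up front: one optional
 * context descriptor (TSO, checksum, VLAN or FCoE) plus
 * TXD_USE_COUNT() data descriptors for the linear area and for each
 * page fragment.  If the ring cannot hold that many, NETDEV_TX_BUSY
 * is returned and the stack will requeue the skb later.
 **/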
5110 static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5111                                     struct net_device *netdev)
5112 {
5113         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5114         struct ixgbe_ring *tx_ring;
5115         unsigned int first;
5116         unsigned int tx_flags = 0;
5117         u8 hdr_len = 0;
5118         int r_idx = 0, tso;
5119         int count = 0;
5120         unsigned int f;
5121
5122         if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
5123                 tx_flags |= vlan_tx_tag_get(skb);
5124                 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5125                         tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK;
5126                         tx_flags |= (skb->queue_mapping << 13);
5127                 }
5128                 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5129                 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5130         } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
5131                 if (skb->priority != TC_PRIO_CONTROL) {
5132                         tx_flags |= (skb->queue_mapping << 13);
5133                         tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5134                         tx_flags |= IXGBE_TX_FLAGS_VLAN;
5135                 } else {
5136                         skb->queue_mapping =
5137                                 adapter->ring_feature[RING_F_DCB].indices - 1;
5138                 }
5139         }
5140
5141         r_idx = skb->queue_mapping;
5142         tx_ring = &adapter->tx_ring[r_idx];
5143
5144         if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
5145             (skb->protocol == htons(ETH_P_FCOE)))
5146                 tx_flags |= IXGBE_TX_FLAGS_FCOE;
5147
5148         /* four things can cause us to need a context descriptor */
5149         if (skb_is_gso(skb) ||
5150             (skb->ip_summed == CHECKSUM_PARTIAL) ||
5151             (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
5152             (tx_flags & IXGBE_TX_FLAGS_FCOE))
5153                 count++;
5154
5155         count += TXD_USE_COUNT(skb_headlen(skb));
5156         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
5157                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
5158
5159         if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
5160                 adapter->tx_busy++;
5161                 return NETDEV_TX_BUSY;
5162         }
5163
5164         first = tx_ring->next_to_use;
5165         if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
5166 #ifdef IXGBE_FCOE
5167                 /* setup tx offload for FCoE */
5168                 tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5169                 if (tso < 0) {
5170                         dev_kfree_skb_any(skb);
5171                         return NETDEV_TX_OK;
5172                 }
5173                 if (tso)
5174                         tx_flags |= IXGBE_TX_FLAGS_FSO;
5175 #endif /* IXGBE_FCOE */
5176         } else {
5177                 if (skb->protocol == htons(ETH_P_IP))
5178                         tx_flags |= IXGBE_TX_FLAGS_IPV4;
5179                 tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
5180                 if (tso < 0) {
5181                         dev_kfree_skb_any(skb);
5182                         return NETDEV_TX_OK;
5183                 }
5184
5185                 if (tso)
5186                         tx_flags |= IXGBE_TX_FLAGS_TSO;
5187                 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
5188                          (skb->ip_summed == CHECKSUM_PARTIAL))
5189                         tx_flags |= IXGBE_TX_FLAGS_CSUM;
5190         }
5191
5192         count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
5193         if (count) {
5194                 /* add the ATR filter if ATR is on */
5195                 if (tx_ring->atr_sample_rate) {
5196                         ++tx_ring->atr_count;
5197                         if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
5198                              test_bit(__IXGBE_FDIR_INIT_DONE,
5199                                       &tx_ring->reinit_state)) {
5200                                 ixgbe_atr(adapter, skb, tx_ring->queue_index,
5201                                           tx_flags);
5202                                 tx_ring->atr_count = 0;
5203                         }
5204                 }
5205                 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
5206                                hdr_len);
5207                 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
5208
5209         } else {
5210                 dev_kfree_skb_any(skb);
5211                 tx_ring->tx_buffer_info[first].time_stamp = 0;
5212                 tx_ring->next_to_use = first;
5213         }
5214
5215         return NETDEV_TX_OK;
5216 }
5217
5218 /**
5219  * ixgbe_get_stats - Get System Network Statistics
5220  * @netdev: network interface device structure
5221  *
5222  * Returns the address of the device statistics structure.
5223  * The statistics are actually updated from the timer callback.
5224  **/
5225 static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
5226 {
5227         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5228
5229         /* only return the current stats */
5230         return &adapter->net_stats;
5231 }
5232
5233 /**
5234  * ixgbe_set_mac - Change the Ethernet Address of the NIC
5235  * @netdev: network interface device structure
5236  * @p: pointer to an address structure
5237  *
5238  * Returns 0 on success, negative on failure
5239  **/
5240 static int ixgbe_set_mac(struct net_device *netdev, void *p)
5241 {
5242         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5243         struct ixgbe_hw *hw = &adapter->hw;
5244         struct sockaddr *addr = p;
5245
5246         if (!is_valid_ether_addr(addr->sa_data))
5247                 return -EADDRNOTAVAIL;
5248
5249         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
5250         memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
5251
5252         hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
5253
5254         return 0;
5255 }
5256
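/*
 * MDIO accessors used by the mdio/mii ioctl layer: reads and writes
 * are passed through to the PHY register ops after checking that the
 * request targets this device's PHY address (prtad).
 */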
5257 static int
5258 ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
5259 {
5260         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5261         struct ixgbe_hw *hw = &adapter->hw;
5262         u16 value;
5263         int rc;
5264
5265         if (prtad != hw->phy.mdio.prtad)
5266                 return -EINVAL;
5267         rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
5268         if (!rc)
5269                 rc = value;
5270         return rc;
5271 }
5272
5273 static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
5274                             u16 addr, u16 value)
5275 {
5276         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5277         struct ixgbe_hw *hw = &adapter->hw;
5278
5279         if (prtad != hw->phy.mdio.prtad)
5280                 return -EINVAL;
5281         return hw->phy.ops.write_reg(hw, addr, devad, value);
5282 }
5283
5284 static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
5285 {
5286         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5287
5288         return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
5289 }
5290
5291 /**
5292  * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
5293  * netdev->dev_addrs
5294  * @dev: network interface device structure
5295  *
5296  * Returns non-zero on failure
5297  **/
5298 static int ixgbe_add_sanmac_netdev(struct net_device *dev)
5299 {
5300         int err = 0;
5301         struct ixgbe_adapter *adapter = netdev_priv(dev);
5302         struct ixgbe_mac_info *mac = &adapter->hw.mac;
5303
5304         if (is_valid_ether_addr(mac->san_addr)) {
5305                 rtnl_lock();
5306                 err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5307                 rtnl_unlock();
5308         }
5309         return err;
5310 }
5311
5312 /**
5313  * ixgbe_del_sanmac_netdev - Remove the SAN MAC address from the corresponding
5314  * netdev->dev_addrs
5315  * @dev: network interface device structure
5316  *
5317  * Returns non-zero on failure
5318  **/
5319 static int ixgbe_del_sanmac_netdev(struct net_device *dev)
5320 {
5321         int err = 0;
5322         struct ixgbe_adapter *adapter = netdev_priv(dev);
5323         struct ixgbe_mac_info *mac = &adapter->hw.mac;
5324
5325         if (is_valid_ether_addr(mac->san_addr)) {
5326                 rtnl_lock();
5327                 err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
5328                 rtnl_unlock();
5329         }
5330         return err;
5331 }
5332
5333 #ifdef CONFIG_NET_POLL_CONTROLLER
5334 /*
5335  * Polling 'interrupt' - used by things like netconsole to send skbs
5336  * without having to re-enable interrupts. It's not called while
5337  * the interrupt routine is executing.
5338  */
5339 static void ixgbe_netpoll(struct net_device *netdev)
5340 {
5341         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5342         int i;
5343
5344         adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
5345         if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
5346                 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
5347                 for (i = 0; i < num_q_vectors; i++) {
5348                         struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
5349                         ixgbe_msix_clean_many(0, q_vector);
5350                 }
5351         } else {
5352                 ixgbe_intr(adapter->pdev->irq, netdev);
5353         }
5354         adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
5355 }
5356 #endif
5357
5358 static const struct net_device_ops ixgbe_netdev_ops = {
5359         .ndo_open               = ixgbe_open,
5360         .ndo_stop               = ixgbe_close,
5361         .ndo_start_xmit         = ixgbe_xmit_frame,
5362         .ndo_select_queue       = ixgbe_select_queue,
5363         .ndo_get_stats          = ixgbe_get_stats,
5364         .ndo_set_rx_mode        = ixgbe_set_rx_mode,
5365         .ndo_set_multicast_list = ixgbe_set_rx_mode,
5366         .ndo_validate_addr      = eth_validate_addr,
5367         .ndo_set_mac_address    = ixgbe_set_mac,
5368         .ndo_change_mtu         = ixgbe_change_mtu,
5369         .ndo_tx_timeout         = ixgbe_tx_timeout,
5370         .ndo_vlan_rx_register   = ixgbe_vlan_rx_register,
5371         .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
5372         .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
5373         .ndo_do_ioctl           = ixgbe_ioctl,
5374 #ifdef CONFIG_NET_POLL_CONTROLLER
5375         .ndo_poll_controller    = ixgbe_netpoll,
5376 #endif
5377 #ifdef IXGBE_FCOE
5378         .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
5379         .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
5380 #endif /* IXGBE_FCOE */
5381 };
5382
5383 /**
5384  * ixgbe_probe - Device Initialization Routine
5385  * @pdev: PCI device information struct
5386  * @ent: entry in ixgbe_pci_tbl
5387  *
5388  * Returns 0 on success, negative on failure
5389  *
5390  * ixgbe_probe initializes an adapter identified by a pci_dev structure.
5391  * The OS initialization, configuring of the adapter private structure,
5392  * and a hardware reset occur.
5393  **/
5394 static int __devinit ixgbe_probe(struct pci_dev *pdev,
5395                                  const struct pci_device_id *ent)
5396 {
5397         struct net_device *netdev;
5398         struct ixgbe_adapter *adapter = NULL;
5399         struct ixgbe_hw *hw;
5400         const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
5401         static int cards_found;
5402         int i, err, pci_using_dac;
5403 #ifdef IXGBE_FCOE
5404         u16 device_caps;
5405 #endif
5406         u32 part_num, eec;
5407
5408         err = pci_enable_device_mem(pdev);
5409         if (err)
5410                 return err;
5411
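             /* try a 64-bit DMA mask first; fall back to 32-bit if the
              * platform cannot provide 64-bit mappings
              */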
5412         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
5413             !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
5414                 pci_using_dac = 1;
5415         } else {
5416                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
5417                 if (err) {
5418                         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5419                         if (err) {
5420                                 dev_err(&pdev->dev, "No usable DMA "
5421                                         "configuration, aborting\n");
5422                                 goto err_dma;
5423                         }
5424                 }
5425                 pci_using_dac = 0;
5426         }
5427
5428         err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
5429                                            IORESOURCE_MEM), ixgbe_driver_name);
5430         if (err) {
5431                 dev_err(&pdev->dev,
5432                         "pci_request_selected_regions failed 0x%x\n", err);
5433                 goto err_pci_reg;
5434         }
5435
5436         err = pci_enable_pcie_error_reporting(pdev);
5437         if (err) {
5438                 dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
5439                                     "0x%x\n", err);
5440                 /* non-fatal, continue */
5441         }
5442
5443         pci_set_master(pdev);
5444         pci_save_state(pdev);
5445
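             /* allocate the netdev with private room for the adapter struct
              * and the maximum possible TX queue count; the number of queues
              * actually used is chosen later
              */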
5446         netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
5447         if (!netdev) {
5448                 err = -ENOMEM;
5449                 goto err_alloc_etherdev;
5450         }
5451
5452         SET_NETDEV_DEV(netdev, &pdev->dev);
5453
5454         pci_set_drvdata(pdev, netdev);
5455         adapter = netdev_priv(netdev);
5456
5457         adapter->netdev = netdev;
5458         adapter->pdev = pdev;
5459         hw = &adapter->hw;
5460         hw->back = adapter;
5461         adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
5462
5463         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
5464                               pci_resource_len(pdev, 0));
5465         if (!hw->hw_addr) {
5466                 err = -EIO;
5467                 goto err_ioremap;
5468         }
5469
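             /* BARs 1-5 are inspected but never mapped; this loop is
              * effectively a no-op, apparently left over from an earlier
              * revision
              */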
5470         for (i = 1; i <= 5; i++) {
5471                 if (pci_resource_len(pdev, i) == 0)
5472                         continue;
5473         }
5474
5475         netdev->netdev_ops = &ixgbe_netdev_ops;
5476         ixgbe_set_ethtool_ops(netdev);
5477         netdev->watchdog_timeo = 5 * HZ;
5478         strcpy(netdev->name, pci_name(pdev));
5479
5480         adapter->bd_number = cards_found;
5481
5482         /* Setup hw api */
5483         memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
5484         hw->mac.type  = ii->mac;
5485
5486         /* EEPROM */
5487         memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
5488         eec = IXGBE_READ_REG(hw, IXGBE_EEC);
5489         /* If the EEPROM is valid (bit 8 = 1), use the default read; otherwise use bit-bang */
5490         if (!(eec & (1 << 8)))
5491                 hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
5492
5493         /* PHY */
5494         memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
5495         hw->phy.sfp_type = ixgbe_sfp_type_unknown;
5496         /* ixgbe_identify_phy_generic will set prtad and mmds properly */
5497         hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
5498         hw->phy.mdio.mmds = 0;
5499         hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
5500         hw->phy.mdio.dev = netdev;
5501         hw->phy.mdio.mdio_read = ixgbe_mdio_read;
5502         hw->phy.mdio.mdio_write = ixgbe_mdio_write;
5503
5504         /* set up this timer and work struct before calling get_invariants
5505          * which might start the timer
5506          */
5507         init_timer(&adapter->sfp_timer);
5508         adapter->sfp_timer.function = &ixgbe_sfp_timer;
5509         adapter->sfp_timer.data = (unsigned long) adapter;
5510
5511         INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
5512
5513         /* multispeed fiber has its own work item, scheduled from GPI SDP1 context */
5514         INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
5515
5516         /* handles arrival of a new SFP+ module, scheduled from GPI SDP2 context */
5517         INIT_WORK(&adapter->sfp_config_module_task,
5518                   ixgbe_sfp_config_module_task);
5519
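             /* per-silicon setup: lets the 82598/82599-specific code fill in
              * its invariants (media type, queue and vector limits, etc.)
              */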
5520         ii->get_invariants(hw);
5521
5522         /* setup the private structure */
5523         err = ixgbe_sw_init(adapter);
5524         if (err)
5525                 goto err_sw_init;
5526
5527         /*
5528          * If there is a fan on this device and it has failed, log the
5529          * failure.
5530          */
5531         if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
5532                 u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
5533                 if (esdp & IXGBE_ESDP_SDP1)
5534                         DPRINTK(PROBE, CRIT,
5535                                 "Fan has stopped, replace the adapter\n");
5536         }
5537
5538         /* reset_hw fills in the perm_addr as well */
5539         err = hw->mac.ops.reset_hw(hw);
5540         if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
5541             hw->mac.type == ixgbe_mac_82598EB) {
5542                 /*
5543                  * Start a kernel thread to watch for a module to arrive.
5544                  * Only do this for 82598, since 82599 will generate
5545                  * interrupts on module arrival.
5546                  */
5547                 set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5548                 mod_timer(&adapter->sfp_timer,
5549                           round_jiffies(jiffies + (2 * HZ)));
5550                 err = 0;
5551         } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
5552                 dev_err(&adapter->pdev->dev, "failed to initialize because "
5553                         "an unsupported SFP+ module type was detected.\n"
5554                         "Reload the driver after installing a supported "
5555                         "module.\n");
5556                 goto err_sw_init;
5557         } else if (err) {
5558                 dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err);
5559                 goto err_sw_init;
5560         }
5561
5562         netdev->features = NETIF_F_SG |
5563                            NETIF_F_IP_CSUM |
5564                            NETIF_F_HW_VLAN_TX |
5565                            NETIF_F_HW_VLAN_RX |
5566                            NETIF_F_HW_VLAN_FILTER;
5567
5568         netdev->features |= NETIF_F_IPV6_CSUM;
5569         netdev->features |= NETIF_F_TSO;
5570         netdev->features |= NETIF_F_TSO6;
5571         netdev->features |= NETIF_F_GRO;
5572
5573         if (adapter->hw.mac.type == ixgbe_mac_82599EB)
5574                 netdev->features |= NETIF_F_SCTP_CSUM;
5575
5576         netdev->vlan_features |= NETIF_F_TSO;
5577         netdev->vlan_features |= NETIF_F_TSO6;
5578         netdev->vlan_features |= NETIF_F_IP_CSUM;
5579         netdev->vlan_features |= NETIF_F_IPV6_CSUM;
5580         netdev->vlan_features |= NETIF_F_SG;
5581
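             /* DCB and RSS cannot be enabled at the same time; DCB takes
              * precedence
              */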
5582         if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
5583                 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
5584
5585 #ifdef CONFIG_IXGBE_DCB
5586         netdev->dcbnl_ops = &dcbnl_ops;
5587 #endif
5588
5589 #ifdef IXGBE_FCOE
5590         if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
5591                 if (hw->mac.ops.get_device_caps) {
5592                         hw->mac.ops.get_device_caps(hw, &device_caps);
5593                         if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
5594                                 adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
5595                 }
5596         }
5597 #endif /* IXGBE_FCOE */
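             /* with a 64-bit DMA mask the hardware can address highmem pages
              * directly, so advertise NETIF_F_HIGHDMA
              */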
5598         if (pci_using_dac)
5599                 netdev->features |= NETIF_F_HIGHDMA;
5600
5601         if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
5602                 netdev->features |= NETIF_F_LRO;
5603
5604         /* make sure the EEPROM is good */
5605         if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
5606                 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
5607                 err = -EIO;
5608                 goto err_eeprom;
5609         }
5610
5611         memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
5612         memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
5613
5614         if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
5615                 dev_err(&pdev->dev, "invalid MAC address\n");
5616                 err = -EIO;
5617                 goto err_eeprom;
5618         }
5619
5620         init_timer(&adapter->watchdog_timer);
5621         adapter->watchdog_timer.function = &ixgbe_watchdog;
5622         adapter->watchdog_timer.data = (unsigned long)adapter;
5623
5624         INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
5625         INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
5626
5627         err = ixgbe_init_interrupt_scheme(adapter);
5628         if (err)
5629                 goto err_sw_init;
5630
5631         switch (pdev->device) {
5632         case IXGBE_DEV_ID_82599_KX4:
5633                 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
5634                                 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
5635                 /* disable legacy APM wakeup in GRC; wake is signalled via PME */
5636                 IXGBE_WRITE_REG(hw, IXGBE_GRC,
5637                              (IXGBE_READ_REG(hw, IXGBE_GRC) & ~IXGBE_GRC_APME));
5638                 break;
5639         default:
5640                 adapter->wol = 0;
5641                 break;
5642         }
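             /* record with the PM core whether this adapter may wake the
              * system
              */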
5643         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
5644
5645         /* pick up the PCI bus settings for reporting later */
5646         hw->mac.ops.get_bus_info(hw);
5647
5648         /* print bus type/speed/width info */
5649         dev_info(&pdev->dev, "(PCI Express:%s:%s) %pM\n",
5650                 ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
5651                  (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
5652                 ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
5653                  (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
5654                  (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
5655                  "Unknown"),
5656                 netdev->dev_addr);
5657         ixgbe_read_pba_num_generic(hw, &part_num);
5658         if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
5659                 dev_info(&pdev->dev, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %06x-%03x\n",
5660                          hw->mac.type, hw->phy.type, hw->phy.sfp_type,
5661                          (part_num >> 8), (part_num & 0xff));
5662         else
5663                 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
5664                          hw->mac.type, hw->phy.type,
5665                          (part_num >> 8), (part_num & 0xff));
5666
5667         if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
5668                 dev_warn(&pdev->dev, "PCI-Express bandwidth available for "
5669                          "this card is not sufficient for optimal "
5670                          "performance.\n");
5671                 dev_warn(&pdev->dev, "For optimal performance a x8 "
5672                          "PCI-Express slot is required.\n");
5673         }
5674
5675         /* save off EEPROM version number */
5676         hw->eeprom.ops.read(hw, 0x29, &adapter->eeprom_version);
5677
5678         /* reset the hardware with the new settings */
5679         err = hw->mac.ops.start_hw(hw);
5680
5681         if (err == IXGBE_ERR_EEPROM_VERSION) {
5682                 /* we are running on a pre-production device; log a warning */
5683                 dev_warn(&pdev->dev, "This device is a pre-production "
5684                          "adapter/LOM.  Please be aware there may be issues "
5685                          "associated with your hardware.  If you are "
5686                          "experiencing problems please contact your Intel or "
5687                          "hardware representative who provided you with this "
5688                          "hardware.\n");
5689         }
5690         strcpy(netdev->name, "eth%d");
5691         err = register_netdev(netdev);
5692         if (err)
5693                 goto err_register;
5694
5695         /* carrier off reporting is important to ethtool even BEFORE open */
5696         netif_carrier_off(netdev);
5697
5698         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5699             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5700                 INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
5701
5702 #ifdef CONFIG_IXGBE_DCA
5703         if (dca_add_requester(&pdev->dev) == 0) {
5704                 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
5705                 ixgbe_setup_dca(adapter);
5706         }
5707 #endif
5708         /* add the SAN MAC address to the netdev */
5709         ixgbe_add_sanmac_netdev(netdev);
5710
5711         dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
5712         cards_found++;
5713         return 0;
5714
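     /* error unwind: release resources in the reverse order they were
      * acquired
      */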
5715 err_register:
5716         ixgbe_release_hw_control(adapter);
5717         ixgbe_clear_interrupt_scheme(adapter);
5718 err_sw_init:
5719 err_eeprom:
5720         clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5721         del_timer_sync(&adapter->sfp_timer);
5722         cancel_work_sync(&adapter->sfp_task);
5723         cancel_work_sync(&adapter->multispeed_fiber_task);
5724         cancel_work_sync(&adapter->sfp_config_module_task);
5725         iounmap(hw->hw_addr);
5726 err_ioremap:
5727         free_netdev(netdev);
5728 err_alloc_etherdev:
5729         pci_release_selected_regions(pdev, pci_select_bars(pdev,
5730                                      IORESOURCE_MEM));
5731 err_pci_reg:
5732 err_dma:
5733         pci_disable_device(pdev);
5734         return err;
5735 }
5736
5737 /**
5738  * ixgbe_remove - Device Removal Routine
5739  * @pdev: PCI device information struct
5740  *
5741  * ixgbe_remove is called by the PCI subsystem to alert the driver
5742  * that it should release a PCI device.  This could be caused by a
5743  * Hot-Plug event, or because the driver is going to be removed from
5744  * memory.
5745  **/
5746 static void __devexit ixgbe_remove(struct pci_dev *pdev)
5747 {
5748         struct net_device *netdev = pci_get_drvdata(pdev);
5749         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5750         int err;
5751
5752         set_bit(__IXGBE_DOWN, &adapter->state);
5753         /* clear the module-not-found bit so the SFP worker won't
5754          * reschedule itself
5755          */
5756         clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
5757         del_timer_sync(&adapter->watchdog_timer);
5758
5759         del_timer_sync(&adapter->sfp_timer);
5760         cancel_work_sync(&adapter->watchdog_task);
5761         cancel_work_sync(&adapter->sfp_task);
5762         cancel_work_sync(&adapter->multispeed_fiber_task);
5763         cancel_work_sync(&adapter->sfp_config_module_task);
5764         if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
5765             adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
5766                 cancel_work_sync(&adapter->fdir_reinit_task);
5767         flush_scheduled_work();
5768
5769 #ifdef CONFIG_IXGBE_DCA
5770         if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
5771                 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
5772                 dca_remove_requester(&pdev->dev);
5773                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
5774         }
5775
5776 #endif
5777 #ifdef IXGBE_FCOE
5778         if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
5779                 ixgbe_cleanup_fcoe(adapter);
5780
5781 #endif /* IXGBE_FCOE */
5782
5783         /* remove the SAN MAC address added at probe time */
5784         ixgbe_del_sanmac_netdev(netdev);
5785
5786         if (netdev->reg_state == NETREG_REGISTERED)
5787                 unregister_netdev(netdev);
5788
5789         ixgbe_clear_interrupt_scheme(adapter);
5790
5791         ixgbe_release_hw_control(adapter);
5792
5793         iounmap(adapter->hw.hw_addr);
5794         pci_release_selected_regions(pdev, pci_select_bars(pdev,
5795                                      IORESOURCE_MEM));
5796
5797         DPRINTK(PROBE, INFO, "complete\n");
5798
5799         free_netdev(netdev);
5800
5801         err = pci_disable_pcie_error_reporting(pdev);
5802         if (err)
5803                 dev_err(&pdev->dev,
5804                         "pci_disable_pcie_error_reporting failed 0x%x\n", err);
5805
5806         pci_disable_device(pdev);
5807 }
5808
5809 /**
5810  * ixgbe_io_error_detected - called when PCI error is detected
5811  * @pdev: Pointer to PCI device
5812  * @state: The current pci connection state
5813  *
5814  * This function is called after a PCI bus error affecting
5815  * this device has been detected.
5816  */
5817 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
5818                                                 pci_channel_state_t state)
5819 {
5820         struct net_device *netdev = pci_get_drvdata(pdev);
5821         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5822
5823         netif_device_detach(netdev);
5824
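             /* a permanently failed slot cannot be recovered; ask the PCI
              * core to disconnect the device
              */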
5825         if (state == pci_channel_io_perm_failure)
5826                 return PCI_ERS_RESULT_DISCONNECT;
5827
5828         if (netif_running(netdev))
5829                 ixgbe_down(adapter);
5830         pci_disable_device(pdev);
5831
5832         /* Request a slot reset. */
5833         return PCI_ERS_RESULT_NEED_RESET;
5834 }
5835
5836 /**
5837  * ixgbe_io_slot_reset - called after the pci bus has been reset.
5838  * @pdev: Pointer to PCI device
5839  *
5840  * Restart the card from scratch, as if from a cold boot.
5841  */
5842 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
5843 {
5844         struct net_device *netdev = pci_get_drvdata(pdev);
5845         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5846         pci_ers_result_t result;
5847         int err;
5848
5849         if (pci_enable_device_mem(pdev)) {
5850                 DPRINTK(PROBE, ERR,
5851                         "Cannot re-enable PCI device after reset.\n");
5852                 result = PCI_ERS_RESULT_DISCONNECT;
5853         } else {
5854                 pci_set_master(pdev);
5855                 pci_restore_state(pdev);
5856
5857                 pci_wake_from_d3(pdev, false);
5858
5859                 ixgbe_reset(adapter);
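                     /* clear any wake-up status bits left set across the
                      * reset (WUS bits are write-1-to-clear)
                      */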
5860                 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5861                 result = PCI_ERS_RESULT_RECOVERED;
5862         }
5863
5864         err = pci_cleanup_aer_uncorrect_error_status(pdev);
5865         if (err) {
5866                 dev_err(&pdev->dev,
5867                   "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", err);
5868                 /* non-fatal, continue */
5869         }
5870
5871         return result;
5872 }
5873
5874 /**
5875  * ixgbe_io_resume - called when traffic can start flowing again.
5876  * @pdev: Pointer to PCI device
5877  *
5878  * This callback is called when the error recovery driver tells us that
5879  * it's OK to resume normal operation.
5880  */
5881 static void ixgbe_io_resume(struct pci_dev *pdev)
5882 {
5883         struct net_device *netdev = pci_get_drvdata(pdev);
5884         struct ixgbe_adapter *adapter = netdev_priv(netdev);
5885
5886         if (netif_running(netdev)) {
5887                 if (ixgbe_up(adapter)) {
5888                         DPRINTK(PROBE, INFO, "ixgbe_up failed after reset\n");
5889                         return;
5890                 }
5891         }
5892
5893         netif_device_attach(netdev);
5894 }
5895
5896 static struct pci_error_handlers ixgbe_err_handler = {
5897         .error_detected = ixgbe_io_error_detected,
5898         .slot_reset = ixgbe_io_slot_reset,
5899         .resume = ixgbe_io_resume,
5900 };
5901
5902 static struct pci_driver ixgbe_driver = {
5903         .name     = ixgbe_driver_name,
5904         .id_table = ixgbe_pci_tbl,
5905         .probe    = ixgbe_probe,
5906         .remove   = __devexit_p(ixgbe_remove),
5907 #ifdef CONFIG_PM
5908         .suspend  = ixgbe_suspend,
5909         .resume   = ixgbe_resume,
5910 #endif
5911         .shutdown = ixgbe_shutdown,
5912         .err_handler = &ixgbe_err_handler
5913 };
5914
5915 /**
5916  * ixgbe_init_module - Driver Registration Routine
5917  *
5918  * ixgbe_init_module is the first routine called when the driver is
5919  * loaded. All it does is register with the PCI subsystem.
5920  **/
5921 static int __init ixgbe_init_module(void)
5922 {
5923         int ret;
5924         printk(KERN_INFO "%s: %s - version %s\n", ixgbe_driver_name,
5925                ixgbe_driver_string, ixgbe_driver_version);
5926
5927         printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
5928
5929 #ifdef CONFIG_IXGBE_DCA
5930         dca_register_notify(&dca_notifier);
5931 #endif
5932
5933         ret = pci_register_driver(&ixgbe_driver);
5934         return ret;
5935 }
5936
5937 module_init(ixgbe_init_module);
5938
5939 /**
5940  * ixgbe_exit_module - Driver Exit Cleanup Routine
5941  *
5942  * ixgbe_exit_module is called just before the driver is removed
5943  * from memory.
5944  **/
5945 static void __exit ixgbe_exit_module(void)
5946 {
5947 #ifdef CONFIG_IXGBE_DCA
5948         dca_unregister_notify(&dca_notifier);
5949 #endif
5950         pci_unregister_driver(&ixgbe_driver);
5951 }
5952
5953 #ifdef CONFIG_IXGBE_DCA
5954 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
5955                             void *p)
5956 {
5957         int ret_val;
5958
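             /* fan the DCA event out to every ixgbe device bound to this
              * driver
              */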
5959         ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
5960                                          __ixgbe_notify_dca);
5961
5962         return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5963 }
5964
5965 #endif /* CONFIG_IXGBE_DCA */
5966 #ifdef DEBUG
5967 /**
5968  * ixgbe_get_hw_dev_name - return device name string
5969  * used by hardware layer to print debugging information
5970  **/
5971 char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw)
5972 {
5973         struct ixgbe_adapter *adapter = hw->back;
5974         return adapter->netdev->name;
5975 }
5976
5977 #endif
5978 module_exit(ixgbe_exit_module);
5979
5980 /* ixgbe_main.c */