ixgbe: Enable jumbo frame for FCoE feature in 82599

diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 01a88265d40148474683b3bb875966513b44ac01..1b4af3f541b76d37e323d9d9a06d43ea84f5692e 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -39,6 +39,7 @@
 #include <net/ip6_checksum.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <scsi/fc/fc_fcoe.h>
 
 #include "ixgbe.h"
 #include "ixgbe_common.h"
@@ -461,6 +462,7 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
  **/
 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
                               struct sk_buff *skb, u8 status,
+                              struct ixgbe_ring *ring,
                               union ixgbe_adv_rx_desc *rx_desc)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -468,7 +470,7 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
        bool is_vlan = (status & IXGBE_RXD_STAT_VP);
        u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
 
-       skb_record_rx_queue(skb, q_vector - &adapter->q_vector[0]);
+       skb_record_rx_queue(skb, ring->queue_index);
        if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
                if (adapter->vlgrp && is_vlan && (tag != 0))
                        vlan_gro_receive(napi, adapter->vlgrp, tag, skb);
@@ -782,7 +784,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
 
                skb->protocol = eth_type_trans(skb, adapter->netdev);
-               ixgbe_receive_skb(q_vector, skb, staterr, rx_desc);
+#ifdef IXGBE_FCOE
+               /* if ddp, not passing to ULD unless for FCP_RSP or error */
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+                       if (!ixgbe_fcoe_ddp(adapter, rx_desc, skb))
+                               goto next_desc;
+#endif /* IXGBE_FCOE */
+               ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
 
 next_desc:
                rx_desc->wb.upper.status_error = 0;
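
With the FCoE offload compiled in, the receive path now consults
ixgbe_fcoe_ddp() before handing a frame up: as the comment above says,
frames whose payload was already placed by direct data placement (DDP)
are not passed to the upper-layer driver unless they carry an FCP_RSP
or an error, which is what the goto next_desc short-circuit implements.
A minimal userspace sketch of that gating pattern, using a stand-in
fcoe_ddp_check() and a mock frame structure (both invented for the
example, not driver APIs):

    #include <stdbool.h>
    #include <stdio.h>

    struct frame { int id; bool placed_by_ddp; bool is_fcp_rsp; };

    /* stand-in for ixgbe_fcoe_ddp(): returns 0 when the frame should NOT
     * be handed up, mirroring the !ixgbe_fcoe_ddp() test in the hunk above */
    static int fcoe_ddp_check(const struct frame *f)
    {
        return (f->placed_by_ddp && !f->is_fcp_rsp) ? 0 : 1;
    }

    int main(void)
    {
        struct frame frames[] = {
            { 1, true,  false },    /* payload already DDP'ed: skipped */
            { 2, true,  true  },    /* FCP_RSP: always delivered       */
            { 3, false, false },    /* no DDP context: normal path     */
        };

        for (unsigned int i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
            if (!fcoe_ddp_check(&frames[i]))
                continue;           /* the "goto next_desc" above */
            printf("frame %d handed to the upper layer\n", frames[i].id);
        }
        return 0;
    }
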
@@ -835,7 +843,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
         * corresponding register.
         */
        for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-               q_vector = &adapter->q_vector[v_idx];
+               q_vector = adapter->q_vector[v_idx];
                /* XXX for_each_bit(...) */
                r_idx = find_first_bit(q_vector->rxr_idx,
                                       adapter->num_rx_queues);
@@ -984,8 +992,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
        struct ixgbe_adapter *adapter = q_vector->adapter;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
-                              sizeof(struct ixgbe_q_vector);
+       int i, r_idx, v_idx = q_vector->v_idx;
        struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
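
The old code derived the vector number by pointer arithmetic against an
embedded q_vector array. Later in this diff (ixgbe_alloc_q_vectors())
that array becomes an array of pointers to individually allocated
structures, so the subtraction no longer yields an index; instead each
vector stores its own v_idx when it is created. A small compilable
illustration of why the subtraction trick only works for a contiguous
array (mock types, not the driver's):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct q_vector { int v_idx; /* index stored at allocation time */ };

    #define NVEC 4

    int main(void)
    {
        /* contiguous array: the index is recoverable by pointer subtraction */
        struct q_vector embedded[NVEC];
        struct q_vector *qv = &embedded[2];
        printf("embedded: index by subtraction = %td\n", qv - &embedded[0]);

        /* array of pointers to separate allocations: subtracting pointers to
         * distinct objects is undefined, so the index must be stored instead */
        struct q_vector *alloced[NVEC];
        for (int i = 0; i < NVEC; i++) {
            alloced[i] = malloc(sizeof(*alloced[i]));
            alloced[i]->v_idx = i;
        }
        printf("allocated: stored v_idx = %d\n", alloced[2]->v_idx);

        for (int i = 0; i < NVEC; i++)
            free(alloced[i]);
        return 0;
    }
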
@@ -1303,19 +1310,21 @@ static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget)
 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                      int r_idx)
 {
-       a->q_vector[v_idx].adapter = a;
-       set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
-       a->q_vector[v_idx].rxr_count++;
-       a->rx_ring[r_idx].v_idx = 1 << v_idx;
+       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+       set_bit(r_idx, q_vector->rxr_idx);
+       q_vector->rxr_count++;
+       a->rx_ring[r_idx].v_idx = (u64)1 << v_idx;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
-                                     int r_idx)
+                                     int t_idx)
 {
-       a->q_vector[v_idx].adapter = a;
-       set_bit(r_idx, a->q_vector[v_idx].txr_idx);
-       a->q_vector[v_idx].txr_count++;
-       a->tx_ring[r_idx].v_idx = 1 << v_idx;
+       struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+
+       set_bit(t_idx, q_vector->txr_idx);
+       q_vector->txr_count++;
+       a->tx_ring[t_idx].v_idx = (u64)1 << v_idx;
 }
 
 /**
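
map_vector_to_rxq() and map_vector_to_txq() now build the ring's
interrupt mask as (u64)1 << v_idx. With a plain int constant the shift
happens in 32-bit arithmetic, so for vector indices of 31 and above the
bit is lost (or the shift is undefined) before the result is ever
widened to the 64-bit v_idx field. A short demonstration, assuming only
that the destination mask is 64 bits wide as the assignment above
implies:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int v_idx = 40;                 /* a vector index above 31 */

        /* 32-bit shift first, widened afterwards; written with an explicit
         * "& 31" because a plain 1 << 40 on an int would be undefined, and
         * x86 hardware masks the shift count like this anyway */
        uint64_t truncated = (uint64_t)(1u << (v_idx & 31));

        /* widened first, then shifted: the single bit that was intended */
        uint64_t intended = (uint64_t)1 << v_idx;

        printf("1u << 40 (32-bit): 0x%016" PRIx64 "\n", truncated);
        printf("(u64)1 << 40:      0x%016" PRIx64 "\n", intended);
        return 0;
    }
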
@@ -1411,7 +1420,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                          (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
                          &ixgbe_msix_clean_many)
        for (vector = 0; vector < q_vectors; vector++) {
-               handler = SET_HANDLER(&adapter->q_vector[vector]);
+               handler = SET_HANDLER(adapter->q_vector[vector]);
 
                if(handler == &ixgbe_msix_clean_rx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
@@ -1427,7 +1436,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
-                                 &(adapter->q_vector[vector]));
+                                 adapter->q_vector[vector]);
                if (err) {
                        DPRINTK(PROBE, ERR,
                                "request_irq failed for MSIX interrupt "
@@ -1450,7 +1459,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 free_queue_irqs:
        for (i = vector - 1; i >= 0; i--)
                free_irq(adapter->msix_entries[--vector].vector,
-                        &(adapter->q_vector[i]));
+                        adapter->q_vector[i]);
        adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
@@ -1461,7 +1470,7 @@ out:
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
-       struct ixgbe_q_vector *q_vector = adapter->q_vector;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u8 current_itr;
        u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
@@ -1539,6 +1548,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        struct net_device *netdev = data;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
        u32 eicr;
 
        /*
@@ -1566,13 +1576,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
 
        ixgbe_check_fan_failure(adapter, eicr);
 
-       if (napi_schedule_prep(&adapter->q_vector[0].napi)) {
+       if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0].total_packets = 0;
                adapter->tx_ring[0].total_bytes = 0;
                adapter->rx_ring[0].total_packets = 0;
                adapter->rx_ring[0].total_bytes = 0;
                /* would disable interrupts here but EIAM disabled it */
-               __napi_schedule(&adapter->q_vector[0].napi);
+               __napi_schedule(&(q_vector->napi));
        }
 
        return IRQ_HANDLED;
@@ -1583,7 +1593,7 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
        int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        for (i = 0; i < q_vectors; i++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
                bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
                bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
                q_vector->rxr_count = 0;
@@ -1634,7 +1644,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
                i--;
                for (; i >= 0; i--) {
                        free_irq(adapter->msix_entries[i].vector,
-                                &(adapter->q_vector[i]));
+                                adapter->q_vector[i]);
                }
 
                ixgbe_reset_q_vectors(adapter);
@@ -1737,7 +1747,29 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        unsigned long mask;
 
        if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-               queue0 = index;
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+                       if (dcb_i == 8)
+                               queue0 = index >> 4;
+                       else if (dcb_i == 4)
+                               queue0 = index >> 5;
+                       else
+                               dev_err(&adapter->pdev->dev, "Invalid DCB "
+                                       "configuration\n");
+#ifdef IXGBE_FCOE
+                       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                               struct ixgbe_ring_feature *f;
+
+                               rx_ring = &adapter->rx_ring[queue0];
+                               f = &adapter->ring_feature[RING_F_FCOE];
+                               if ((queue0 == 0) && (index > rx_ring->reg_idx))
+                                       queue0 = f->mask + index -
+                                                rx_ring->reg_idx - 1;
+                       }
+#endif /* IXGBE_FCOE */
+               } else {
+                       queue0 = index;
+               }
        } else {
                mask = (unsigned long) adapter->ring_feature[RING_F_RSS].mask;
                queue0 = index & mask;
@@ -1751,28 +1783,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, int index)
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
 
+       srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                 IXGBE_SRRCTL_BSIZEHDR_MASK;
+
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
-               u16 bufsz = IXGBE_RXBUFFER_2048;
-               /* grow the amount we can receive on large page machines */
-               if (bufsz < (PAGE_SIZE / 2))
-                       bufsz = (PAGE_SIZE / 2);
-               /* cap the bufsz at our largest descriptor size */
-               bufsz = min((u16)IXGBE_MAX_RXBUFFER, bufsz);
-
-               srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
+               srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#else
+               srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+#endif
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-               srrctl |= ((IXGBE_RX_HDR_SIZE <<
-                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                          IXGBE_SRRCTL_BSIZEHDR_MASK);
        } else {
+               srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
+                         IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
-
-               if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
-                       srrctl |= IXGBE_RXBUFFER_2048 >>
-                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
-               else
-                       srrctl |= rx_ring->rx_buf_len >>
-                                 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
        }
 
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
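
The rewritten ixgbe_configure_srrctl() programs the header size
unconditionally and derives the packet buffer size field straight from
rx_buf_len rounded up to the next kilobyte, instead of special-casing
MAXIMUM_ETHERNET_VLAN_SIZE. A worked example of that field arithmetic;
the shift of 10 (packet buffer size in 1 KB units) is restated here
from ixgbe_type.h purely for illustration, and bsizepkt_field() and
ALIGN_UP() are names made up for the sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define SRRCTL_BSIZEPKT_SHIFT  10  /* packet buffer size in 1 KB units */

    /* round up to the next multiple of a power of two, like kernel ALIGN() */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    static uint32_t bsizepkt_field(uint32_t rx_buf_len)
    {
        return ALIGN_UP(rx_buf_len, 1024) >> SRRCTL_BSIZEPKT_SHIFT;
    }

    int main(void)
    {
        /* 2048-byte buffers encode as 2 units, a 3072-byte FCoE buffer as 3 */
        printf("rx_buf_len 2048 -> BSIZEPKT %u\n", bsizepkt_field(2048));
        printf("rx_buf_len 3072 -> BSIZEPKT %u\n", bsizepkt_field(3072));
        /* an odd length such as 1522 still rounds up to a whole 1 KB unit */
        printf("rx_buf_len 1522 -> BSIZEPKT %u\n", bsizepkt_field(1522));
        return 0;
    }
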
@@ -1804,6 +1828,11 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
        /* Decide whether to use packet split mode or not */
        adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+#endif /* IXGBE_FCOE */
+
        /* Set the RX buffer length according to the mode */
        if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
                rx_buf_len = IXGBE_RX_HDR_SIZE;
@@ -1812,7 +1841,8 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                                      IXGBE_PSRTYPE_UDPHDR |
                                      IXGBE_PSRTYPE_IPV4HDR |
-                                     IXGBE_PSRTYPE_IPV6HDR;
+                                     IXGBE_PSRTYPE_IPV6HDR |
+                                     IXGBE_PSRTYPE_L2HDR;
                        IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
                }
        } else {
@@ -1834,6 +1864,10 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        else
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+#endif
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
        rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
@@ -1855,6 +1889,17 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
                adapter->rx_ring[i].tail = IXGBE_RDT(j);
                adapter->rx_ring[i].rx_buf_len = rx_buf_len;
 
+#ifdef IXGBE_FCOE
+               if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+                       struct ixgbe_ring_feature *f;
+                       f = &adapter->ring_feature[RING_F_FCOE];
+                       if ((rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+                           (i >= f->mask) && (i < f->mask + f->indices))
+                               adapter->rx_ring[i].rx_buf_len =
+                                       IXGBE_FCOE_JUMBO_FRAME_SIZE;
+               }
+
+#endif /* IXGBE_FCOE */
                ixgbe_configure_srrctl(adapter, j);
        }
 
@@ -2135,7 +2180,7 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;
-               q_vector = &adapter->q_vector[q_idx];
+               q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi = &q_vector->napi;
@@ -2158,7 +2203,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
                q_vectors = 1;
 
        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               q_vector = &adapter->q_vector[q_idx];
+               q_vector = adapter->q_vector[q_idx];
                if (!q_vector->rxr_count)
                        continue;
                napi_disable(&q_vector->napi);
@@ -2234,6 +2279,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
        netif_set_gso_max_size(netdev, 65536);
 #endif
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
        ixgbe_configure_tx(adapter);
        ixgbe_configure_rx(adapter);
        for (i = 0; i < adapter->num_rx_queues; i++)
@@ -2388,6 +2438,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
        }
 
+#ifdef IXGBE_FCOE
+       /* adjust max frame to be able to do baby jumbo for FCoE */
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE))
+               max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+
+#endif /* IXGBE_FCOE */
        mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
        if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
                mhadd &= ~IXGBE_MHADD_MFS_MASK;
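
FCoE needs "baby jumbo" frames: a full 2112-byte Fibre Channel payload
plus FC and FCoE framing does not fit in a standard 1518-byte Ethernet
frame, so the maximum frame size programmed into MHADD.MFS is raised to
the FCoE jumbo size whenever it would otherwise be smaller, regardless
of the netdev MTU. A small sketch of that adjustment; the 3072-byte
jumbo size and the MFS shift of 16 are repeated from the driver's
headers only as assumptions for the example, and mhadd_mfs() is a
made-up helper:

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN                14
    #define ETH_FCS_LEN             4
    #define FCOE_JUMBO_FRAME_SIZE   3072    /* per ixgbe_fcoe.h */
    #define MHADD_MFS_SHIFT         16      /* per ixgbe_type.h */

    static uint32_t mhadd_mfs(unsigned int mtu, int fcoe_enabled)
    {
        unsigned int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN;

        /* adjust max frame to be able to do baby jumbo for FCoE */
        if (fcoe_enabled && max_frame < FCOE_JUMBO_FRAME_SIZE)
            max_frame = FCOE_JUMBO_FRAME_SIZE;

        return (uint32_t)max_frame << MHADD_MFS_SHIFT;
    }

    int main(void)
    {
        /* standard 1500-byte MTU: 1518 without FCoE, 3072 with it */
        printf("no FCoE: MFS = %u\n", mhadd_mfs(1500, 0) >> MHADD_MFS_SHIFT);
        printf("FCoE:    MFS = %u\n", mhadd_mfs(1500, 1) >> MHADD_MFS_SHIFT);
        return 0;
    }
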
@@ -2450,6 +2507,17 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
 
        ixgbe_irq_enable(adapter);
 
+       /*
+        * If this adapter has a fan, check to see if we had a failure
+        * before we enabled the interrupt.
+        */
+       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               if (esdp & IXGBE_ESDP_SDP1)
+                       DPRINTK(DRV, CRIT,
+                               "Fan has stopped, replace the adapter\n");
+       }
+
        /*
         * For hot-pluggable SFP+ devices, a new SFP+ module may have
         * arrived before interrupts were enabled.  We need to kick off
@@ -2498,8 +2566,6 @@ int ixgbe_up(struct ixgbe_adapter *adapter)
        /* hardware has been reset, we need to reload some things */
        ixgbe_configure(adapter);
 
-       ixgbe_napi_add_all(adapter);
-
        return ixgbe_up_complete(adapter);
 }
 
@@ -2809,6 +2875,47 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
        return ret;
 }
 
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * @adapter: board private structure to initialize
+ *
+ * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
+ * The ring feature mask is not used as a mask for FCoE, as it can take any 8
+ * rx queues out of the max number of rx queues, instead, it is used as the
+ * index of the first rx queue used by FCoE.
+ *
+ **/
+static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+{
+       bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+       f->indices = min((int)num_online_cpus(), f->indices);
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       DPRINTK(PROBE, INFO, "FCOE enabled with DCB \n");
+                       ixgbe_set_dcb_queues(adapter);
+               }
+#endif
+               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+                       DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
+                       ixgbe_set_rss_queues(adapter);
+               }
+               /* adding FCoE rx rings to the end */
+               f->mask = adapter->num_rx_queues;
+               adapter->num_rx_queues += f->indices;
+               if (adapter->num_tx_queues == 0)
+                       adapter->num_tx_queues = f->indices;
+
+               ret = true;
+       }
+
+       return ret;
+}
+
+#endif /* IXGBE_FCOE */
 /*
  * ixgbe_set_num_queues: Allocate queues for device, feature dependant
  * @adapter: board private structure to initialize
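
As the kernel-doc above explains, RING_F_FCOE reuses the feature mask
slot in an unusual way: f->mask is not a bitmask but the index of the
first rx queue given to FCoE, and up to eight FCoE rings (the FCRETA
size) are simply appended after whatever RSS or DCB already claimed. A
compact model of the resulting ring layout (queue counts are
hypothetical and the structs are simplified stand-ins):

    #include <stdio.h>

    struct ring_feature { int indices; int mask; };

    int main(void)
    {
        int num_rx_queues = 4;                  /* say RSS settled on 4 rings */
        struct ring_feature fcoe = { .indices = 8 };    /* FCRETA: up to 8 */

        /* adding FCoE rx rings to the end, as in ixgbe_set_fcoe_queues() */
        fcoe.mask = num_rx_queues;
        num_rx_queues += fcoe.indices;

        for (int i = 0; i < num_rx_queues; i++)
            printf("rx ring %2d: %s\n", i,
                   (i >= fcoe.mask && i < fcoe.mask + fcoe.indices) ?
                   "FCoE" : "RSS");
        return 0;
    }
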
@@ -2822,6 +2929,11 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 {
+#ifdef IXGBE_FCOE
+       if (ixgbe_set_fcoe_queues(adapter))
+               goto done;
+
+#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
        if (ixgbe_set_dcb_queues(adapter))
                goto done;
@@ -2877,9 +2989,6 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
-               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               ixgbe_set_num_queues(adapter);
        } else {
                adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
                /*
@@ -3000,6 +3109,39 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }
 #endif
 
+#ifdef IXGBE_FCOE
+/**
+ * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
+ * @adapter: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
+ *
+ */
+static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
+{
+       int i, fcoe_i = 0;
+       bool ret = false;
+       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+#ifdef CONFIG_IXGBE_DCB
+               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+                       ixgbe_cache_ring_dcb(adapter);
+                       fcoe_i = adapter->rx_ring[0].reg_idx + 1;
+               }
+#endif /* CONFIG_IXGBE_DCB */
+               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+                       ixgbe_cache_ring_rss(adapter);
+                       fcoe_i = f->mask;
+               }
+               for (i = 0; i < f->indices; i++, fcoe_i++)
+                       adapter->rx_ring[f->mask + i].reg_idx = fcoe_i;
+               ret = true;
+       }
+       return ret;
+}
+
+#endif /* IXGBE_FCOE */
 /**
  * ixgbe_cache_ring_register - Descriptor ring to register mapping
  * @adapter: board private structure to initialize
@@ -3017,6 +3159,11 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
        adapter->rx_ring[0].reg_idx = 0;
        adapter->tx_ring[0].reg_idx = 0;
 
+#ifdef IXGBE_FCOE
+       if (ixgbe_cache_ring_fcoe(adapter))
+               return;
+
+#endif /* IXGBE_FCOE */
 #ifdef CONFIG_IXGBE_DCB
        if (ixgbe_cache_ring_dcb(adapter))
                return;
@@ -3103,31 +3250,20 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
         * mean we disable MSI-X capabilities of the adapter. */
        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
-       if (!adapter->msix_entries) {
-               adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
-               adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-               ixgbe_set_num_queues(adapter);
-               kfree(adapter->tx_ring);
-               kfree(adapter->rx_ring);
-               err = ixgbe_alloc_queues(adapter);
-               if (err) {
-                       DPRINTK(PROBE, ERR, "Unable to allocate memory "
-                               "for queues\n");
-                       goto out;
-               }
+       if (adapter->msix_entries) {
+               for (vector = 0; vector < v_budget; vector++)
+                       adapter->msix_entries[vector].entry = vector;
 
-               goto try_msi;
-       }
-
-       for (vector = 0; vector < v_budget; vector++)
-               adapter->msix_entries[vector].entry = vector;
+               ixgbe_acquire_msix_vectors(adapter, v_budget);
 
-       ixgbe_acquire_msix_vectors(adapter, v_budget);
+               if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+                       goto out;
+       }
 
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               goto out;
+       adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+       adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+       ixgbe_set_num_queues(adapter);
 
-try_msi:
        err = pci_enable_msi(adapter->pdev);
        if (!err) {
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
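
Interrupt setup is restructured from goto-based error paths into one
forward ladder: attempt MSI-X first, and only when the entry table
cannot be allocated or ixgbe_acquire_msix_vectors() fails, clear the
multiqueue features (DCB, RSS), recount the queues and fall back to MSI
or legacy interrupts. A skeleton of that fallback order with stubbed
probe functions (try_msix() and try_msi() are placeholders, not kernel
APIs):

    #include <stdbool.h>
    #include <stdio.h>

    /* stubs standing in for the pci_enable_msix()/pci_enable_msi() outcome */
    static bool try_msix(void) { return false; }    /* pretend MSI-X failed */
    static bool try_msi(void)  { return true; }

    int main(void)
    {
        bool dcb = true, rss = true;
        const char *mode;

        if (try_msix()) {
            mode = "MSI-X";             /* multiqueue features kept */
        } else {
            /* fall back: single queue, so DCB and RSS make no sense */
            dcb = rss = false;
            mode = try_msi() ? "MSI" : "legacy INTx";
        }

        printf("interrupt mode: %s (DCB %d, RSS %d)\n", mode, dcb, rss);
        return 0;
    }
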
@@ -3142,6 +3278,87 @@ out:
        return err;
 }
 
+/**
+ * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt.  If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int q_idx, num_q_vectors;
+       struct ixgbe_q_vector *q_vector;
+       int napi_vectors;
+       int (*poll)(struct napi_struct *, int);
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+               napi_vectors = adapter->num_rx_queues;
+               poll = &ixgbe_clean_rxonly;
+       } else {
+               num_q_vectors = 1;
+               napi_vectors = 1;
+               poll = &ixgbe_poll;
+       }
+
+       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+               q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+               if (!q_vector)
+                       goto err_out;
+               q_vector->adapter = adapter;
+               q_vector->v_idx = q_idx;
+               q_vector->eitr = adapter->eitr_param;
+               if (q_idx < napi_vectors)
+                       netif_napi_add(adapter->netdev, &q_vector->napi,
+                                      (*poll), 64);
+               adapter->q_vector[q_idx] = q_vector;
+       }
+
+       return 0;
+
+err_out:
+       while (q_idx) {
+               q_idx--;
+               q_vector = adapter->q_vector[q_idx];
+               netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+               adapter->q_vector[q_idx] = NULL;
+       }
+       return -ENOMEM;
+}
+
+/**
+ * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors.  In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
+{
+       int q_idx, num_q_vectors;
+       int napi_vectors;
+
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+               napi_vectors = adapter->num_rx_queues;
+       } else {
+               num_q_vectors = 1;
+               napi_vectors = 1;
+       }
+
+       for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[q_idx];
+
+               adapter->q_vector[q_idx] = NULL;
+               if (q_idx < napi_vectors)
+                       netif_napi_del(&q_vector->napi);
+               kfree(q_vector);
+       }
+}
+
 void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
 {
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
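
ixgbe_alloc_q_vectors() replaces the old embedded array with one
kzalloc() per vector, and its error path unwinds exactly the vectors
that were already created, deleting the NAPI context before freeing
each one. The same allocate-then-roll-back-on-failure shape in a
self-contained sketch, with plain calloc()/free() and comments standing
in for netif_napi_add()/netif_napi_del():

    #include <stdio.h>
    #include <stdlib.h>

    struct q_vector { int v_idx; };

    static int alloc_vectors(struct q_vector **vec, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            vec[i] = calloc(1, sizeof(*vec[i]));
            if (!vec[i])
                goto err_out;
            vec[i]->v_idx = i;
            /* netif_napi_add() would register the polling context here */
        }
        return 0;

    err_out:
        while (i) {                 /* unwind only what was allocated */
            i--;
            /* netif_napi_del() would unregister the polling context here */
            free(vec[i]);
            vec[i] = NULL;
        }
        return -1;                  /* -ENOMEM in the driver */
    }

    int main(void)
    {
        struct q_vector *vectors[16] = { 0 };

        if (alloc_vectors(vectors, 16) == 0)
            printf("allocated 16 vectors, e.g. v_idx[5] = %d\n",
                   vectors[5]->v_idx);

        for (int i = 0; i < 16; i++)
            free(vectors[i]);
        return 0;
    }
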
@@ -3173,18 +3390,25 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
        /* Number of supported queues */
        ixgbe_set_num_queues(adapter);
 
-       err = ixgbe_alloc_queues(adapter);
-       if (err) {
-               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
-               goto err_alloc_queues;
-       }
-
        err = ixgbe_set_interrupt_capability(adapter);
        if (err) {
                DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }
 
+       err = ixgbe_alloc_q_vectors(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queue "
+                       "vectors\n");
+               goto err_alloc_q_vectors;
+       }
+
+       err = ixgbe_alloc_queues(adapter);
+       if (err) {
+               DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+               goto err_alloc_queues;
+       }
+
        DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
                "Tx Queue count = %u\n",
                (adapter->num_rx_queues > 1) ? "Enabled" :
@@ -3194,11 +3418,30 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
 
        return 0;
 
+err_alloc_queues:
+       ixgbe_free_q_vectors(adapter);
+err_alloc_q_vectors:
+       ixgbe_reset_interrupt_capability(adapter);
 err_set_interrupt:
+       return err;
+}
+
+/**
+ * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @adapter: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
+{
        kfree(adapter->tx_ring);
        kfree(adapter->rx_ring);
-err_alloc_queues:
-       return err;
+       adapter->tx_ring = NULL;
+       adapter->rx_ring = NULL;
+
+       ixgbe_free_q_vectors(adapter);
+       ixgbe_reset_interrupt_capability(adapter);
 }
 
 /**
@@ -3284,12 +3527,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB)
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (hw->device_id == IXGBE_DEV_ID_82598AT)
+                       adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       else if (hw->mac.type == ixgbe_mac_82599EB) {
+       } else if (hw->mac.type == ixgbe_mac_82599EB) {
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags |= IXGBE_FLAG_RSC_CAPABLE;
                adapter->flags |= IXGBE_FLAG_RSC_ENABLED;
+#ifdef IXGBE_FCOE
+               adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
+               adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+#endif /* IXGBE_FCOE */
        }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -3619,8 +3868,6 @@ static int ixgbe_open(struct net_device *netdev)
 
        ixgbe_configure(adapter);
 
-       ixgbe_napi_add_all(adapter);
-
        err = ixgbe_request_irq(adapter);
        if (err)
                goto err_req_irq;
@@ -3672,55 +3919,6 @@ static int ixgbe_close(struct net_device *netdev)
        return 0;
 }
 
-/**
- * ixgbe_napi_add_all - prep napi structs for use
- * @adapter: private struct
- *
- * helper function to napi_add each possible q_vector->napi
- */
-void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
-{
-       int q_idx, q_vectors;
-       struct net_device *netdev = adapter->netdev;
-       int (*poll)(struct napi_struct *, int);
-
-       /* check if we already have our netdev->napi_list populated */
-       if (&netdev->napi_list != netdev->napi_list.next)
-               return;
-
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               poll = &ixgbe_clean_rxonly;
-               /* Only enable as many vectors as we have rx queues. */
-               q_vectors = adapter->num_rx_queues;
-       } else {
-               poll = &ixgbe_poll;
-               /* only one q_vector for legacy modes */
-               q_vectors = 1;
-       }
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-               netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64);
-       }
-}
-
-void ixgbe_napi_del_all(struct ixgbe_adapter *adapter)
-{
-       int q_idx;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx];
-               if (!q_vector->rxr_count)
-                       continue;
-               netif_napi_del(&q_vector->napi);
-       }
-}
-
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
@@ -3730,7 +3928,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
-       err = pci_enable_device(pdev);
+
+       err = pci_enable_device_mem(pdev);
        if (err) {
                printk(KERN_ERR "ixgbe: Cannot enable PCI device from "
                                "suspend\n");
@@ -3749,6 +3948,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        ixgbe_reset(adapter);
 
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+
        if (netif_running(netdev)) {
                err = ixgbe_open(adapter->netdev);
                if (err)
@@ -3780,11 +3981,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_free_all_tx_resources(adapter);
                ixgbe_free_all_rx_resources(adapter);
        }
-       ixgbe_reset_interrupt_capability(adapter);
-       ixgbe_napi_del_all(adapter);
-       INIT_LIST_HEAD(&netdev->napi_list);
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
+       ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
@@ -3923,6 +4120,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+#ifdef IXGBE_FCOE
+               adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+               adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+#endif /* IXGBE_FCOE */
        } else {
                adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
                adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
@@ -3998,7 +4203,7 @@ static void ixgbe_watchdog(unsigned long data)
                int i;
 
                for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++)
-                       eics |= (1 << i);
+                       eics |= ((u64)1 << i);
 
                /* Cause software interrupt to ensure rx rings are cleaned */
                switch (hw->mac.type) {
@@ -4357,10 +4562,12 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                         struct ixgbe_ring *tx_ring,
-                        struct sk_buff *skb, unsigned int first)
+                        struct sk_buff *skb, u32 tx_flags,
+                        unsigned int first)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int len = skb_headlen(skb);
+       unsigned int len;
+       unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
@@ -4375,6 +4582,11 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
        map = skb_shinfo(skb)->dma_maps;
 
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE)
+               /* excluding fcoe_crc_eof for FCoE */
+               total -= sizeof(struct fcoe_crc_eof);
+
+       len = min(skb_headlen(skb), total);
        while (len) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
@@ -4385,6 +4597,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                tx_buffer_info->next_to_watch = i;
 
                len -= size;
+               total -= size;
                offset += size;
                count++;
 
@@ -4399,7 +4612,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                struct skb_frag_struct *frag;
 
                frag = &skb_shinfo(skb)->frags[f];
-               len = frag->size;
+               len = min((unsigned int)frag->size, total);
                offset = 0;
 
                while (len) {
@@ -4416,9 +4629,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        tx_buffer_info->next_to_watch = i;
 
                        len -= size;
+                       total -= size;
                        offset += size;
                        count++;
                }
+               if (total == 0)
+                       break;
        }
 
        tx_ring->tx_buffer_info[i].skb = skb;
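
In ixgbe_tx_map() the FCoE path stops DMA-mapping once 'total' bytes
are covered, where total is skb->len minus sizeof(struct fcoe_crc_eof):
per the comment above, the CRC/EOF trailer at the end of the skb is
excluded from the descriptors rather than mapped as payload. A worked
example of how that cap trims the last fragment; the fragment sizes are
invented and the 8-byte trailer size is an assumption based on the
usual fcoe_crc_eof layout:

    #include <stdio.h>

    #define MIN(a, b)   ((a) < (b) ? (a) : (b))

    int main(void)
    {
        unsigned int skb_len = 2148;            /* hypothetical FCoE frame  */
        unsigned int headlen = 64;              /* linear part of the skb   */
        unsigned int frags[] = { 1024, 1060 };  /* paged frags, trailer last */
        unsigned int crc_eof = 8;               /* sizeof(struct fcoe_crc_eof) */

        unsigned int total = skb_len - crc_eof; /* excluding fcoe_crc_eof   */
        unsigned int mapped = 0, len;

        len = MIN(headlen, total);
        mapped += len;
        total -= len;

        for (unsigned int f = 0; f < 2 && total; f++) {
            len = MIN(frags[f], total);         /* last fragment is shortened */
            mapped += len;
            total -= len;
        }

        printf("skb bytes: %u, DMA-mapped: %u (trailer of %u bytes skipped)\n",
               skb_len, mapped, crc_eof);
        return 0;
    }
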
@@ -4460,6 +4676,13 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
                olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
                                 IXGBE_ADVTXD_POPTS_SHIFT;
 
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+               olinfo_status |= IXGBE_ADVTXD_CC;
+               olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+               if (tx_flags & IXGBE_TX_FLAGS_FSO)
+                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+       }
+
        olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
        i = tx_ring->next_to_use;
@@ -4556,10 +4779,16 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
                tx_flags |= IXGBE_TX_FLAGS_VLAN;
        }
-       /* three things can cause us to need a context descriptor */
+
+       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+           (skb->protocol == htons(ETH_P_FCOE)))
+               tx_flags |= IXGBE_TX_FLAGS_FCOE;
+
+       /* four things can cause us to need a context descriptor */
        if (skb_is_gso(skb) ||
            (skb->ip_summed == CHECKSUM_PARTIAL) ||
-           (tx_flags & IXGBE_TX_FLAGS_VLAN))
+           (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
+           (tx_flags & IXGBE_TX_FLAGS_FCOE))
                count++;
 
        count += TXD_USE_COUNT(skb_headlen(skb));
@@ -4571,23 +4800,35 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
                return NETDEV_TX_BUSY;
        }
 
-       if (skb->protocol == htons(ETH_P_IP))
-               tx_flags |= IXGBE_TX_FLAGS_IPV4;
        first = tx_ring->next_to_use;
-       tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
-       if (tso < 0) {
-               dev_kfree_skb_any(skb);
-               return NETDEV_TX_OK;
-       }
-
-       if (tso)
-               tx_flags |= IXGBE_TX_FLAGS_TSO;
-       else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
-                (skb->ip_summed == CHECKSUM_PARTIAL))
-               tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
+#ifdef IXGBE_FCOE
+               /* setup tx offload for FCoE */
+               tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
+               if (tso)
+                       tx_flags |= IXGBE_TX_FLAGS_FSO;
+#endif /* IXGBE_FCOE */
+       } else {
+               if (skb->protocol == htons(ETH_P_IP))
+                       tx_flags |= IXGBE_TX_FLAGS_IPV4;
+               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               if (tso < 0) {
+                       dev_kfree_skb_any(skb);
+                       return NETDEV_TX_OK;
+               }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, first);
+               if (tso)
+                       tx_flags |= IXGBE_TX_FLAGS_TSO;
+               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+                        (skb->ip_summed == CHECKSUM_PARTIAL))
+                       tx_flags |= IXGBE_TX_FLAGS_CSUM;
+       }
 
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
        if (count) {
                ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
                               hdr_len);
@@ -4642,6 +4883,82 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
        return 0;
 }
 
+static int
+ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+       u16 value;
+       int rc;
+
+       if (prtad != hw->phy.mdio.prtad)
+               return -EINVAL;
+       rc = hw->phy.ops.read_reg(hw, addr, devad, &value);
+       if (!rc)
+               rc = value;
+       return rc;
+}
+
+static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
+                           u16 addr, u16 value)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       if (prtad != hw->phy.mdio.prtad)
+               return -EINVAL;
+       return hw->phy.ops.write_reg(hw, addr, devad, value);
+}
+
+static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+       return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
+}
+
+/**
+ * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding
+ * netdev->dev_addr_list
+ * @netdev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_add_sanmac_netdev(struct net_device *dev)
+{
+       int err = 0;
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+       if (is_valid_ether_addr(mac->san_addr)) {
+               rtnl_lock();
+               err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+               rtnl_unlock();
+       }
+       return err;
+}
+
+/**
+ * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding
+ * netdev->dev_addr_list
+ * @netdev: network interface device structure
+ *
+ * Returns non-zero on failure
+ **/
+static int ixgbe_del_sanmac_netdev(struct net_device *dev)
+{
+       int err = 0;
+       struct ixgbe_adapter *adapter = netdev_priv(dev);
+       struct ixgbe_mac_info *mac = &adapter->hw.mac;
+
+       if (is_valid_ether_addr(mac->san_addr)) {
+               rtnl_lock();
+               err = dev_addr_del(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+               rtnl_unlock();
+       }
+       return err;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
@@ -4675,9 +4992,14 @@ static const struct net_device_ops ixgbe_netdev_ops = {
        .ndo_vlan_rx_register   = ixgbe_vlan_rx_register,
        .ndo_vlan_rx_add_vid    = ixgbe_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = ixgbe_vlan_rx_kill_vid,
+       .ndo_do_ioctl           = ixgbe_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
+#ifdef IXGBE_FCOE
+       .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
+       .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
+#endif /* IXGBE_FCOE */
 };
 
 /**
@@ -4700,10 +5022,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
        static int cards_found;
        int i, err, pci_using_dac;
-       u16 pm_value = 0;
+#ifdef IXGBE_FCOE
+       u16 device_caps;
+#endif
        u32 part_num, eec;
 
-       err = pci_enable_device(pdev);
+       err = pci_enable_device_mem(pdev);
        if (err)
                return err;
 
@@ -4723,9 +5047,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                pci_using_dac = 0;
        }
 
-       err = pci_request_regions(pdev, ixgbe_driver_name);
+       err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+                                          IORESOURCE_MEM), ixgbe_driver_name);
        if (err) {
-               dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+               dev_err(&pdev->dev,
+                       "pci_request_selected_regions failed 0x%x\n", err);
                goto err_pci_reg;
        }
 
@@ -4789,6 +5115,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        /* PHY */
        memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
        hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+       /* ixgbe_identify_phy_generic will set prtad and mmds properly */
+       hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
+       hw->phy.mdio.mmds = 0;
+       hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+       hw->phy.mdio.dev = netdev;
+       hw->phy.mdio.mdio_read = ixgbe_mdio_read;
+       hw->phy.mdio.mdio_write = ixgbe_mdio_write;
 
        /* set up this timer and work struct before calling get_invariants
         * which might start the timer
@@ -4826,6 +5159,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        if (err)
                goto err_sw_init;
 
+       /*
+        * If there is a fan on this device and it has failed log the
+        * failure.
+        */
+       if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) {
+               u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+               if (esdp & IXGBE_ESDP_SDP1)
+                       DPRINTK(PROBE, CRIT,
+                               "Fan has stopped, replace the adapter\n");
+       }
+
        /* reset_hw fills in the perm_addr as well */
        err = hw->mac.ops.reset_hw(hw);
        if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -4863,6 +5207,20 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        netdev->dcbnl_ops = &dcbnl_ops;
 #endif
 
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+               if (hw->mac.ops.get_device_caps) {
+                       hw->mac.ops.get_device_caps(hw, &device_caps);
+                       if (!(device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)) {
+                               netdev->features |= NETIF_F_FCOE_CRC;
+                               netdev->features |= NETIF_F_FSO;
+                               netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+                       } else {
+                               adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+                       }
+               }
+       }
+#endif /* IXGBE_FCOE */
        if (pci_using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
 
@@ -4898,11 +5256,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        switch (pdev->device) {
        case IXGBE_DEV_ID_82599_KX4:
-#define IXGBE_PCIE_PMCSR 0x44
-               adapter->wol = IXGBE_WUFC_MAG;
-               pci_read_config_word(pdev, IXGBE_PCIE_PMCSR, &pm_value);
-               pci_write_config_word(pdev, IXGBE_PCIE_PMCSR,
-                                     (pm_value | (1 << 8)));
+               adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+                               IXGBE_WUFC_MC | IXGBE_WUFC_BC);
                break;
        default:
                adapter->wol = 0;
@@ -4964,6 +5319,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                ixgbe_setup_dca(adapter);
        }
 #endif
+       /* add san mac addr to netdev */
+       ixgbe_add_sanmac_netdev(netdev);
 
        dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
        cards_found++;
@@ -4972,8 +5329,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 err_register:
        ixgbe_release_hw_control(adapter);
 err_hw_init:
+       ixgbe_clear_interrupt_scheme(adapter);
 err_sw_init:
-       ixgbe_reset_interrupt_capability(adapter);
 err_eeprom:
        clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
        del_timer_sync(&adapter->sfp_timer);
@@ -4984,7 +5341,8 @@ err_eeprom:
 err_ioremap:
        free_netdev(netdev);
 err_alloc_etherdev:
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, pci_select_bars(pdev,
+                                    IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
        pci_disable_device(pdev);
@@ -5028,19 +5386,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
        }
 
 #endif
+#ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+               ixgbe_cleanup_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
+
+       /* remove the added san mac */
+       ixgbe_del_sanmac_netdev(netdev);
+
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);
 
-       ixgbe_reset_interrupt_capability(adapter);
+       ixgbe_clear_interrupt_scheme(adapter);
 
        ixgbe_release_hw_control(adapter);
 
        iounmap(adapter->hw.hw_addr);
-       pci_release_regions(pdev);
+       pci_release_selected_regions(pdev, pci_select_bars(pdev,
+                                    IORESOURCE_MEM));
 
        DPRINTK(PROBE, INFO, "complete\n");
-       kfree(adapter->tx_ring);
-       kfree(adapter->rx_ring);
 
        free_netdev(netdev);
 
@@ -5068,6 +5434,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 
        netif_device_detach(netdev);
 
+       if (state == pci_channel_io_perm_failure)
+               return PCI_ERS_RESULT_DISCONNECT;
+
        if (netif_running(netdev))
                ixgbe_down(adapter);
        pci_disable_device(pdev);
@@ -5089,7 +5458,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
        pci_ers_result_t result;
        int err;
 
-       if (pci_enable_device(pdev)) {
+       if (pci_enable_device_mem(pdev)) {
                DPRINTK(PROBE, ERR,
                        "Cannot re-enable PCI device after reset.\n");
                result = PCI_ERS_RESULT_DISCONNECT;