ixgbe: Use length to determine if descriptor is done
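
Instead of checking the DD bit in wb.upper.status_error to decide whether hardware has written a descriptor back, the Rx clean-up path now tests wb.upper.length: software zeroes the length when it refills a descriptor, and on completion hardware writes back the packet length, which is non-zero. That removes the need to clear status_error on every refill and to memset() the whole descriptor ring on teardown. Note that this blobdiff spans several related Rx-path commits, so the hunks below also pull in DMA-attribute mappings, batched page refcounting via pagecnt_bias, and the removal of the driver-private busy-poll path.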
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1e2f39ebd82495495c872fc310c0aec637f3c5fd..6fc26cdacba2557646dfc0d0a3e74bcf5758944b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -72,7 +72,7 @@ char ixgbe_default_device_descr[] =
 static char ixgbe_default_device_descr[] =
                              "Intel(R) 10 Gigabit Network Connection";
 #endif
-#define DRV_VERSION "4.4.0-k"
+#define DRV_VERSION "5.0.0-k"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static const char ixgbe_copyright[] =
                                "Copyright (c) 1999-2016 Intel Corporation.";
@@ -86,6 +86,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_X550]            = &ixgbe_X550_info,
        [board_X550EM_x]        = &ixgbe_X550EM_x_info,
        [board_x550em_a]        = &ixgbe_x550em_a_info,
+       [board_x550em_a_fw]     = &ixgbe_x550em_a_fw_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -140,6 +141,8 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), board_x550em_a},
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), board_x550em_a_fw },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), board_x550em_a_fw },
        /* required last entry */
        {0, }
 };
@@ -180,6 +183,7 @@ MODULE_VERSION(DRV_VERSION);
 static struct workqueue_struct *ixgbe_wq;
 
 static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *);
 
 static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
                                          u32 reg, u16 *value)
@@ -607,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
        if (netdev) {
                dev_info(&adapter->pdev->dev, "Net device Info\n");
                pr_info("Device Name     state            "
-                       "trans_start      last_rx\n");
-               pr_info("%-15s %016lX %016lX %016lX\n",
+                       "trans_start\n");
+               pr_info("%-15s %016lX %016lX\n",
                        netdev->name,
                        netdev->state,
-                       dev_trans_start(netdev),
-                       netdev->last_rx);
+                       dev_trans_start(netdev));
        }
 
        /* Print Registers */
@@ -1567,8 +1570,10 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        }
 
        /* map page for use */
-       dma = dma_map_page(rx_ring->dev, page, 0,
-                          ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+       dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+                                ixgbe_rx_pg_size(rx_ring),
+                                DMA_FROM_DEVICE,
+                                IXGBE_RX_DMA_ATTR);
 
        /*
         * if mapping failed free memory back to system since
@@ -1584,6 +1589,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
        bi->dma = dma;
        bi->page = page;
        bi->page_offset = 0;
+       bi->pagecnt_bias = 1;
 
        return true;
 }
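
A note on IXGBE_RX_DMA_ATTR, which is new in this diff: the matching ixgbe.h hunk is not shown here, but the mask is presumably defined along these lines:

        /* assumed definition from the companion ixgbe.h change */
        #define IXGBE_RX_DMA_ATTR \
                (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

DMA_ATTR_SKIP_CPU_SYNC tells dma_map_page_attrs()/dma_unmap_page_attrs() not to perform the implicit CPU sync, which is what lets the driver sync only the half-page it actually hands to hardware; DMA_ATTR_WEAK_ORDERING allows relaxed-ordering DMA where the platform supports it.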
@@ -1598,6 +1604,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
        u16 i = rx_ring->next_to_use;
+       u16 bufsz;
 
        /* nothing to do */
        if (!cleaned_count)
@@ -1607,10 +1614,17 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;
 
+       bufsz = ixgbe_rx_bufsz(rx_ring);
+
        do {
                if (!ixgbe_alloc_mapped_page(rx_ring, bi))
                        break;
 
+               /* sync the buffer for use by the device */
+               dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+                                                bi->page_offset, bufsz,
+                                                DMA_FROM_DEVICE);
+
                /*
                 * Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
@@ -1626,8 +1640,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                        i -= rx_ring->count;
                }
 
-               /* clear the status bits for the next_to_use descriptor */
-               rx_desc->wb.upper.status_error = 0;
+               /* clear the length for the next_to_use descriptor */
+               rx_desc->wb.upper.length = 0;
 
                cleaned_count--;
        } while (cleaned_count);
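
The dma_sync_single_range_for_device() added here runs on every refill, recycled or fresh, since all buffers pass through this function before being handed to hardware; the now-redundant sync in ixgbe_reuse_rx_page() is deleted in a hunk further down. Clearing wb.upper.length (rather than status_error) is what re-arms the descriptor for the new done-test.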
@@ -1717,11 +1731,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
 static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
                         struct sk_buff *skb)
 {
-       skb_mark_napi_id(skb, &q_vector->napi);
-       if (ixgbe_qv_busy_polling(q_vector))
-               netif_receive_skb(skb);
-       else
-               napi_gro_receive(&q_vector->napi, skb);
+       napi_gro_receive(&q_vector->napi, skb);
 }
 
 /**
@@ -1833,8 +1843,10 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 {
        /* if the page was released unmap it, else just sync our portion */
        if (unlikely(IXGBE_CB(skb)->page_released)) {
-               dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+               dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+                                    ixgbe_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    IXGBE_RX_DMA_ATTR);
                IXGBE_CB(skb)->page_released = false;
        } else {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -1842,7 +1854,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
                                              frag->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
+                                             skb_frag_size(frag),
                                              DMA_FROM_DEVICE);
        }
        IXGBE_CB(skb)->dma = 0;
@@ -1918,12 +1930,6 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
 
        /* transfer page from old buffer to new buffer */
        *new_buff = *old_buff;
-
-       /* sync the buffer for use by the device */
-       dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-                                        new_buff->page_offset,
-                                        ixgbe_rx_bufsz(rx_ring),
-                                        DMA_FROM_DEVICE);
 }
 
 static inline bool ixgbe_page_is_reserved(struct page *page)
@@ -1931,6 +1937,48 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
+static bool ixgbe_can_reuse_rx_page(struct ixgbe_ring *rx_ring,
+                                   struct ixgbe_rx_buffer *rx_buffer,
+                                   struct page *page,
+                                   const unsigned int truesize)
+{
+#if (PAGE_SIZE >= 8192)
+       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
+                                  ixgbe_rx_bufsz(rx_ring);
+#endif
+       unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
+       /* avoid re-using remote pages */
+       if (unlikely(ixgbe_page_is_reserved(page)))
+               return false;
+
+#if (PAGE_SIZE < 8192)
+       /* if we are only owner of page we can reuse it */
+       if (unlikely(page_count(page) != pagecnt_bias))
+               return false;
+
+       /* flip page offset to other buffer */
+       rx_buffer->page_offset ^= truesize;
+#else
+       /* move offset up to the next cache line */
+       rx_buffer->page_offset += truesize;
+
+       if (rx_buffer->page_offset > last_offset)
+               return false;
+#endif
+
+       /* If we have drained the page fragment pool we need to update
+        * the pagecnt_bias and page count so that we fully restock the
+        * number of references the driver holds.
+        */
+       if (unlikely(pagecnt_bias == 1)) {
+               page_ref_add(page, USHRT_MAX);
+               rx_buffer->pagecnt_bias = USHRT_MAX;
+       }
+
+       return true;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
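
ixgbe_can_reuse_rx_page() above consolidates the reuse checks that used to live in ixgbe_add_rx_frag() and swaps the page_ref_inc()-per-frame scheme for batched accounting. A sketch of the invariant, assuming the new pagecnt_bias field added to struct ixgbe_rx_buffer by the companion header change:

        /*
         * pagecnt_bias = references the driver still holds on the page.
         * While the buffer sits in the ring:
         *
         *      page_count(page) - pagecnt_bias == references handed out
         *                                         to the network stack
         *
         * so page_count(page) == pagecnt_bias means the driver is the
         * sole owner and may flip/reuse the half-page.  References are
         * restocked in bulk, one atomic instead of one per frame:
         */
        if (unlikely(pagecnt_bias == 1)) {
                page_ref_add(page, USHRT_MAX);
                rx_buffer->pagecnt_bias = USHRT_MAX;
        }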
@@ -1948,22 +1996,21 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
  **/
 static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                              struct ixgbe_rx_buffer *rx_buffer,
-                             union ixgbe_adv_rx_desc *rx_desc,
+                             unsigned int size,
                              struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-       unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+       unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
-       unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) -
-                                  ixgbe_rx_bufsz(rx_ring);
+       unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
 
-       if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
-               unsigned char *va = page_address(page) + rx_buffer->page_offset;
+       if (unlikely(skb_is_nonlinear(skb)))
+               goto add_tail_frag;
 
+       if (size <= IXGBE_RX_HDR_SIZE) {
                memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
                /* page is not reserved, we can reuse buffer as-is */
@@ -1971,43 +2018,20 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                        return true;
 
                /* this page cannot be reused so discard it */
-               __free_pages(page, ixgbe_rx_pg_order(rx_ring));
                return false;
        }
 
+add_tail_frag:
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                        rx_buffer->page_offset, size, truesize);
 
-       /* avoid re-using remote pages */
-       if (unlikely(ixgbe_page_is_reserved(page)))
-               return false;
-
-#if (PAGE_SIZE < 8192)
-       /* if we are only owner of page we can reuse it */
-       if (unlikely(page_count(page) != 1))
-               return false;
-
-       /* flip page offset to other buffer */
-       rx_buffer->page_offset ^= truesize;
-#else
-       /* move offset up to the next cache line */
-       rx_buffer->page_offset += truesize;
-
-       if (rx_buffer->page_offset > last_offset)
-               return false;
-#endif
-
-       /* Even if we own the page, we are not allowed to use atomic_set()
-        * This would break get_page_unless_zero() users.
-        */
-       page_ref_inc(page);
-
-       return true;
+       return ixgbe_can_reuse_rx_page(rx_ring, rx_buffer, page, truesize);
 }
 
 static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring,
                                             union ixgbe_adv_rx_desc *rx_desc)
 {
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
        struct ixgbe_rx_buffer *rx_buffer;
        struct sk_buff *skb;
        struct page *page;
@@ -2062,24 +2086,29 @@ dma_sync:
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              rx_buffer->dma,
                                              rx_buffer->page_offset,
-                                             ixgbe_rx_bufsz(rx_ring),
+                                             size,
                                              DMA_FROM_DEVICE);
 
                rx_buffer->skb = NULL;
        }
 
        /* pull page into skb */
-       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+       if (ixgbe_add_rx_frag(rx_ring, rx_buffer, size, skb)) {
                /* hand second half of page back to the ring */
                ixgbe_reuse_rx_page(rx_ring, rx_buffer);
-       } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
-               /* the page has been released from the ring */
-               IXGBE_CB(skb)->page_released = true;
        } else {
-               /* we are not reusing the buffer so unmap it */
-               dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-                              ixgbe_rx_pg_size(rx_ring),
-                              DMA_FROM_DEVICE);
+               if (IXGBE_CB(skb)->dma == rx_buffer->dma) {
+                       /* the page has been released from the ring */
+                       IXGBE_CB(skb)->page_released = true;
+               } else {
+                       /* we are not reusing the buffer so unmap it */
+                       dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+                                            ixgbe_rx_pg_size(rx_ring),
+                                            DMA_FROM_DEVICE,
+                                            IXGBE_RX_DMA_ATTR);
+               }
+               __page_frag_cache_drain(page,
+                                       rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of buffer_info */
@@ -2125,7 +2154,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
                rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
 
-               if (!rx_desc->wb.upper.status_error)
+               if (!rx_desc->wb.upper.length)
                        break;
 
                /* This memory barrier is needed to keep us from reading
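
This is the hunk the subject line refers to. The barrier comment cut off above still applies: the length read is the acquire point, and nothing else may be read from the descriptor until a non-zero length has been observed. In context the test now reads, as a sketch:

        rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);

        /* length == 0 means hardware has not written this one back */
        if (!rx_desc->wb.upper.length)
                break;

        /* order all further descriptor reads after the done-check */
        dma_rmb();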
@@ -2198,40 +2227,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        return total_rx_packets;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-/* must be called with local_bh_disable()d */
-static int ixgbe_low_latency_recv(struct napi_struct *napi)
-{
-       struct ixgbe_q_vector *q_vector =
-                       container_of(napi, struct ixgbe_q_vector, napi);
-       struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct ixgbe_ring  *ring;
-       int found = 0;
-
-       if (test_bit(__IXGBE_DOWN, &adapter->state))
-               return LL_FLUSH_FAILED;
-
-       if (!ixgbe_qv_lock_poll(q_vector))
-               return LL_FLUSH_BUSY;
-
-       ixgbe_for_each_ring(ring, q_vector->rx) {
-               found = ixgbe_clean_rx_irq(q_vector, ring, 4);
-#ifdef BP_EXTENDED_STATS
-               if (found)
-                       ring->stats.cleaned += found;
-               else
-                       ring->stats.misses++;
-#endif
-               if (found)
-                       break;
-       }
-
-       ixgbe_qv_unlock_poll(q_vector);
-
-       return found;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  * ixgbe_configure_msix - Configure MSI-X hardware
  * @adapter: board private structure
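
The removal of ixgbe_low_latency_recv() pairs with the simplified ixgbe_rx_skb() earlier in the diff and with the .ndo_busy_poll hook dropped from ixgbe_netdev_ops near the end: busy polling is now handled generically by the core NAPI code, so the driver-private qv lock/unlock machinery and the LL_FLUSH_* return codes are no longer needed.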
@@ -2447,6 +2442,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr = adapter->interrupt_event;
+       s32 rc;
 
        if (test_bit(__IXGBE_DOWN, &adapter->state))
                return;
@@ -2485,6 +2481,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
                        return;
 
                break;
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+               rc = hw->phy.ops.check_overtemp(hw);
+               if (rc != IXGBE_ERR_OVERTEMP)
+                       return;
+               break;
        default:
                if (adapter->hw.mac.type >= ixgbe_mac_X540)
                        return;
@@ -2531,6 +2533,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr)
                        return;
                }
                return;
+       case ixgbe_mac_x550em_a:
+               if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) {
+                       adapter->interrupt_event = eicr;
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+                       ixgbe_service_event_schedule(adapter);
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
+                                       IXGBE_EICR_GPI_SDP0_X550EM_a);
+                       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR,
+                                       IXGBE_EICR_GPI_SDP0_X550EM_a);
+               }
+               return;
+       case ixgbe_mac_X550:
        case ixgbe_mac_X540:
                if (!(eicr & IXGBE_EICR_TS))
                        return;
@@ -2856,8 +2870,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                        clean_complete = false;
        }
 
-       /* Exit if we are called by netpoll or busy polling is active */
-       if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
+       /* Exit if we are called by netpoll */
+       if (budget <= 0)
                return budget;
 
        /* attempt to distribute budget to each queue fairly, but don't allow
@@ -2876,7 +2890,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
                        clean_complete = false;
        }
 
-       ixgbe_qv_unlock_napi(q_vector);
        /* If all work not completed, return budget and keep polling */
        if (!clean_complete)
                return budget;
@@ -3685,6 +3698,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       union ixgbe_adv_rx_desc *rx_desc;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;
@@ -3719,6 +3733,10 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                rxdctl |=  0x080420;
        }
 
+       /* initialize Rx descriptor 0 */
+       rx_desc = IXGBE_RX_DESC(ring, 0);
+       rx_desc->wb.upper.length = 0;
+
        /* enable receive descriptor ring */
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
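
Only descriptor 0 needs priming here: ixgbe_alloc_rx_buffers() zeroes wb.upper.length on every refill, so each subsequent slot is re-armed before the ring can wrap back to it. This is also why the full-ring memset() disappears from ixgbe_clean_rx_ring() later in the diff.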
@@ -3855,10 +3873,15 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
         */
        for (i = 0; i < adapter->num_rx_queues; i++) {
                rx_ring = adapter->rx_ring[i];
+
+               clear_ring_rsc_enabled(rx_ring);
+               clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
                if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
                        set_ring_rsc_enabled(rx_ring);
-               else
-                       clear_ring_rsc_enabled(rx_ring);
+
+               if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+                       set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
        }
 }
 
@@ -4559,23 +4582,16 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
-               ixgbe_qv_init_lock(adapter->q_vector[q_idx]);
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
                napi_enable(&adapter->q_vector[q_idx]->napi);
-       }
 }
 
 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
 
-       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
                napi_disable(&adapter->q_vector[q_idx]->napi);
-               while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
-                       pr_info("QV %d locked\n", q_idx);
-                       usleep_range(1000, 20000);
-               }
-       }
 }
 
 static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
@@ -4894,10 +4910,11 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                if (rx_buffer->skb) {
                        struct sk_buff *skb = rx_buffer->skb;
                        if (IXGBE_CB(skb)->page_released)
-                               dma_unmap_page(dev,
-                                              IXGBE_CB(skb)->dma,
-                                              ixgbe_rx_bufsz(rx_ring),
-                                              DMA_FROM_DEVICE);
+                               dma_unmap_page_attrs(dev,
+                                                    IXGBE_CB(skb)->dma,
+                                                    ixgbe_rx_pg_size(rx_ring),
+                                                    DMA_FROM_DEVICE,
+                                                    IXGBE_RX_DMA_ATTR);
                        dev_kfree_skb(skb);
                        rx_buffer->skb = NULL;
                }
@@ -4905,9 +4922,22 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
                if (!rx_buffer->page)
                        continue;
 
-               dma_unmap_page(dev, rx_buffer->dma,
-                              ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
-               __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+               /* Invalidate cache lines that may have been written to by
+                * device so that we avoid corrupting memory.
+                */
+               dma_sync_single_range_for_cpu(rx_ring->dev,
+                                             rx_buffer->dma,
+                                             rx_buffer->page_offset,
+                                             ixgbe_rx_bufsz(rx_ring),
+                                             DMA_FROM_DEVICE);
+
+               /* free resources associated with mapping */
+               dma_unmap_page_attrs(dev, rx_buffer->dma,
+                                    ixgbe_rx_pg_size(rx_ring),
+                                    DMA_FROM_DEVICE,
+                                    IXGBE_RX_DMA_ATTR);
+               __page_frag_cache_drain(rx_buffer->page,
+                                       rx_buffer->pagecnt_bias);
 
                rx_buffer->page = NULL;
        }
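
Both changes in this teardown hunk follow from the attrs-based mapping: with DMA_ATTR_SKIP_CPU_SYNC the unmap no longer syncs for the CPU, so the region hardware may have written must be synced explicitly first, and the bulk references taken under the pagecnt_bias scheme are released in one __page_frag_cache_drain() call instead of __free_pages().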
@@ -4915,9 +4945,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
        memset(rx_ring->rx_buffer_info, 0, size);
 
-       /* Zero out the descriptor ring */
-       memset(rx_ring->desc, 0, rx_ring->size);
-
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
@@ -5294,6 +5321,8 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
 
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                usleep_range(1000, 2000);
+       if (adapter->hw.phy.type == ixgbe_phy_fw)
+               ixgbe_watchdog_link_is_down(adapter);
        ixgbe_down(adapter);
        /*
         * If SR-IOV enabled then wait a bit before bringing the adapter
@@ -5553,6 +5582,31 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        ixgbe_clean_all_rx_rings(adapter);
 }
 
+/**
+ * ixgbe_set_eee_capable - helper function to determine EEE support on X550
+ * @adapter: board private structure
+ */
+static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X550EM_A_1G_T:
+       case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+               if (!hw->phy.eee_speeds_supported)
+                       break;
+               adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE;
+               if (!hw->phy.eee_speeds_advertised)
+                       break;
+               adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
+               break;
+       default:
+               adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE;
+               adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
+               break;
+       }
+}
+
 /**
  * ixgbe_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
@@ -5717,6 +5771,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
                break;
        case ixgbe_mac_x550em_a:
                adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE;
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_X550EM_A_1G_T:
+               case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+                       break;
+               default:
+                       break;
+               }
        /* fall through */
        case ixgbe_mac_X550EM_x:
 #ifdef CONFIG_IXGBE_DCB
@@ -5730,6 +5792,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
 #endif /* IXGBE_FCOE */
        /* Fall Through */
        case ixgbe_mac_X550:
+               if (hw->mac.type == ixgbe_mac_X550)
+                       adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
 #ifdef CONFIG_IXGBE_DCA
                adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
 #endif
@@ -6200,7 +6264,8 @@ int ixgbe_close(struct net_device *netdev)
 
        ixgbe_ptp_stop(adapter);
 
-       ixgbe_close_suspend(adapter);
+       if (netif_device_present(netdev))
+               ixgbe_close_suspend(adapter);
 
        ixgbe_fdir_filter_exit(adapter);
 
@@ -6245,14 +6310,12 @@ static int ixgbe_resume(struct pci_dev *pdev)
        if (!err && netif_running(netdev))
                err = ixgbe_open(netdev);
 
-       rtnl_unlock();
 
-       if (err)
-               return err;
-
-       netif_device_attach(netdev);
+       if (!err)
+               netif_device_attach(netdev);
+       rtnl_unlock();
 
-       return 0;
+       return err;
 }
 #endif /* CONFIG_PM */
 
@@ -6267,14 +6330,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
        int retval = 0;
 #endif
 
+       rtnl_lock();
        netif_device_detach(netdev);
 
-       rtnl_lock();
        if (netif_running(netdev))
                ixgbe_close_suspend(adapter);
-       rtnl_unlock();
 
        ixgbe_clear_interrupt_scheme(adapter);
+       rtnl_unlock();
 
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
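
The rtnl reshuffle here and the netif_device_present() check added to ixgbe_close() above work together to close a close/suspend race. A sketch of the interleaving after the fix (hypothetical timeline, not code from the patch):

        /*
         *   CPU0: __ixgbe_shutdown()       CPU1: ixgbe_close()
         *   --------------------------     ---------------------------
         *   rtnl_lock()
         *   netif_device_detach()
         *   ixgbe_close_suspend()
         *   rtnl_unlock()
         *                                  rtnl_lock() (core dev path)
         *                                  netif_device_present()?
         *                                    -> false, skip the second
         *                                       ixgbe_close_suspend()
         */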
@@ -6808,6 +6871,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
        case IXGBE_LINK_SPEED_100_FULL:
                speed_str = "100 Mbps";
                break;
+       case IXGBE_LINK_SPEED_10_FULL:
+               speed_str = "10 Mbps";
+               break;
        default:
                speed_str = "unknown speed";
                break;
@@ -8111,8 +8177,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
 }
 
 #endif
-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
-                                                  struct rtnl_link_stats64 *stats)
+
+static void ixgbe_get_stats64(struct net_device *netdev,
+                             struct rtnl_link_stats64 *stats)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        int i;
@@ -8150,13 +8217,13 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
                }
        }
        rcu_read_unlock();
+
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
        stats->rx_length_errors = netdev->stats.rx_length_errors;
        stats->rx_crc_errors    = netdev->stats.rx_crc_errors;
        stats->rx_missed_errors = netdev->stats.rx_missed_errors;
-       return stats;
 }
 
 #ifdef CONFIG_IXGBE_DCB
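
The void return mirrors a core netdev API change: .ndo_get_stats64 implementations no longer return the stats pointer, since the core always supplies the storage and ignored the return value. The assumed prototype in struct net_device_ops for the kernel this diff targets:

        void    (*ndo_get_stats64)(struct net_device *dev,
                                   struct rtnl_link_stats64 *storage);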
@@ -9290,9 +9357,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = ixgbe_netpoll,
 #endif
-#ifdef CONFIG_NET_RX_BUSY_POLL
-       .ndo_busy_poll          = ixgbe_low_latency_recv,
-#endif
 #ifdef IXGBE_FCOE
        .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
        .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
@@ -9596,6 +9660,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->phy.reset_if_overtemp = true;
        err = hw->mac.ops.reset_hw(hw);
        hw->phy.reset_if_overtemp = false;
+       ixgbe_set_eee_capable(adapter);
        if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
                err = 0;
        } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
@@ -9673,7 +9738,7 @@ skip_sriov:
 
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
-               netdev->dcbnl_ops = &dcbnl_ops;
+               netdev->dcbnl_ops = &ixgbe_dcbnl_ops;
 #endif
 
 #ifdef IXGBE_FCOE
@@ -9833,8 +9898,9 @@ skip_sriov:
         * since os does not support feature
         */
        if (hw->mac.ops.set_fw_drv_ver)
-               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF,
-                                          0xFF);
+               hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF,
+                                          sizeof(ixgbe_driver_version) - 1,
+                                          ixgbe_driver_version);
 
        /* add san mac addr to netdev */
        ixgbe_add_sanmac_netdev(netdev);
@@ -10082,7 +10148,7 @@ skip_bad_vf_detection:
        }
 
        if (netif_running(netdev))
-               ixgbe_down(adapter);
+               ixgbe_close_suspend(adapter);
 
        if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
                pci_disable_device(pdev);
@@ -10152,10 +10218,12 @@ static void ixgbe_io_resume(struct pci_dev *pdev)
        }
 
 #endif
+       rtnl_lock();
        if (netif_running(netdev))
-               ixgbe_up(adapter);
+               ixgbe_open(netdev);
 
        netif_device_attach(netdev);
+       rtnl_unlock();
 }
 
 static const struct pci_error_handlers ixgbe_err_handler = {