i40e: remove unnecessary msleep() delay in i40e_free_vfs
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 82a95cc2c8ee386c725dfd01e5367bc95e26ca0a..20850a646e6cefb58005c0ebe2fc4de49c87c561 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
 #include "i40e.h"
 #include "i40e_diag.h"
 #include <net/udp_tunnel.h>
+/* All i40e tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "i40e_trace.h"
 
 const char i40e_driver_name[] = "i40e";
 static const char i40e_driver_string[] =
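The comment in the hunk above is the whole rule for CREATE_TRACE_POINTS: exactly one compilation unit may define it before including the trace header, and this patch makes i40e_main.c that unit. As a rough sketch (not part of the patch; the event name, fields and include-file names are illustrative, not the real contents of i40e_trace.h), a trace header of this kind is conventionally laid out like this:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM i40e

#if !defined(_EXAMPLE_I40E_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _EXAMPLE_I40E_TRACE_H_

#include <linux/tracepoint.h>

/* illustrative event only */
TRACE_EVENT(i40e_example_event,
	TP_PROTO(unsigned int queue),
	TP_ARGS(queue),
	TP_STRUCT__entry(__field(unsigned int, queue)),
	TP_fast_assign(__entry->queue = queue;),
	TP_printk("queue=%u", __entry->queue)
);

#endif /* _EXAMPLE_I40E_TRACE_H_ */

/* This part must sit outside the multi-read protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE i40e_trace
#include <trace/define_trace.h>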
@@ -39,9 +45,9 @@ static const char i40e_driver_string[] =
 
 #define DRV_KERN "-k"
 
-#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 6
-#define DRV_VERSION_BUILD 27
+#define DRV_VERSION_MAJOR 2
+#define DRV_VERSION_MINOR 1
+#define DRV_VERSION_BUILD 14
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -50,13 +56,16 @@ static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporatio
 
 /* a bit of forward declarations */
 static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
-static void i40e_handle_reset_warning(struct i40e_pf *pf);
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static int i40e_reset(struct i40e_pf *pf);
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
 static int i40e_veb_get_bw_info(struct i40e_veb *veb);
 
@@ -299,11 +308,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
-#ifdef I40E_FCOE
-void i40e_tx_timeout(struct net_device *netdev)
-#else
 static void i40e_tx_timeout(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -408,10 +413,7 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
-#ifndef I40E_FCOE
-static
-#endif
-void i40e_get_netdev_stats_struct(struct net_device *netdev,
+static void i40e_get_netdev_stats_struct(struct net_device *netdev,
                                  struct rtnl_link_stats64 *stats)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -723,55 +725,6 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
        veb->stat_offsets_loaded = true;
 }
 
-#ifdef I40E_FCOE
-/**
- * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
- * @vsi: the VSI that is capable of doing FCoE
- **/
-static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
-{
-       struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       struct i40e_fcoe_stats *ofs;
-       struct i40e_fcoe_stats *fs;     /* device's eth stats */
-       int idx;
-
-       if (vsi->type != I40E_VSI_FCOE)
-               return;
-
-       idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
-       fs = &vsi->fcoe_stats;
-       ofs = &vsi->fcoe_stats_offsets;
-
-       i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
-       i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
-       i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
-       i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
-       i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
-       i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
-       i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_last_error, &fs->fcoe_last_error);
-       i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
-                          vsi->fcoe_stat_offsets_loaded,
-                          &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
-
-       vsi->fcoe_stat_offsets_loaded = true;
-}
-
-#endif
 /**
  * i40e_update_vsi_stats - Update the vsi statistics counters.
  * @vsi: the VSI to be updated
@@ -790,7 +743,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        struct i40e_eth_stats *oes;
        struct i40e_eth_stats *es;     /* device's eth stats */
        u32 tx_restart, tx_busy;
-       u64 tx_lost_interrupt;
        struct i40e_ring *p;
        u32 rx_page, rx_buf;
        u64 bytes, packets;
@@ -816,7 +768,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
        tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
-       tx_lost_interrupt = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
@@ -835,7 +786,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
                tx_busy += p->tx_stats.tx_busy;
                tx_linearize += p->tx_stats.tx_linearize;
                tx_force_wb += p->tx_stats.tx_force_wb;
-               tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
@@ -854,7 +804,6 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
        vsi->tx_busy = tx_busy;
        vsi->tx_linearize = tx_linearize;
        vsi->tx_force_wb = tx_force_wb;
-       vsi->tx_lost_interrupt = tx_lost_interrupt;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
 
@@ -1101,13 +1050,13 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
                           &osd->rx_lpi_count, &nsd->rx_lpi_count);
 
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
-           !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
+           !(pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED))
                nsd->fd_sb_status = true;
        else
                nsd->fd_sb_status = false;
 
        if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
-           !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+           !(pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED))
                nsd->fd_atr_status = true;
        else
                nsd->fd_atr_status = false;
@@ -1129,9 +1078,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                i40e_update_pf_stats(pf);
 
        i40e_update_vsi_stats(vsi);
-#ifdef I40E_FCOE
-       i40e_update_fcoe_stats(vsi);
-#endif
 }
 
 /**
@@ -1562,11 +1508,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
  *
  * Returns 0 on success, negative on failure
  **/
-#ifdef I40E_FCOE
-int i40e_set_mac(struct net_device *netdev, void *p)
-#else
 static int i40e_set_mac(struct net_device *netdev, void *p)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1626,17 +1568,10 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
  *
  * Setup VSI queue mapping for enabled traffic classes.
  **/
-#ifdef I40E_FCOE
-void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
-                             struct i40e_vsi_context *ctxt,
-                             u8 enabled_tc,
-                             bool is_add)
-#else
 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
-#endif
 {
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
@@ -1686,11 +1621,6 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                qcount = min_t(int, pf->alloc_rss_size,
                                               num_tc_qps);
                                break;
-#ifdef I40E_FCOE
-                       case I40E_VSI_FCOE:
-                               qcount = num_tc_qps;
-                               break;
-#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
@@ -1800,11 +1730,7 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
  * i40e_set_rx_mode - NDO callback to set the netdev filters
  * @netdev: network interface device structure
  **/
-#ifdef I40E_FCOE
-void i40e_set_rx_mode(struct net_device *netdev)
-#else
 static void i40e_set_rx_mode(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1883,19 +1809,12 @@ static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
 static
 struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
 {
-       while (next) {
-               next = hlist_entry(next->hlist.next,
-                                  typeof(struct i40e_new_mac_filter),
-                                  hlist);
-
-               /* keep going if we found a broadcast filter */
-               if (next && is_broadcast_ether_addr(next->f->macaddr))
-                       continue;
-
-               break;
+       hlist_for_each_entry_continue(next, hlist) {
+               if (!is_broadcast_ether_addr(next->f->macaddr))
+                       return next;
        }
 
-       return next;
+       return NULL;
 }
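For readers without include/linux/list.h in their head: hlist_for_each_entry_continue(), used in the hunk above, resumes iteration at the element after the one passed in, following the named hlist_node member ("hlist"). Hand-expanded, and only as an approximation of the macro, the new helper body behaves like:

	/* start from the entry after 'next', stop at the end of the chain */
	for (next = hlist_entry_safe(next->hlist.next,
				     typeof(*next), hlist);
	     next;
	     next = hlist_entry_safe(next->hlist.next,
				     typeof(*next), hlist)) {
		/* skip broadcast filters, return the first real one */
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}
	return NULL;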
 
 /**
@@ -2487,13 +2406,15 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
 
        netdev_info(netdev, "changing MTU from %d to %d\n",
                    netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        if (netif_running(netdev))
                i40e_vsi_reinit_locked(vsi);
-       i40e_notify_client_of_l2_param_changes(vsi);
+       pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+                     I40E_FLAG_CLIENT_L2_CHANGE);
        return 0;
 }
 
@@ -2707,13 +2628,8 @@ void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
  *
  * net_device_ops implementation for adding vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_add_vid(struct net_device *netdev,
-                        __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2744,13 +2660,8 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  *
  * net_device_ops implementation for removing vlan ids
  **/
-#ifdef I40E_FCOE
-int i40e_vlan_rx_kill_vid(struct net_device *netdev,
-                         __always_unused __be16 proto, u16 vid)
-#else
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2920,9 +2831,6 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-       i40e_fcoe_setup_ddp_resources(vsi);
-#endif
        return err;
 }
 
@@ -2942,9 +2850,6 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        for (i = 0; i < vsi->num_queue_pairs; i++)
                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
                        i40e_free_rx_resources(vsi->rx_rings[i]);
-#ifdef I40E_FCOE
-       i40e_fcoe_free_ddp_resources(vsi);
-#endif
 }
 
 /**
@@ -3015,9 +2920,6 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        tx_ctx.qlen = ring->count;
        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
                                               I40E_FLAG_FD_ATR_ENABLED));
-#ifdef I40E_FCOE
-       tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
        /* FDIR VSI tx ring can still use RS bit and writebacks */
        if (vsi->type != I40E_VSI_FDIR)
@@ -3098,7 +3000,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
        ring->rx_buf_len = vsi->rx_buf_len;
 
-       rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
+       rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+                                   BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
 
        rx_ctx.base = (ring->dma / 128);
        rx_ctx.qlen = ring->count;
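A short aside on the dbuff change above: the field is expressed in units of BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) bytes (128-byte chunks on this hardware), so DIV_ROUND_UP rounds any rx_buf_len up to the next chunk instead of relying on the length having been pre-aligned, as the old ALIGN() in i40e_vsi_configure_rx did. A one-line worked example, assuming the 128-byte unit:

	/* rx_buf_len = 1534 (1536 - NET_IP_ALIGN) -> 12 x 128-byte units */
	u32 dbuff_units = DIV_ROUND_UP(1534, BIT_ULL(7));	/* = 12 */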
@@ -3120,9 +3023,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.l2tsel = 1;
        /* this controls whether VLAN is stripped from inner headers */
        rx_ctx.showiv = 0;
-#ifdef I40E_FCOE
-       rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
-#endif
        /* set the prefena field to 1 because the manual says to */
        rx_ctx.prefena = 1;
 
@@ -3144,6 +3044,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
                return -ENOMEM;
        }
 
+       /* configure Rx buffer alignment */
+       if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
+               clear_ring_build_skb_enabled(ring);
+       else
+               set_ring_build_skb_enabled(ring);
+
        /* cache tail for quicker writes, and clear the reg before use */
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
@@ -3181,27 +3087,21 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
        int err = 0;
        u16 i;
 
-       if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
-               vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
-                              + ETH_FCS_LEN + VLAN_HLEN;
-       else
-               vsi->max_frame = I40E_RXBUFFER_2048;
-
-       vsi->rx_buf_len = I40E_RXBUFFER_2048;
-
-#ifdef I40E_FCOE
-       /* setup rx buffer for FCoE */
-       if ((vsi->type == I40E_VSI_FCOE) &&
-           (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
-               vsi->rx_buf_len = I40E_RXBUFFER_3072;
-               vsi->max_frame = I40E_RXBUFFER_3072;
+       if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
+               vsi->max_frame = I40E_MAX_RXBUFFER;
+               vsi->rx_buf_len = I40E_RXBUFFER_2048;
+#if (PAGE_SIZE < 8192)
+       } else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
+                  (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+               vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+               vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
+#endif
+       } else {
+               vsi->max_frame = I40E_MAX_RXBUFFER;
+               vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
+                                                      I40E_RXBUFFER_2048;
        }
 
-#endif /* I40E_FCOE */
-       /* round up for the chip's needs */
-       vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
-                               BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
-
        /* set up individual rings */
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_configure_rx_ring(vsi->rx_rings[i]);
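Worked outcomes of the buffer-size selection above, assuming a 4 KiB PAGE_SIZE, NET_IP_ALIGN of 2 and the usual I40E_RXBUFFER_* byte values (illustrative numbers, not taken from this hunk):

	/*  - legacy-rx set, or no netdev:   rx_buf_len = 2048
	 *  - build_skb path, MTU <= 1500:   rx_buf_len = 1536 - 2 = 1534
	 *      (buffer + headroom + skb_shared_info fits in half a page,
	 *       so each 4 KiB page still yields two Rx buffers)
	 *  - build_skb path, MTU >  1500:   rx_buf_len = 3072 (half page)
	 */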
@@ -3281,6 +3181,12 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
 
+       /* Reset FDir counters as we're replaying all existing filters */
+       pf->fd_tcp4_filter_cnt = 0;
+       pf->fd_udp4_filter_cnt = 0;
+       pf->fd_sctp4_filter_cnt = 0;
+       pf->fd_ip4_filter_cnt = 0;
+
        hlist_for_each_entry_safe(filter, node,
                                  &pf->fdir_filter_list, fdir_node) {
                i40e_add_del_fdir(vsi, filter, true);
@@ -3993,11 +3899,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
  * This is used by netconsole to send skbs without having to re-enable
  * interrupts.  It's not called while the normal interrupt routine is executing.
  **/
-#ifdef I40E_FCOE
-void i40e_netpoll(struct net_device *netdev)
-#else
 static void i40e_netpoll(struct net_device *netdev)
-#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4017,6 +3919,8 @@ static void i40e_netpoll(struct net_device *netdev)
 }
 #endif
 
+#define I40E_QTX_ENA_WAIT_COUNT 50
+
 /**
  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
  * @pf: the PF being configured
@@ -4046,6 +3950,50 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
        return 0;
 }
 
+/**
+ * i40e_control_tx_q - Start or stop a particular Tx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 tx_reg;
+       int i;
+
+       /* warn the TX unit of coming changes */
+       i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+       if (!enable)
+               usleep_range(10, 20);
+
+       for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+               tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
+               if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+                   ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       /* Skip if the queue is already in the requested state */
+       if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+               return;
+
+       /* turn on/off the queue */
+       if (enable) {
+               wr32(hw, I40E_QTX_HEAD(pf_q), 0);
+               tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
+       } else {
+               tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+       }
+
+       wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
+}
+
 /**
  * i40e_vsi_control_tx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -4054,41 +4002,11 @@ static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q, ret = 0;
-       u32 tx_reg;
+       int i, pf_q, ret = 0;
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-
-               /* warn the TX unit of coming changes */
-               i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
-               if (!enable)
-                       usleep_range(10, 20);
-
-               for (j = 0; j < 50; j++) {
-                       tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-                       if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
-                           ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
-                               break;
-                       usleep_range(1000, 2000);
-               }
-               /* Skip if the queue is already in the requested state */
-               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                       continue;
-
-               /* turn on/off the queue */
-               if (enable) {
-                       wr32(hw, I40E_QTX_HEAD(pf_q), 0);
-                       tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
-               } else {
-                       tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
-               }
-
-               wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
-               /* No waiting for the Tx queue to disable */
-               if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
-                       continue;
+               i40e_control_tx_q(pf, pf_q, enable);
 
                /* wait for the change to finish */
                ret = i40e_pf_txq_wait(pf, pf_q, enable);
@@ -4100,8 +4018,6 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                }
        }
 
-       if (hw->revision_id == 0)
-               mdelay(50);
        return ret;
 }
 
@@ -4134,6 +4050,43 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
        return 0;
 }
 
+/**
+ * i40e_control_rx_q - Start or stop a particular Rx queue
+ * @pf: the PF structure
+ * @pf_q: the PF queue to configure
+ * @enable: start or stop the queue
+ *
+ * This function enables or disables a single queue. Note that any delay
+ * required after the operation is expected to be handled by the caller of
+ * this function.
+ **/
+static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 rx_reg;
+       int i;
+
+       for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
+               rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
+               if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+                   ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+                       break;
+               usleep_range(1000, 2000);
+       }
+
+       /* Skip if the queue is already in the requested state */
+       if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+               return;
+
+       /* turn on/off the queue */
+       if (enable)
+               rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
+       else
+               rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+
+       wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
+}
+
 /**
  * i40e_vsi_control_rx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -4142,33 +4095,11 @@ static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
 static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q, ret = 0;
-       u32 rx_reg;
+       int i, pf_q, ret = 0;
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               for (j = 0; j < 50; j++) {
-                       rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-                       if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
-                           ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
-                               break;
-                       usleep_range(1000, 2000);
-               }
-
-               /* Skip if the queue is already in the requested state */
-               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                       continue;
-
-               /* turn on/off the queue */
-               if (enable)
-                       rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
-               else
-                       rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
-               wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
-               /* No waiting for the Tx queue to disable */
-               if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
-                       continue;
+               i40e_control_rx_q(pf, pf_q, enable);
 
                /* wait for the change to finish */
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
@@ -4180,6 +4111,12 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                }
        }
 
+       /* Due to HW errata, on Rx disable only, the register can indicate done
+        * before it really is. Needs 50ms to be sure
+        */
+       if (!enable)
+               mdelay(50);
+
        return ret;
 }
 
@@ -4206,6 +4143,10 @@ int i40e_vsi_start_rings(struct i40e_vsi *vsi)
  **/
 void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
 {
+       /* When port TX is suspended, don't wait */
+       if (test_bit(__I40E_PORT_SUSPENDED, &vsi->back->state))
+               return i40e_vsi_stop_rings_no_wait(vsi);
+
        /* do rx first for enable and last for disable
         * Ignore return value, we need to shutdown whatever we can
         */
@@ -4213,6 +4154,29 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
        i40e_vsi_control_rx(vsi, false);
 }
 
+/**
+ * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
+ * @vsi: the VSI being shutdown
+ *
+ * This function stops all the rings for a VSI but does not delay to verify
+ * that rings have been disabled. It is expected that the caller is shutting
+ * down multiple VSIs at once and will delay together for all the VSIs after
+ * initiating the shutdown. This is particularly useful for shutting down lots
+ * of VFs together. Otherwise, a large delay can be incurred while configuring
+ * each VSI in serial.
+ **/
+void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       int i, pf_q;
+
+       pf_q = vsi->base_queue;
+       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+               i40e_control_tx_q(pf, pf_q, false);
+               i40e_control_rx_q(pf, pf_q, false);
+       }
+}
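Together with i40e_vsi_wait_queues_disabled() losing its static qualifier later in this patch, the no-wait variant above lets a caller that tears down many VSIs (such as the VF teardown path named in the commit subject) start every stop first and wait only once. A rough sketch of that calling pattern, with error and VF-state checks omitted and field names (num_alloc_vfs, lan_vsi_idx) taken from the driver's VF bookkeeping:

	int i;

	/* kick off the stop on every VF VSI without waiting */
	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[i].lan_vsi_idx]);

	/* then wait once for all of them to actually go quiet */
	for (i = 0; i < pf->num_alloc_vfs; i++)
		i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);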
+
 /**
  * i40e_vsi_free_irq - Free the irq association with the OS
  * @vsi: the VSI being configured
@@ -4471,17 +4435,16 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_close(struct i40e_vsi *vsi)
 {
-       bool reset = false;
-
+       struct i40e_pf *pf = vsi->back;
        if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
                i40e_down(vsi);
        i40e_vsi_free_irq(vsi);
        i40e_vsi_free_tx_resources(vsi);
        i40e_vsi_free_rx_resources(vsi);
        vsi->current_netdev_flags = 0;
-       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-               reset = true;
-       i40e_notify_client_of_netdev_close(vsi, reset);
+       pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
+               pf->flags |= I40E_FLAG_CLIENT_RESET;
 }
 
 /**
@@ -4493,14 +4456,6 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;
 
-       /* No need to disable FCoE VSI when Tx suspended */
-       if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
-           vsi->type == I40E_VSI_FCOE) {
-               dev_dbg(&vsi->back->pdev->dev,
-                        "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
-               return;
-       }
-
        set_bit(__I40E_NEEDS_RESTART, &vsi->state);
        if (vsi->netdev && netif_running(vsi->netdev))
                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
@@ -4552,21 +4507,20 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
        }
 }
 
-#ifdef CONFIG_I40E_DCB
 /**
  * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
  * @vsi: the VSI being configured
  *
- * This function waits for the given VSI's queues to be disabled.
+ * Wait until all queues on a given VSI have been disabled.
  **/
-static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
+int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
        int i, pf_q, ret;
 
        pf_q = vsi->base_queue;
        for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               /* Check and wait for the disable status of the queue */
+               /* Check and wait for the Tx queue */
                ret = i40e_pf_txq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
@@ -4574,11 +4528,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
                                 vsi->seid, pf_q);
                        return ret;
                }
-       }
-
-       pf_q = vsi->base_queue;
-       for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
-               /* Check and wait for the disable status of the queue */
+               /* Check and wait for the Rx queue */
                ret = i40e_pf_rxq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
@@ -4591,6 +4541,7 @@ static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
        return 0;
 }
 
+#ifdef CONFIG_I40E_DCB
 /**
  * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
  * @pf: the PF
@@ -4603,8 +4554,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
        int v, ret = 0;
 
        for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
-               /* No need to wait for FCoE VSI queues */
-               if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
+               if (pf->vsi[v]) {
                        ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
                        if (ret)
                                break;
@@ -4622,16 +4572,15 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
  * @vsi: Pointer to VSI struct
  *
  * This function checks specified queue for given VSI. Detects hung condition.
- * Sets hung bit since it is two step process. Before next run of service task
- * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
- * hung condition remain unchanged and during subsequent run, this function
- * issues SW interrupt to recover from hung condition.
+ * We proactively detect hung TX queues by checking if interrupts are disabled
+ * but there are pending descriptors.  If it appears hung, attempt to recover
+ * by triggering a SW interrupt.
  **/
 static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
        struct i40e_ring *tx_ring = NULL;
        struct i40e_pf  *pf;
-       u32 head, val, tx_pending_hw;
+       u32 val, tx_pending;
        int i;
 
        pf = vsi->back;
@@ -4657,47 +4606,15 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
        else
                val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-       head = i40e_get_head(tx_ring);
-
-       tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
+       tx_pending = i40e_get_tx_pending(tx_ring);
 
-       /* HW is done executing descriptors, updated HEAD write back,
-        * but SW hasn't processed those descriptors. If interrupt is
-        * not generated from this point ON, it could result into
-        * dev_watchdog detecting timeout on those netdev_queue,
-        * hence proactively trigger SW interrupt.
+       /* Interrupts are disabled and Tx descriptors are still pending,
+        * so trigger the SW interrupt right away (don't wait). Worst case
+        * we take one extra interrupt that finds nothing to clean because
+        * the queues have already been cleaned.
+        */
-       if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-               /* NAPI Poll didn't run and clear since it was set */
-               if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
-                                      &tx_ring->q_vector->hung_detected)) {
-                       netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-                                   vsi->seid, q_idx, tx_pending_hw,
-                                   tx_ring->next_to_clean, head,
-                                   tx_ring->next_to_use,
-                                   readl(tx_ring->tail));
-                       netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
-                                   vsi->seid, q_idx, val);
-                       i40e_force_wb(vsi, tx_ring->q_vector);
-               } else {
-                       /* First Chance - detected possible hung */
-                       set_bit(I40E_Q_VECTOR_HUNG_DETECT,
-                               &tx_ring->q_vector->hung_detected);
-               }
-       }
-
-       /* This is the case where we have interrupts missing,
-        * so the tx_pending in HW will most likely be 0, but we
-        * will have tx_pending in SW since the WB happened but the
-        * interrupt got lost.
-        */
-       if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
-           (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
-               local_bh_disable();
-               if (napi_reschedule(&tx_ring->q_vector->napi))
-                       tx_ring->tx_stats.tx_lost_interrupt++;
-               local_bh_enable();
-       }
+       if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
+               i40e_force_wb(vsi, tx_ring->q_vector);
 }
 
 /**
@@ -5228,20 +5145,12 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                        continue;
 
                /* - Enable all TCs for the LAN VSI
-#ifdef I40E_FCOE
-                * - For FCoE VSI only enable the TC configured
-                *   as per the APP TLV
-#endif
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
-#ifdef I40E_FCOE
-               if (pf->vsi[v]->type == I40E_VSI_FCOE)
-                       tc_map = i40e_get_fcoe_tc_map(pf);
-#endif /* #ifdef I40E_FCOE */
 
                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
@@ -5308,10 +5217,6 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
                    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
                        dev_info(&pf->pdev->dev,
                                 "DCBX offload is not supported or is disabled for this PF.\n");
-
-                       if (pf->flags & I40E_FLAG_MFP_ENABLED)
-                               goto out;
-
                } else {
                        /* When status is not DISABLED then DCBX in FW */
                        pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
@@ -5472,13 +5377,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
        /* replay FDIR SB filters */
        if (vsi->type == I40E_VSI_FDIR) {
                /* reset fd counters */
-               pf->fd_add_err = pf->fd_atr_cnt = 0;
-               if (pf->fd_tcp_rule > 0) {
-                       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
-                       if (I40E_DEBUG_FD & pf->hw.debug_mask)
-                               dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
-                       pf->fd_tcp_rule = 0;
-               }
+               pf->fd_add_err = 0;
+               pf->fd_atr_cnt = 0;
                i40e_fdir_filter_restore(vsi);
        }
 
@@ -5550,8 +5450,6 @@ void i40e_down(struct i40e_vsi *vsi)
                i40e_clean_rx_ring(vsi->rx_rings[i]);
        }
 
-       i40e_notify_client_of_netdev_close(vsi, false);
-
 }
 
 /**
@@ -5612,17 +5510,15 @@ exit:
        return ret;
 }
 
-#ifdef I40E_FCOE
-int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
-                   struct tc_to_netdev *tc)
-#else
 static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
                           struct tc_to_netdev *tc)
-#endif
 {
-       if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
+       if (tc->type != TC_SETUP_MQPRIO)
                return -EINVAL;
-       return i40e_setup_tc(netdev, tc->tc);
+
+       tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+       return i40e_setup_tc(netdev, tc->mqprio->num_tc);
 }
 
 /**
@@ -5675,6 +5571,8 @@ int i40e_open(struct net_device *netdev)
  * Finish initialization of the VSI.
  *
  * Returns 0 on success, negative value on failure
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
 int i40e_vsi_open(struct i40e_vsi *vsi)
 {
@@ -5738,7 +5636,7 @@ err_setup_rx:
 err_setup_tx:
        i40e_vsi_free_tx_resources(vsi);
        if (vsi == pf->vsi[pf->lan_vsi])
-               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
 
        return err;
 }
@@ -5753,6 +5651,7 @@ err_setup_tx:
 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
 {
        struct i40e_fdir_filter *filter;
+       struct i40e_flex_pit *pit_entry, *tmp;
        struct hlist_node *node2;
 
        hlist_for_each_entry_safe(filter, node2,
@@ -5760,7 +5659,43 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
                hlist_del(&filter->fdir_node);
                kfree(filter);
        }
+
+       list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
+               list_del(&pit_entry->list);
+               kfree(pit_entry);
+       }
+       INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+
+       list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
+               list_del(&pit_entry->list);
+               kfree(pit_entry);
+       }
+       INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
        pf->fdir_pf_active_filters = 0;
+       pf->fd_tcp4_filter_cnt = 0;
+       pf->fd_udp4_filter_cnt = 0;
+       pf->fd_sctp4_filter_cnt = 0;
+       pf->fd_ip4_filter_cnt = 0;
+
+       /* Reprogram the default input set for TCP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for UDP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for SCTP/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
+                               I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
+       /* Reprogram the default input set for Other/IPv4 */
+       i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
+                               I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
 }
 
 /**
@@ -5787,12 +5722,14 @@ int i40e_close(struct net_device *netdev)
  * i40e_do_reset - Start a PF or Core Reset sequence
  * @pf: board private structure
  * @reset_flags: which reset is requested
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * The essential difference in resets is that the PF Reset
  * doesn't clear the packet buffers, doesn't reset the PE
  * firmware, and doesn't bother the other PFs on the chip.
  **/
-void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
+void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
 {
        u32 val;
 
@@ -5838,7 +5775,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                 * for the Core Reset.
                 */
                dev_dbg(&pf->pdev->dev, "PFR requested\n");
-               i40e_handle_reset_warning(pf);
+               i40e_handle_reset_warning(pf, lock_acquired);
 
        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
@@ -6007,7 +5944,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
        else
                pf->flags &= ~I40E_FLAG_DCB_ENABLED;
 
-       set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+       set_bit(__I40E_PORT_SUSPENDED, &pf->state);
        /* Reconfiguration needed quiesce all VSIs */
        i40e_pf_quiesce_all_vsi(pf);
 
@@ -6016,7 +5953,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
 
        ret = i40e_resume_port_tx(pf);
 
-       clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
+       clear_bit(__I40E_PORT_SUSPENDED, &pf->state);
        /* In case of error no point in resuming VSIs */
        if (ret)
                goto exit;
@@ -6029,8 +5966,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
                i40e_service_event_schedule(pf);
        } else {
                i40e_pf_unquiesce_all_vsi(pf);
-               /* Notify the client for the DCB changes */
-               i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
+               pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
+                             I40E_FLAG_CLIENT_L2_CHANGE);
        }
 
 exit:
@@ -6047,7 +5984,7 @@ exit:
 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
 {
        rtnl_lock();
-       i40e_do_reset(pf, reset_flags);
+       i40e_do_reset(pf, reset_flags, true);
        rtnl_unlock();
 }
 
@@ -6152,8 +6089,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
            (pf->fd_add_err == 0) ||
            (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_SB_ENABLED)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
                }
@@ -6164,9 +6101,9 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
         */
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->fd_tcp_rule == 0)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->fd_tcp4_filter_cnt == 0)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
                }
@@ -6218,7 +6155,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
        }
 
        pf->fd_flush_timestamp = jiffies;
-       pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
+       pf->hw_disabled_flags |= I40E_FLAG_FD_ATR_ENABLED;
        /* flush all filters */
        wr32(&pf->hw, I40E_PFQF_CTL_1,
             I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
@@ -6237,8 +6174,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
        } else {
                /* replay sideband filters */
                i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
-               if (!disable_atr)
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+               if (!disable_atr && !pf->fd_tcp4_filter_cnt)
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
                if (I40E_DEBUG_FD & pf->hw.debug_mask)
                        dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
@@ -6291,9 +6228,6 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 
        switch (vsi->type) {
        case I40E_VSI_MAIN:
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-#endif
                if (!vsi->netdev || !vsi->netdev_registered)
                        break;
 
@@ -6452,7 +6386,6 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
 {
        u32 reset_flags = 0;
 
-       rtnl_lock();
        if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
                reset_flags |= BIT(__I40E_REINIT_REQUESTED);
                clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
@@ -6478,18 +6411,19 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
         * precedence before starting a new reset sequence.
         */
        if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
-               i40e_handle_reset_warning(pf);
-               goto unlock;
+               i40e_prep_for_reset(pf, false);
+               i40e_reset(pf);
+               i40e_rebuild(pf, false, false);
        }
 
        /* If we're already down or resetting, just bail */
        if (reset_flags &&
            !test_bit(__I40E_DOWN, &pf->state) &&
-           !test_bit(__I40E_CONFIG_BUSY, &pf->state))
-               i40e_do_reset(pf, reset_flags);
-
-unlock:
-       rtnl_unlock();
+           !test_bit(__I40E_CONFIG_BUSY, &pf->state)) {
+               rtnl_lock();
+               i40e_do_reset(pf, reset_flags, true);
+               rtnl_unlock();
+       }
 }
 
 /**
@@ -6635,9 +6569,11 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
                                 opcode);
                        break;
                }
-       } while (pending && (i++ < pf->adminq_work_limit));
+       } while (i++ < pf->adminq_work_limit);
+
+       if (i < pf->adminq_work_limit)
+               clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
 
-       clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
        /* re-enable Admin queue interrupt cause */
        val = rd32(hw, I40E_PFINT_ICR0_ENA);
        val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -6975,10 +6911,12 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
 /**
  * i40e_prep_for_reset - prep for the core to reset
  * @pf: board private structure
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  *
  * Close up the VFs and other things in prep for PF Reset.
   **/
-static void i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
@@ -6993,7 +6931,12 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
        /* quiesce the VSIs and their queues that are not already DOWN */
+       /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
+       if (!lock_acquired)
+               rtnl_lock();
        i40e_pf_quiesce_all_vsi(pf);
+       if (!lock_acquired)
+               rtnl_unlock();
 
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v])
@@ -7028,29 +6971,39 @@ static void i40e_send_version(struct i40e_pf *pf)
 }
 
 /**
- * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * i40e_reset - wait for a core reset to finish; reset the PF if a CORER was not seen
  * @pf: board private structure
- * @reinit: if the Main VSI needs to re-initialized.
  **/
-static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
+static int i40e_reset(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
-       u8 set_fc_aq_fail = 0;
        i40e_status ret;
-       u32 val;
-       u32 v;
 
-       /* Now we wait for GRST to settle out.
-        * We don't have to delete the VEBs or VSIs from the hw switch
-        * because the reset will make them disappear.
-        */
        ret = i40e_pf_reset(hw);
        if (ret) {
                dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
                set_bit(__I40E_RESET_FAILED, &pf->state);
-               goto clear_recovery;
+               clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+       } else {
+               pf->pfr_count++;
        }
-       pf->pfr_count++;
+       return ret;
+}
+
+/**
+ * i40e_rebuild - rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u8 set_fc_aq_fail = 0;
+       i40e_status ret;
+       u32 val;
+       int v;
 
        if (test_bit(__I40E_DOWN, &pf->state))
                goto clear_recovery;
@@ -7075,8 +7028,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                goto end_core_reset;
 
        ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-                               hw->func_caps.num_rx_qp,
-                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+                               hw->func_caps.num_rx_qp, 0, 0);
        if (ret) {
                dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
                goto end_core_reset;
@@ -7095,14 +7047,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                /* Continue without DCB enabled */
        }
 #endif /* CONFIG_I40E_DCB */
-#ifdef I40E_FCOE
-       i40e_init_pf_fcoe(pf);
-
-#endif
        /* do basic switch setup */
+       if (!lock_acquired)
+               rtnl_lock();
        ret = i40e_setup_pf_switch(pf, reinit);
        if (ret)
-               goto end_core_reset;
+               goto end_unlock;
 
        /* The driver only wants link up/down and module qualification
         * reports from firmware.  Note the negative logic.
@@ -7173,7 +7123,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                if (ret) {
                        dev_info(&pf->pdev->dev,
                                 "rebuild of Main VSI failed: %d\n", ret);
-                       goto end_core_reset;
+                       goto end_unlock;
                }
        }
 
@@ -7216,31 +7166,60 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        /* restart the VSIs that were rebuilt and running before the reset */
        i40e_pf_unquiesce_all_vsi(pf);
 
-       if (pf->num_alloc_vfs) {
-               for (v = 0; v < pf->num_alloc_vfs; v++)
-                       i40e_reset_vf(&pf->vf[v], true);
-       }
+       /* Release the RTNL lock before we start resetting VFs */
+       if (!lock_acquired)
+               rtnl_unlock();
+
+       i40e_reset_all_vfs(pf, true);
 
        /* tell the firmware that we're starting */
        i40e_send_version(pf);
 
+       /* We've already released the lock, so don't do it again */
+       goto end_core_reset;
+
+end_unlock:
+       if (!lock_acquired)
+               rtnl_unlock();
 end_core_reset:
        clear_bit(__I40E_RESET_FAILED, &pf->state);
 clear_recovery:
        clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
 }
 
+/**
+ * i40e_reset_and_rebuild - reset and rebuild using a saved config
+ * @pf: board private structure
+ * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
+ **/
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+                                  bool lock_acquired)
+{
+       int ret;
+       /* Now we wait for GRST to settle out.
+        * We don't have to delete the VEBs or VSIs from the hw switch
+        * because the reset will make them disappear.
+        */
+       ret = i40e_reset(pf);
+       if (!ret)
+               i40e_rebuild(pf, reinit, lock_acquired);
+}
+
 /**
  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
  * @pf: board private structure
  *
  * Close up the VFs and other things in prep for a Core Reset,
  * then get ready to rebuild the world.
+ * @lock_acquired: indicates whether or not the lock has been acquired
+ * before this function was called.
  **/
-static void i40e_handle_reset_warning(struct i40e_pf *pf)
+static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
 {
-       i40e_prep_for_reset(pf);
-       i40e_reset_and_rebuild(pf, false);
+       i40e_prep_for_reset(pf, lock_acquired);
+       i40e_reset_and_rebuild(pf, false, lock_acquired);
 }
 
 /**
@@ -7339,7 +7318,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                                 "Too many MDD events on VF %d, disabled\n", i);
                        dev_info(&pf->pdev->dev,
                                 "Use PF Control I/F to re-enable the VF\n");
-                       set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+                       set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
                }
        }
 
@@ -7351,6 +7330,23 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        i40e_flush(hw);
 }
 
+/**
+ * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
+ * @pf: board private structure
+ **/
+static void i40e_sync_udp_filters(struct i40e_pf *pf)
+{
+       int i;
+
+       /* loop through and set pending bit for all active UDP filters */
+       for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
+               if (pf->udp_ports[i].port)
+                       pf->pending_udp_bitmap |= BIT_ULL(i);
+       }
+
+       pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+}
+
 /**
  * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
  * @pf: board private structure
@@ -7359,7 +7355,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret;
-       __be16 port;
+       u16 port;
        int i;
 
        if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
@@ -7370,7 +7366,7 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
                if (pf->pending_udp_bitmap & BIT_ULL(i)) {
                        pf->pending_udp_bitmap &= ~BIT_ULL(i);
-                       port = pf->udp_ports[i].index;
+                       port = pf->udp_ports[i].port;
                        if (port)
                                ret = i40e_aq_add_udp_tunnel(hw, port,
                                                        pf->udp_ports[i].type,
@@ -7383,11 +7379,11 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
                                        "%s %s port %d, index %d failed, err %s aq_err %s\n",
                                        pf->udp_ports[i].type ? "vxlan" : "geneve",
                                        port ? "add" : "delete",
-                                       ntohs(port), i,
+                                       port, i,
                                        i40e_stat_str(&pf->hw, ret),
                                        i40e_aq_str(&pf->hw,
                                                    pf->hw.aq.asq_last_status));
-                               pf->udp_ports[i].index = 0;
+                               pf->udp_ports[i].port = 0;
                        }
                }
        }
@@ -7419,7 +7415,18 @@ static void i40e_service_task(struct work_struct *work)
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
-       i40e_client_subtask(pf);
+       if (pf->flags & I40E_FLAG_CLIENT_RESET) {
+               /* Client subtask will reopen next time through. */
+               i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+               pf->flags &= ~I40E_FLAG_CLIENT_RESET;
+       } else {
+               i40e_client_subtask(pf);
+               if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
+                       i40e_notify_client_of_l2_param_changes(
+                                                       pf->vsi[pf->lan_vsi]);
+                       pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
+               }
+       }
        i40e_sync_filters_subtask(pf);
        i40e_sync_udp_filters_subtask(pf);
        i40e_clean_adminq_subtask(pf);
@@ -7492,15 +7499,6 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
                                      I40E_REQ_DESCRIPTOR_MULTIPLE);
                break;
 
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-               vsi->alloc_queue_pairs = pf->num_fcoe_qps;
-               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
-                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
-               vsi->num_q_vectors = pf->num_fcoe_msix;
-               break;
-
-#endif /* I40E_FCOE */
        default:
                WARN_ON(1);
                return -ENODATA;
@@ -7817,6 +7815,7 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
 static int i40e_init_msix(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
+       int cpus, extra_vectors;
        int vectors_left;
        int v_budget, i;
        int v_actual;
@@ -7835,9 +7834,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
         *      - assumes symmetric Tx/Rx pairing
         *   - The number of VMDq pairs
         *   - The CPU count within the NUMA node if iWARP is enabled
-#ifdef I40E_FCOE
-        *   - The number of FCOE qps.
-#endif
         * Once we count this up, try the request.
         *
         * If we can't get what we want, we'll simplify to nearly nothing
@@ -7852,10 +7848,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
                vectors_left--;
        }
 
-       /* reserve vectors for the main PF traffic queues */
-       pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
+       /* reserve some vectors for the main PF traffic queues. Initially we
+        * only reserve at most 50% of the available vectors, in the case that
+        * the number of online CPUs is large. This ensures that we can enable
+        * extra features as well. Once we've enabled the other features, we
+        * will use any remaining vectors to reach as close as we can to the
+        * number of online CPUs.
+        */
+       cpus = num_online_cpus();
+       pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
        vectors_left -= pf->num_lan_msix;
-       v_budget += pf->num_lan_msix;
 
        /* reserve one vector for sideband flow director */
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -7868,20 +7870,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                }
        }
 
-#ifdef I40E_FCOE
-       /* can we reserve enough for FCoE? */
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               if (!vectors_left)
-                       pf->num_fcoe_msix = 0;
-               else if (vectors_left >= pf->num_fcoe_qps)
-                       pf->num_fcoe_msix = pf->num_fcoe_qps;
-               else
-                       pf->num_fcoe_msix = 1;
-               v_budget += pf->num_fcoe_msix;
-               vectors_left -= pf->num_fcoe_msix;
-       }
-
-#endif
        /* can we reserve enough for iWARP? */
        if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
                iwarp_requested = pf->num_iwarp_msix;
@@ -7918,6 +7906,23 @@ static int i40e_init_msix(struct i40e_pf *pf)
                }
        }
 
+       /* On systems with a large number of SMP cores, we previously limited
+        * the number of vectors for num_lan_msix to be at most 50% of the
+        * available vectors, to allow for other features. Now, we add back
+        * the remaining vectors. However, we ensure that the total
+        * num_lan_msix will not exceed num_online_cpus(). To do this, we
+        * calculate the number of vectors we can add without going over the
+        * cap of CPUs. For systems with a small number of CPUs this will be
+        * zero.
+        */
+       extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
+       pf->num_lan_msix += extra_vectors;
+       vectors_left -= extra_vectors;
+
+       WARN(vectors_left < 0,
+            "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");
+
+       v_budget += pf->num_lan_msix;
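Per the two comments above, vector budgeting is now two-phase: the LAN queues initially take at most half of what remains (capped at the CPU count), the other features then reserve theirs, and finally the LAN count is topped back up toward num_online_cpus() with any leftovers. A small standalone sketch of that arithmetic with invented vector counts; only the min()/top-up logic mirrors the comments:

    #include <stdio.h>

    static int imin(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int cpus = 32;           /* pretend num_online_cpus() */
        int vectors_left = 24;   /* whatever MSI-X budget remains */
        int other_features = 6;  /* illustrative: FDir SB, iWARP, VMDq, ... */

        /* Phase 1: LAN takes at most half, capped at the CPU count. */
        int lan = imin(cpus, vectors_left / 2);
        vectors_left -= lan;

        /* Other features reserve their vectors from what is left. */
        vectors_left -= other_features;

        /* Phase 2: top LAN back up toward the CPU count with the remainder. */
        int extra = imin(cpus - lan, vectors_left);
        lan += extra;
        vectors_left -= extra;

        printf("lan=%d extra=%d left=%d\n", lan, extra, vectors_left);
        return 0;
    }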
        pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
                                   GFP_KERNEL);
        if (!pf->msix_entries)
@@ -7958,10 +7963,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
                pf->num_vmdq_vsis = 1;
                pf->num_vmdq_qps = 1;
-#ifdef I40E_FCOE
-               pf->num_fcoe_qps = 0;
-               pf->num_fcoe_msix = 0;
-#endif
 
                /* partition out the remaining vectors */
                switch (vec) {
@@ -7975,13 +7976,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                        } else {
                                pf->num_lan_msix = 2;
                        }
-#ifdef I40E_FCOE
-                       /* give one vector to FCoE */
-                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-                               pf->num_lan_msix = 1;
-                               pf->num_fcoe_msix = 1;
-                       }
-#endif
                        break;
                default:
                        if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
@@ -8001,13 +7995,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                               (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
                                                              pf->num_lan_msix);
                        pf->num_lan_qps = pf->num_lan_msix;
-#ifdef I40E_FCOE
-                       /* give one vector to FCoE */
-                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-                               pf->num_fcoe_msix = 1;
-                               vec--;
-                       }
-#endif
                        break;
                }
        }
@@ -8028,13 +8015,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
        }
-#ifdef I40E_FCOE
-
-       if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
-               dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
-               pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
-       }
-#endif
        i40e_debug(&pf->hw, I40E_DEBUG_INIT,
                   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
                   pf->num_lan_msix,
@@ -8133,9 +8113,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
                if (vectors < 0) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
                                       I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                                      I40E_FLAG_FCOE_ENABLED   |
-#endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
                                       I40E_FLAG_DCB_ENABLED    |
@@ -8368,13 +8345,10 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
 
                if (vsi->type == I40E_VSI_MAIN) {
                        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
-                                                 seed_dw[i]);
+                               wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
                } else if (vsi->type == I40E_VSI_SRIOV) {
                        for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw,
-                                                 I40E_VFQF_HKEY1(i, vf_id),
-                                                 seed_dw[i]);
+                               wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
                } else {
                        dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
                }
@@ -8392,9 +8366,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
                        if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
                                return -EINVAL;
                        for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
-                               i40e_write_rx_ctl(hw,
-                                                 I40E_VFQF_HLUT1(i, vf_id),
-                                                 lut_dw[i]);
+                               wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
                } else {
                        dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
                }
@@ -8522,9 +8494,12 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
        i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
 
        /* Determine the RSS size of the VSI */
-       if (!vsi->rss_size)
-               vsi->rss_size = min_t(int, pf->alloc_rss_size,
-                                     vsi->num_queue_pairs);
+       if (!vsi->rss_size) {
+               u16 qcount;
+
+               qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+               vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
+       }
        if (!vsi->rss_size)
                return -EINVAL;
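The RSS sizing now divides the VSI's queue pairs by the number of enabled traffic classes before clamping against the PF-wide allocation, so the lookup table only spreads across the queues of a single TC. A quick worked example of the arithmetic, with invented counts:

    #include <stdio.h>

    static int imin(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
        int num_queue_pairs = 16;  /* queue pairs allocated to the VSI */
        int numtc = 4;             /* enabled traffic classes */
        int alloc_rss_size = 8;    /* PF-wide RSS allocation */

        /* Old behaviour: clamp against all queue pairs. */
        int old_rss = imin(alloc_rss_size, num_queue_pairs);  /* -> 8 */

        /* New behaviour: clamp against the per-TC queue count. */
        int qcount = num_queue_pairs / numtc;                  /* -> 4 */
        int new_rss = imin(alloc_rss_size, qcount);            /* -> 4 */

        printf("old=%d new=%d\n", old_rss, new_rss);
        return 0;
    }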
 
@@ -8558,6 +8533,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
  *
  * returns 0 if rss is not enabled, if enabled returns the final rss queue
  * count which may be different from the requested queue count.
+ * Note: expects to be called while under rtnl_lock()
  **/
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
 {
@@ -8570,12 +8546,14 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
        new_rss_size = min_t(int, queue_count, pf->rss_size_max);
 
        if (queue_count != vsi->num_queue_pairs) {
+               u16 qcount;
+
                vsi->req_queue_pairs = queue_count;
-               i40e_prep_for_reset(pf);
+               i40e_prep_for_reset(pf, true);
 
                pf->alloc_rss_size = new_rss_size;
 
-               i40e_reset_and_rebuild(pf, true);
+               i40e_reset_and_rebuild(pf, true, true);
 
                /* Discard the user configured hash keys and lut, if less
                 * queues are enabled.
@@ -8587,8 +8565,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
                }
 
                /* Reset vsi->rss_size, as number of enabled queues changed */
-               vsi->rss_size = min_t(int, pf->alloc_rss_size,
-                                     vsi->num_queue_pairs);
+               qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
+               vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
 
                i40e_pf_config_rss(pf);
        }
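Throughout this patch the reset helpers gain a lock_acquired argument; paths like this one, which the new kernel-doc note says run under rtnl_lock(), pass true so the helper skips taking the lock itself, while other callers pass false and let the helper lock. A minimal sketch of that convention, using a pthread mutex purely as a stand-in for rtnl_lock():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Does the work; only takes the lock if the caller does not already hold it. */
    static void do_reset(bool lock_acquired)
    {
        if (!lock_acquired)
            pthread_mutex_lock(&cfg_lock);

        puts("resetting under the config lock");

        if (!lock_acquired)
            pthread_mutex_unlock(&cfg_lock);
    }

    int main(void)
    {
        /* Caller A already holds the lock (like an rtnl-locked ethtool path). */
        pthread_mutex_lock(&cfg_lock);
        do_reset(true);
        pthread_mutex_unlock(&cfg_lock);

        /* Caller B does not, so the helper locks for itself. */
        do_reset(false);
        return 0;
    }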
@@ -8821,10 +8799,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_iwarp_msix = (int)num_online_cpus() + 1;
        }
 
-#ifdef I40E_FCOE
-       i40e_init_pf_fcoe(pf);
-
-#endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
        if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -8851,9 +8825,9 @@ static int i40e_sw_init(struct i40e_pf *pf)
                    (pf->hw.aq.api_min_ver > 4))) {
                /* Supported in FW API version higher than 1.4 */
                pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
-               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
        } else {
-               pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
+               pf->hw_disabled_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
        }
 
        pf->eeprom_version = 0xDEAD;
@@ -8914,14 +8888,14 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                        i40e_fdir_filter_exit(pf);
                }
                pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               pf->hw_disabled_flags &= ~I40E_FLAG_FD_SB_ENABLED;
                /* reset fd counters */
-               pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
-               pf->fdir_pf_active_filters = 0;
+               pf->fd_add_err = 0;
+               pf->fd_atr_cnt = 0;
                /* if ATR was auto disabled it can be re-enabled. */
                if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
-                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
-                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                   (pf->hw_disabled_flags & I40E_FLAG_FD_ATR_ENABLED)) {
+                       pf->hw_disabled_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
                        if (I40E_DEBUG_FD & pf->hw.debug_mask)
                                dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
                }
@@ -8955,6 +8929,7 @@ static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
  * i40e_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
  * @features: the feature set that the stack is suggesting
+ * Note: expects to be called while under rtnl_lock()
  **/
 static int i40e_set_features(struct net_device *netdev,
                             netdev_features_t features)
@@ -8978,7 +8953,7 @@ static int i40e_set_features(struct net_device *netdev,
        need_reset = i40e_set_ntuple(pf, features);
 
        if (need_reset)
-               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED), true);
 
        return 0;
 }
@@ -8990,12 +8965,12 @@ static int i40e_set_features(struct net_device *netdev,
  *
  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
  **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
+static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
 {
        u8 i;
 
        for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
-               if (pf->udp_ports[i].index == port)
+               if (pf->udp_ports[i].port == port)
                        return i;
        }
 
@@ -9013,7 +8988,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       __be16 port = ti->port;
+       u16 port = ntohs(ti->port);
        u8 next_idx;
        u8 idx;
 
@@ -9021,8 +8996,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
 
        /* Check if port already exists */
        if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
-               netdev_info(netdev, "port %d already offloaded\n",
-                           ntohs(port));
+               netdev_info(netdev, "port %d already offloaded\n", port);
                return;
        }
 
@@ -9031,7 +9005,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
 
        if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
                netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
-                           ntohs(port));
+                           port);
                return;
        }
 
@@ -9049,7 +9023,7 @@ static void i40e_udp_tunnel_add(struct net_device *netdev,
        }
 
        /* New port: add it and mark its index in the bitmap */
-       pf->udp_ports[next_idx].index = port;
+       pf->udp_ports[next_idx].port = port;
        pf->pending_udp_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 }
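The udp_ports[].port field is now kept in host byte order: ntohs() happens once where the stack hands over the big-endian ti->port, so the later comparisons, %d log messages and table lookups all work on a plain u16. A tiny sketch of converting at that boundary, with an invented port table:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PORTS 16

    static uint16_t ports[MAX_PORTS];  /* kept in host byte order */

    static void tunnel_add(uint16_t be_port /* big-endian, as delivered */)
    {
        uint16_t port = ntohs(be_port);  /* convert once at the boundary */

        for (int i = 0; i < MAX_PORTS; i++) {
            if (!ports[i]) {
                ports[i] = port;
                printf("offloading UDP port %u at index %d\n", (unsigned)port, i);
                return;
            }
        }
        printf("no free slot for port %u\n", (unsigned)port);
    }

    int main(void)
    {
        tunnel_add(htons(4789));  /* VXLAN default port */
        tunnel_add(htons(6081));  /* Geneve default port */
        return 0;
    }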
@@ -9065,7 +9039,7 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
-       __be16 port = ti->port;
+       u16 port = ntohs(ti->port);
        u8 idx;
 
        idx = i40e_get_udp_port_idx(pf, port);
@@ -9090,14 +9064,14 @@ static void i40e_udp_tunnel_del(struct net_device *netdev,
        /* if port exists, set it to 0 (mark for deletion)
         * and make it pending
         */
-       pf->udp_ports[idx].index = 0;
+       pf->udp_ports[idx].port = 0;
        pf->pending_udp_bitmap |= BIT_ULL(idx);
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 
        return;
 not_found:
        netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
-                   ntohs(port));
+                   port);
 }
 
 static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -9174,6 +9148,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
  * is to change the mode then that requires a PF reset to
  * allow rebuild of the components with required hardware
  * bridge mode enabled.
+ *
+ * Note: expects to be called while under rtnl_lock()
  **/
 static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                   struct nlmsghdr *nlh,
@@ -9229,7 +9205,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
                                pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
                        else
                                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED),
+                                     true);
                        break;
                }
        }
@@ -9352,10 +9329,6 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = __i40e_setup_tc,
-#ifdef I40E_FCOE
-       .ndo_fcoe_enable        = i40e_fcoe_enable,
-       .ndo_fcoe_disable       = i40e_fcoe_disable,
-#endif
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
@@ -9388,6 +9361,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        u8 broadcast[ETH_ALEN];
        u8 mac_addr[ETH_ALEN];
        int etherdev_size;
+       netdev_features_t hw_enc_features;
+       netdev_features_t hw_features;
 
        etherdev_size = sizeof(struct i40e_netdev_priv);
        netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
@@ -9398,52 +9373,57 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features |= NETIF_F_SG                   |
-                                  NETIF_F_IP_CSUM              |
-                                  NETIF_F_IPV6_CSUM            |
-                                  NETIF_F_HIGHDMA              |
-                                  NETIF_F_SOFT_FEATURES        |
-                                  NETIF_F_TSO                  |
-                                  NETIF_F_TSO_ECN              |
-                                  NETIF_F_TSO6                 |
-                                  NETIF_F_GSO_GRE              |
-                                  NETIF_F_GSO_GRE_CSUM         |
-                                  NETIF_F_GSO_IPXIP4           |
-                                  NETIF_F_GSO_IPXIP6           |
-                                  NETIF_F_GSO_UDP_TUNNEL       |
-                                  NETIF_F_GSO_UDP_TUNNEL_CSUM  |
-                                  NETIF_F_GSO_PARTIAL          |
-                                  NETIF_F_SCTP_CRC             |
-                                  NETIF_F_RXHASH               |
-                                  NETIF_F_RXCSUM               |
-                                  0;
+       hw_enc_features = NETIF_F_SG                    |
+                         NETIF_F_IP_CSUM               |
+                         NETIF_F_IPV6_CSUM             |
+                         NETIF_F_HIGHDMA               |
+                         NETIF_F_SOFT_FEATURES         |
+                         NETIF_F_TSO                   |
+                         NETIF_F_TSO_ECN               |
+                         NETIF_F_TSO6                  |
+                         NETIF_F_GSO_GRE               |
+                         NETIF_F_GSO_GRE_CSUM          |
+                         NETIF_F_GSO_PARTIAL           |
+                         NETIF_F_GSO_UDP_TUNNEL        |
+                         NETIF_F_GSO_UDP_TUNNEL_CSUM   |
+                         NETIF_F_SCTP_CRC              |
+                         NETIF_F_RXHASH                |
+                         NETIF_F_RXCSUM                |
+                         0;
 
        if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
                netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
 
        netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
 
+       netdev->hw_enc_features |= hw_enc_features;
+
        /* record features VLANs can make use of */
-       netdev->vlan_features |= netdev->hw_enc_features |
-                                NETIF_F_TSO_MANGLEID;
+       netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
        if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
                netdev->hw_features |= NETIF_F_NTUPLE;
+       hw_features = hw_enc_features           |
+                     NETIF_F_HW_VLAN_CTAG_TX   |
+                     NETIF_F_HW_VLAN_CTAG_RX;
 
-       netdev->hw_features |= netdev->hw_enc_features  |
-                              NETIF_F_HW_VLAN_CTAG_TX  |
-                              NETIF_F_HW_VLAN_CTAG_RX;
+       netdev->hw_features |= hw_features;
 
-       netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+       netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
        netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
 
        if (vsi->type == I40E_VSI_MAIN) {
                SET_NETDEV_DEV(netdev, &pf->pdev->dev);
                ether_addr_copy(mac_addr, hw->mac.perm_addr);
-               /* The following steps are necessary to prevent reception
-                * of tagged packets - some older NVM configurations load a
-                * default a MAC-VLAN filter that accepts any tagged packet
-                * which must be replaced by a normal filter.
+               /* The following steps are necessary for two reasons. First,
+                * some older NVM configurations load a default MAC-VLAN
+                * filter that will accept any tagged packet, and we want to
+                * replace this with a normal filter. Additionally, it is
+                * possible our MAC address was provided by the platform using
+                * Open Firmware or similar.
+                *
+                * Thus, we need to remove the default filter and install one
+                * specific to the MAC address.
                 */
                i40e_rm_default_mac_filter(vsi, mac_addr);
                spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -9489,9 +9469,6 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        netdev->netdev_ops = &i40e_netdev_ops;
        netdev->watchdog_timeo = 5 * HZ;
        i40e_set_ethtool_ops(netdev);
-#ifdef I40E_FCOE
-       i40e_fcoe_config_netdev(netdev, vsi);
-#endif
 
        /* MTU range: 68 - 9706 */
        netdev->min_mtu = ETH_MIN_MTU;
@@ -9715,16 +9692,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
 
-#ifdef I40E_FCOE
-       case I40E_VSI_FCOE:
-               ret = i40e_fcoe_vsi_init(vsi, &ctxt);
-               if (ret) {
-                       dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
-                       return ret;
-               }
-               break;
-
-#endif /* I40E_FCOE */
        case I40E_VSI_IWARP:
                /* send down message to iWARP */
                break;
@@ -10141,7 +10108,6 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                        }
                }
        case I40E_VSI_VMDQ2:
-       case I40E_VSI_FCOE:
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
@@ -10789,6 +10755,9 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 
        i40e_ptp_init(pf);
 
+       /* repopulate tunnel port filters */
+       i40e_sync_udp_filters(pf);
+
        return ret;
 }
 
@@ -10801,9 +10770,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        int queues_left;
 
        pf->num_lan_qps = 0;
-#ifdef I40E_FCOE
-       pf->num_fcoe_qps = 0;
-#endif
 
        /* Find the max queues to be put into basic use.  We'll always be
         * using TC0, whether or not DCB is running, and TC0 will get the
@@ -10820,9 +10786,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
                               I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                              I40E_FLAG_FCOE_ENABLED   |
-#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
@@ -10839,9 +10802,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
                               I40E_FLAG_IWARP_ENABLED  |
-#ifdef I40E_FCOE
-                              I40E_FLAG_FCOE_ENABLED   |
-#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_ENABLED    |
@@ -10862,22 +10822,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
        }
 
-#ifdef I40E_FCOE
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
-               if (I40E_DEFAULT_FCOE <= queues_left) {
-                       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
-               } else if (I40E_MINIMUM_FCOE <= queues_left) {
-                       pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
-               } else {
-                       pf->num_fcoe_qps = 0;
-                       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
-                       dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
-               }
-
-               queues_left -= pf->num_fcoe_qps;
-       }
-
-#endif
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
                if (queues_left > 1) {
                        queues_left -= 1; /* save 1 queue for FD */
@@ -10909,9 +10853,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
                pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
                queues_left);
-#ifdef I40E_FCOE
-       dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
-#endif
 }
 
 /**
@@ -10978,10 +10919,6 @@ static void i40e_print_features(struct i40e_pf *pf)
        i += snprintf(&buf[i], REMAIN(i), " Geneve");
        if (pf->flags & I40E_FLAG_PTP)
                i += snprintf(&buf[i], REMAIN(i), " PTP");
-#ifdef I40E_FCOE
-       if (pf->flags & I40E_FLAG_FCOE_ENABLED)
-               i += snprintf(&buf[i], REMAIN(i), " FCOE");
-#endif
        if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
                i += snprintf(&buf[i], REMAIN(i), " VEB");
        else
@@ -10994,20 +10931,18 @@ static void i40e_print_features(struct i40e_pf *pf)
 
 /**
  * i40e_get_platform_mac_addr - get platform-specific MAC address
- *
  * @pdev: PCI device information struct
  * @pf: board private structure
  *
- * Look up the MAC address in Open Firmware  on systems that support it,
- * and use IDPROM on SPARC if no OF address is found. On return, the
- * I40E_FLAG_PF_MAC will be wset in pf->flags if a platform-specific value
- * has been selected.
+ * Look up the MAC address for the device. First we'll try
+ * eth_platform_get_mac_address, which will check Open Firmware or an
+ * arch-specific fallback. Otherwise, we'll default to the stored value in
+ * firmware.
  **/
 static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
 {
-       pf->flags &= ~I40E_FLAG_PF_MAC;
-       if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
-               pf->flags |= I40E_FLAG_PF_MAC;
+       if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
+               i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
 }
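i40e_get_platform_mac_addr() is now a straightforward fallback chain: prefer a platform-provided address (Open Firmware, or IDPROM on SPARC, via eth_platform_get_mac_address()) and only read the firmware-stored address when that fails. A hedged sketch of the same try-then-fall-back shape; both lookup helpers below are stand-ins, not real kernel APIs:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for eth_platform_get_mac_address(): returns 0 on success. */
    static int platform_get_mac(unsigned char mac[6])
    {
        (void)mac;
        return -1;  /* pretend the platform has nothing to offer */
    }

    /* Stand-in for reading the address the adapter firmware reports. */
    static void firmware_get_mac(unsigned char mac[6])
    {
        const unsigned char fw[6] = { 0x02, 0x00, 0x00, 0xab, 0xcd, 0xef };
        memcpy(mac, fw, sizeof(fw));
    }

    static void get_mac(unsigned char mac[6])
    {
        if (platform_get_mac(mac))   /* non-zero: no platform override found */
            firmware_get_mac(mac);   /* fall back to the stored value */
    }

    int main(void)
    {
        unsigned char mac[6];

        get_mac(mac);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return 0;
    }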
 
 /**
@@ -11098,6 +11033,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->bus.bus_id = pdev->bus->number;
        pf->instance = pfs_found;
 
+       INIT_LIST_HEAD(&pf->l3_flex_pit_list);
+       INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+
        /* set up the locks for the AQ, do this only once in probe
         * and destroy them only once in remove
         */
@@ -11196,8 +11134,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
-                               hw->func_caps.num_rx_qp,
-                               pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
+                               hw->func_caps.num_rx_qp, 0, 0);
        if (err) {
                dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
                goto err_init_lan_hmc;
@@ -11219,9 +11156,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                i40e_aq_stop_lldp(hw, true, NULL);
        }
 
-       i40e_get_mac_addr(hw, hw->mac.addr);
        /* allow a platform config to override the HW addr */
        i40e_get_platform_mac_addr(pdev, pf);
+
        if (!is_valid_ether_addr(hw->mac.addr)) {
                dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
                err = -EIO;
@@ -11232,18 +11169,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        i40e_get_port_mac_addr(hw, hw->mac.port_addr);
        if (is_valid_ether_addr(hw->mac.port_addr))
                pf->flags |= I40E_FLAG_PORT_ID_VALID;
-#ifdef I40E_FCOE
-       err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
-       if (err)
-               dev_info(&pdev->dev,
-                        "(non-fatal) SAN MAC retrieval failed: %d\n", err);
-       if (!is_valid_ether_addr(hw->mac.san_addr)) {
-               dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
-                        hw->mac.san_addr);
-               ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
-       }
-       dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
-#endif /* I40E_FCOE */
 
        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);
@@ -11262,7 +11187,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        INIT_WORK(&pf->service_task, i40e_service_task);
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
-       pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
 
        /* NVM bit on means WoL disabled for the port */
        i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
@@ -11434,16 +11358,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                  round_jiffies(jiffies + pf->service_timer_period));
 
        /* add this PF to client device list and launch a client service task */
-       err = i40e_lan_add_device(pf);
-       if (err)
-               dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
-                        err);
-
-#ifdef I40E_FCOE
-       /* create FCoE interface */
-       i40e_fcoe_vsi_setup(pf);
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               err = i40e_lan_add_device(pf);
+               if (err)
+                       dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+                                err);
+       }
 
-#endif
 #define PCI_SPEED_SIZE 8
 #define PCI_WIDTH_SIZE 8
        /* Devices on the IOSF bus do not have this information
@@ -11589,6 +11510,11 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->service_task.func)
                cancel_work_sync(&pf->service_task);
 
+       /* Client close must be called explicitly here because the timer
+        * has been stopped.
+        */
+       i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
@@ -11615,10 +11541,11 @@ static void i40e_remove(struct pci_dev *pdev)
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
        /* remove attached clients */
-       ret_code = i40e_lan_del_device(pf);
-       if (ret_code) {
-               dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
-                        ret_code);
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               ret_code = i40e_lan_del_device(pf);
+               if (ret_code)
+                       dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+                                ret_code);
        }
 
        /* shutdown and destroy the HMC */
@@ -11687,7 +11614,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
        /* shutdown all operations */
        if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
                rtnl_lock();
-               i40e_prep_for_reset(pf);
+               i40e_prep_for_reset(pf, true);
                rtnl_unlock();
        }
 
@@ -11756,7 +11683,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
                return;
 
        rtnl_lock();
-       i40e_handle_reset_warning(pf);
+       i40e_handle_reset_warning(pf, true);
        rtnl_unlock();
 }
 
@@ -11819,7 +11746,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
        set_bit(__I40E_SUSPENDED, &pf->state);
        set_bit(__I40E_DOWN, &pf->state);
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11829,11 +11756,16 @@ static void i40e_shutdown(struct pci_dev *pdev)
        cancel_work_sync(&pf->service_task);
        i40e_fdir_teardown(pf);
 
+       /* Client close must be called explicitly here because the timer
+        * has been stopped.
+        */
+       i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+
        if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE))
                i40e_enable_mc_magic_wake(pf);
 
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM,
@@ -11867,7 +11799,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
                i40e_enable_mc_magic_wake(pf);
 
        rtnl_lock();
-       i40e_prep_for_reset(pf);
+       i40e_prep_for_reset(pf, true);
        rtnl_unlock();
 
        wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -11915,7 +11847,7 @@ static int i40e_resume(struct pci_dev *pdev)
        if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
                clear_bit(__I40E_DOWN, &pf->state);
                rtnl_lock();
-               i40e_reset_and_rebuild(pf, false);
+               i40e_reset_and_rebuild(pf, false, true);
                rtnl_unlock();
        }