git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <davem@davemloft.net>
          Tue, 20 Oct 2015 13:08:27 +0000 (06:08 -0700)
committer David S. Miller <davem@davemloft.net>
          Tue, 20 Oct 2015 13:08:27 +0000 (06:08 -0700)
Conflicts:
drivers/net/usb/asix_common.c
net/ipv4/inet_connection_sock.c
net/switchdev/switchdev.c

In the inet_connection_sock.c case the request socket hashing scheme
is completely different in net-next.

The other two conflicts were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
47 files changed:
MAINTAINERS
arch/arm/net/bpf_jit_32.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/item.h
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/phy/Kconfig
drivers/net/usb/Kconfig
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/hw.h
drivers/net/wireless/ath/ath9k/init.c
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
include/net/inet_timewait_sock.h
include/net/sock.h
include/uapi/linux/openvswitch.h
include/uapi/linux/rtnetlink.h
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/core/filter.c
net/dsa/dsa.c
net/ipv4/arp.c
net/ipv6/addrconf.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/mac80211/debugfs.c
net/mac80211/status.c
net/mac80211/tx.c
net/openvswitch/actions.c
net/openvswitch/conntrack.c
net/openvswitch/flow.h
net/openvswitch/flow_netlink.c
net/openvswitch/flow_table.c
net/switchdev/switchdev.c
net/tipc/msg.h
net/tipc/node.c

diff --combined MAINTAINERS
index 9bf8683defd9c9f4765ff7893a7404daa41a2a79,fb7d2e4af2003f28c81489e314c4070de2a647d8..fb8603e2a3f35d8b6365989c2d953c05ea21ea6d
@@@ -3591,6 -3591,13 +3591,13 @@@ F:    drivers/gpu/drm/i915
  F:    include/drm/i915*
  F:    include/uapi/drm/i915*
  
+ DRM DRIVERS FOR ATMEL HLCDC
+ M:    Boris Brezillon <boris.brezillon@free-electrons.com>
+ L:    dri-devel@lists.freedesktop.org
+ S:    Supported
+ F:    drivers/gpu/drm/atmel-hlcdc/
+ F:    Documentation/devicetree/bindings/drm/atmel/
+
  DRM DRIVERS FOR EXYNOS
  M:    Inki Dae <inki.dae@samsung.com>
  M:    Joonyoung Shim <jy0922.shim@samsung.com>
@@@ -3619,6 -3626,14 +3626,14 @@@ S:    Maintaine
  F:    drivers/gpu/drm/imx/
  F:    Documentation/devicetree/bindings/drm/imx/
  
+ DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
+ M:    Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ L:    dri-devel@lists.freedesktop.org
+ T:    git git://github.com/patjak/drm-gma500
+ S:    Maintained
+ F:    drivers/gpu/drm/gma500
+ F:    include/drm/gma500*
+
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
  M:    Terje Bergström <tbergstrom@nvidia.com>
@@@ -4003,7 -4018,7 +4018,7 @@@ S:      Maintaine
  F:    sound/usb/misc/ua101.c
  
  EXTENSIBLE FIRMWARE INTERFACE (EFI)
- M:    Matt Fleming <matt.fleming@intel.com>
+ M:    Matt Fleming <matt@codeblueprint.co.uk>
  L:    linux-efi@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
  S:    Maintained
@@@ -4018,7 -4033,7 +4033,7 @@@ F:      include/linux/efi*.
  EFI VARIABLE FILESYSTEM
  M:    Matthew Garrett <matthew.garrett@nebula.com>
  M:    Jeremy Kerr <jk@ozlabs.org>
- M:    Matt Fleming <matt.fleming@intel.com>
+ M:    Matt Fleming <matt@codeblueprint.co.uk>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
  L:    linux-efi@vger.kernel.org
  S:    Maintained
@@@ -5546,7 -5561,7 +5561,7 @@@ F:      drivers/net/wireless/iwlegacy
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Johannes Berg <johannes.berg@intel.com>
  M:    Emmanuel Grumbach <emmanuel.grumbach@intel.com>
 -M:    Intel Linux Wireless <ilw@linux.intel.com>
 +M:    Intel Linux Wireless <linuxwifi@intel.com>
  L:    linux-wireless@vger.kernel.org
  W:    http://intellinuxwireless.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
@@@ -6093,13 -6108,6 +6108,13 @@@ F:    Documentation/auxdisplay/ks010
  F:    drivers/auxdisplay/ks0108.c
  F:    include/linux/ks0108.h
  
 +L3MDEV
 +M:    David Ahern <dsa@cumulusnetworks.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    net/l3mdev
 +F:    include/net/l3mdev.h
 +
  LAPB module
  L:    linux-x25@vger.kernel.org
  S:    Orphan
@@@ -6785,7 -6793,6 +6800,6 @@@ F:      drivers/scsi/megaraid
  
  MELLANOX ETHERNET DRIVER (mlx4_en)
  M:    Amir Vadai <amirv@mellanox.com>
- M:    Ido Shamay <idos@mellanox.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  W:    http://www.mellanox.com
@@@ -6978,7 -6985,6 +6992,7 @@@ M:      Alan Ott <alan@signal11.us
  L:    linux-wpan@vger.kernel.org
  S:    Maintained
  F:    drivers/net/ieee802154/mrf24j40.c
 +F:    Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
  
  MSI LAPTOP SUPPORT
  M:    "Lee, Chun-Yi" <jlee@suse.com>
@@@ -7313,6 -7319,7 +7327,6 @@@ S:      Odd Fixe
  F:    drivers/net/
  F:    include/linux/if_*
  F:    include/linux/netdevice.h
 -F:    include/linux/arcdevice.h
  F:    include/linux/etherdevice.h
  F:    include/linux/fcdevice.h
  F:    include/linux/fddidevice.h
@@@ -9108,6 -9115,15 +9122,15 @@@ S: Supporte
  F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
  F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
  
+ SYNOPSYS DESIGNWARE I2C DRIVER
+ M:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ M:    Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ M:    Mika Westerberg <mika.westerberg@linux.intel.com>
+ L:    linux-i2c@vger.kernel.org
+ S:    Maintained
+ F:    drivers/i2c/busses/i2c-designware-*
+ F:    include/linux/platform_data/i2c-designware.h
+
  SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
  M:    Seungwon Jeon <tgih.jun@samsung.com>
  M:    Jaehoon Chung <jh80.chung@samsung.com>
@@@ -9921,7 -9937,6 +9944,6 @@@ S:      Maintaine
  F:    drivers/staging/lustre
  
  STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec)
- M:    Julian Andres Klode <jak@jak-linux.org>
  M:    Marc Dietrich <marvin24@gmx.de>
  L:    ac100@lists.launchpad.net (moderated for non-subscribers)
  L:    linux-tegra@vger.kernel.org
@@@ -11272,6 -11287,7 +11294,6 @@@ M:   Shrijeet Mukherjee <shm@cumulusnetwo
  L:    netdev@vger.kernel.org
  S:    Maintained
  F:    drivers/net/vrf.c
 -F:    include/net/vrf.h
  F:    Documentation/networking/vrf.txt
  
  VT1211 HARDWARE MONITOR DRIVER
@@@ -11384,15 -11400,6 +11406,6 @@@ W:  http://oops.ghostprotocols.net:81/bl
  S:    Maintained
  F:    drivers/net/wireless/wl3501*
  
- WM97XX TOUCHSCREEN DRIVERS
- M:    Mark Brown <broonie@kernel.org>
- M:    Liam Girdwood <lrg@slimlogic.co.uk>
- L:    linux-input@vger.kernel.org
- W:    https://github.com/CirrusLogic/linux-drivers/wiki
- S:    Supported
- F:    drivers/input/touchscreen/*wm97*
- F:    include/linux/wm97xx.h
  WOLFSON MICROELECTRONICS DRIVERS
  L:    patches@opensource.wolfsonmicro.com
  T:    git https://github.com/CirrusLogic/linux-drivers.git
index 6be415111eec09b058c5aa88b8a0693ee1cbb2f2,b8efb8cd1f73ee1cb7de196ae88d9173203c4d2a..2f4b14cfddb4762e88818464dcd6c57d5eff2a5c
@@@ -125,7 -125,7 +125,7 @@@ static u64 jit_get_skb_w(struct sk_buf
  }
  
  /*
 - * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
 + * Wrappers which handle both OABI and EABI and assure Thumb2 interworking
   * (where the assembly routines like __aeabi_uidiv could cause problems).
   */
  static u32 jit_udiv(u32 dividend, u32 divisor)
        return dividend / divisor;
  }
  
 +static u32 jit_mod(u32 dividend, u32 divisor)
 +{
 +      return dividend % divisor;
 +}
 +
  static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
  {
        inst |= (cond << 28);
@@@ -476,17 -471,11 +476,17 @@@ static inline void emit_blx_r(u8 tgt_re
  #endif
  }
  
 -static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
 +static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx,
 +                              int bpf_op)
  {
  #if __LINUX_ARM_ARCH__ == 7
        if (elf_hwcap & HWCAP_IDIVA) {
 -              emit(ARM_UDIV(rd, rm, rn), ctx);
 +              if (bpf_op == BPF_DIV)
 +                      emit(ARM_UDIV(rd, rm, rn), ctx);
 +              else {
 +                      emit(ARM_UDIV(ARM_R3, rm, rn), ctx);
 +                      emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx);
 +              }
                return;
        }
  #endif
                emit(ARM_MOV_R(ARM_R0, rm), ctx);
  
        ctx->seen |= SEEN_CALL;
 -      emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
 +      emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? (u32)jit_udiv : (u32)jit_mod,
 +                 ctx);
        emit_blx_r(ARM_R3, ctx);
  
        if (rd != ARM_R0)
@@@ -626,6 -614,7 +626,7 @@@ load_common
                case BPF_LD | BPF_B | BPF_IND:
                        load_order = 0;
  load_ind:
+                       update_on_xread(ctx);
                        OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
                        goto load_common;
                case BPF_LDX | BPF_IMM:
                        if (k == 1)
                                break;
                        emit_mov_i(r_scratch, k, ctx);
 -                      emit_udiv(r_A, r_A, r_scratch, ctx);
 +                      emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV);
                        break;
                case BPF_ALU | BPF_DIV | BPF_X:
                        update_on_xread(ctx);
                        emit(ARM_CMP_I(r_X, 0), ctx);
                        emit_err_ret(ARM_COND_EQ, ctx);
 -                      emit_udiv(r_A, r_A, r_X, ctx);
 +                      emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV);
 +                      break;
 +              case BPF_ALU | BPF_MOD | BPF_K:
 +                      if (k == 1) {
 +                              emit_mov_i(r_A, 0, ctx);
 +                              break;
 +                      }
 +                      emit_mov_i(r_scratch, k, ctx);
 +                      emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD);
 +                      break;
 +              case BPF_ALU | BPF_MOD | BPF_X:
 +                      update_on_xread(ctx);
 +                      emit(ARM_CMP_I(r_X, 0), ctx);
 +                      emit_err_ret(ARM_COND_EQ, ctx);
 +                      emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD);
                        break;
                case BPF_ALU | BPF_OR | BPF_K:
                        /* A |= K */
@@@ -1073,7 -1048,7 +1074,7 @@@ void bpf_jit_compile(struct bpf_prog *f
  
        set_memory_ro((unsigned long)header, header->pages);
        fp->bpf_func = (void *)ctx.target;
 -      fp->jited = true;
 +      fp->jited = 1;
  out:
        kfree(ctx.offsets);
        return;
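
For reference, the BPF_MOD lowering above relies on the identity a % b == a - (a / b) * b: with hardware divide (HWCAP_IDIVA) the JIT emits UDIV into a scratch register followed by MLS (rd = rm - rn * r3), and otherwise falls back to a call to the new jit_mod() helper. A minimal C sketch of what the emitted pair computes — illustrative only, udivmod() is a hypothetical name, not driver code:

#include <stdint.h>

/* What UDIV + MLS compute for BPF_MOD; the JIT guarantees divisor != 0
 * for the BPF_X cases via the CMP/err_ret emitted just before. */
static uint32_t udivmod(uint32_t dividend, uint32_t divisor, int want_mod)
{
	uint32_t q = dividend / divisor;	/* UDIV r3, rm, rn */

	/* MLS rd, rn, r3, rm computes rd = rm - rn * r3 */
	return want_mod ? dividend - divisor * q : q;
}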
index a2bc5314a62b193916957d2a432c63fe8ac7baf2,be628bd9fb18b6f0116125e5a3f9ea16ad1d3561..d84efcd34fac3da6ce1b87ad44a9f2ca449c0a02
@@@ -1090,6 -1090,10 +1090,6 @@@ static void bnx2x_get_drvinfo(struct ne
        bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));
  
        strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
 -      info->n_stats = BNX2X_NUM_STATS;
 -      info->testinfo_len = BNX2X_NUM_TESTS(bp);
 -      info->eedump_len = bp->common.flash_size;
 -      info->regdump_len = bnx2x_get_regs_len(dev);
  }
  
  static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@@ -3347,6 -3351,13 +3347,13 @@@ static int bnx2x_set_rss_flags(struct b
                        udp_rss_requested = 0;
                else
                        return -EINVAL;
+               if (CHIP_IS_E1x(bp) && udp_rss_requested) {
+                       DP(BNX2X_MSG_ETHTOOL,
+                          "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
+                       return -EINVAL;
+               }
                if ((info->flow_type == UDP_V4_FLOW) &&
                    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
                        bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
index 410995cd7ea491fc365a0ce1b893a6117a1ac0ff,1805541b4240e72c79445ea6f50bbc2a74ba13c7..50f63b7f3c3e4f97e9b0f456ad96c96c009e8300
@@@ -205,23 -205,6 +205,23 @@@ enum dma_reg 
        DMA_INDEX2RING_5,
        DMA_INDEX2RING_6,
        DMA_INDEX2RING_7,
 +      DMA_RING0_TIMEOUT,
 +      DMA_RING1_TIMEOUT,
 +      DMA_RING2_TIMEOUT,
 +      DMA_RING3_TIMEOUT,
 +      DMA_RING4_TIMEOUT,
 +      DMA_RING5_TIMEOUT,
 +      DMA_RING6_TIMEOUT,
 +      DMA_RING7_TIMEOUT,
 +      DMA_RING8_TIMEOUT,
 +      DMA_RING9_TIMEOUT,
 +      DMA_RING10_TIMEOUT,
 +      DMA_RING11_TIMEOUT,
 +      DMA_RING12_TIMEOUT,
 +      DMA_RING13_TIMEOUT,
 +      DMA_RING14_TIMEOUT,
 +      DMA_RING15_TIMEOUT,
 +      DMA_RING16_TIMEOUT,
  };
  
  static const u8 bcmgenet_dma_regs_v3plus[] = {
        [DMA_PRIORITY_0]        = 0x30,
        [DMA_PRIORITY_1]        = 0x34,
        [DMA_PRIORITY_2]        = 0x38,
 +      [DMA_RING0_TIMEOUT]     = 0x2C,
 +      [DMA_RING1_TIMEOUT]     = 0x30,
 +      [DMA_RING2_TIMEOUT]     = 0x34,
 +      [DMA_RING3_TIMEOUT]     = 0x38,
 +      [DMA_RING4_TIMEOUT]     = 0x3c,
 +      [DMA_RING5_TIMEOUT]     = 0x40,
 +      [DMA_RING6_TIMEOUT]     = 0x44,
 +      [DMA_RING7_TIMEOUT]     = 0x48,
 +      [DMA_RING8_TIMEOUT]     = 0x4c,
 +      [DMA_RING9_TIMEOUT]     = 0x50,
 +      [DMA_RING10_TIMEOUT]    = 0x54,
 +      [DMA_RING11_TIMEOUT]    = 0x58,
 +      [DMA_RING12_TIMEOUT]    = 0x5c,
 +      [DMA_RING13_TIMEOUT]    = 0x60,
 +      [DMA_RING14_TIMEOUT]    = 0x64,
 +      [DMA_RING15_TIMEOUT]    = 0x68,
 +      [DMA_RING16_TIMEOUT]    = 0x6C,
        [DMA_INDEX2RING_0]      = 0x70,
        [DMA_INDEX2RING_1]      = 0x74,
        [DMA_INDEX2RING_2]      = 0x78,
@@@ -269,23 -235,6 +269,23 @@@ static const u8 bcmgenet_dma_regs_v2[] 
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
 +      [DMA_RING0_TIMEOUT]     = 0x2C,
 +      [DMA_RING1_TIMEOUT]     = 0x30,
 +      [DMA_RING2_TIMEOUT]     = 0x34,
 +      [DMA_RING3_TIMEOUT]     = 0x38,
 +      [DMA_RING4_TIMEOUT]     = 0x3c,
 +      [DMA_RING5_TIMEOUT]     = 0x40,
 +      [DMA_RING6_TIMEOUT]     = 0x44,
 +      [DMA_RING7_TIMEOUT]     = 0x48,
 +      [DMA_RING8_TIMEOUT]     = 0x4c,
 +      [DMA_RING9_TIMEOUT]     = 0x50,
 +      [DMA_RING10_TIMEOUT]    = 0x54,
 +      [DMA_RING11_TIMEOUT]    = 0x58,
 +      [DMA_RING12_TIMEOUT]    = 0x5c,
 +      [DMA_RING13_TIMEOUT]    = 0x60,
 +      [DMA_RING14_TIMEOUT]    = 0x64,
 +      [DMA_RING15_TIMEOUT]    = 0x68,
 +      [DMA_RING16_TIMEOUT]    = 0x6C,
  };
  
  static const u8 bcmgenet_dma_regs_v1[] = {
        [DMA_PRIORITY_0]        = 0x34,
        [DMA_PRIORITY_1]        = 0x38,
        [DMA_PRIORITY_2]        = 0x3C,
 +      [DMA_RING0_TIMEOUT]     = 0x2C,
 +      [DMA_RING1_TIMEOUT]     = 0x30,
 +      [DMA_RING2_TIMEOUT]     = 0x34,
 +      [DMA_RING3_TIMEOUT]     = 0x38,
 +      [DMA_RING4_TIMEOUT]     = 0x3c,
 +      [DMA_RING5_TIMEOUT]     = 0x40,
 +      [DMA_RING6_TIMEOUT]     = 0x44,
 +      [DMA_RING7_TIMEOUT]     = 0x48,
 +      [DMA_RING8_TIMEOUT]     = 0x4c,
 +      [DMA_RING9_TIMEOUT]     = 0x50,
 +      [DMA_RING10_TIMEOUT]    = 0x54,
 +      [DMA_RING11_TIMEOUT]    = 0x58,
 +      [DMA_RING12_TIMEOUT]    = 0x5c,
 +      [DMA_RING13_TIMEOUT]    = 0x60,
 +      [DMA_RING14_TIMEOUT]    = 0x64,
 +      [DMA_RING15_TIMEOUT]    = 0x68,
 +      [DMA_RING16_TIMEOUT]    = 0x6C,
  };
  
  /* Set at runtime once bcmgenet version is known */
@@@ -566,85 -498,6 +566,85 @@@ static void bcmgenet_set_msglevel(struc
        priv->msg_enable = level;
  }
  
 +static int bcmgenet_get_coalesce(struct net_device *dev,
 +                               struct ethtool_coalesce *ec)
 +{
 +      struct bcmgenet_priv *priv = netdev_priv(dev);
 +
 +      ec->tx_max_coalesced_frames =
 +              bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
 +                                       DMA_MBUF_DONE_THRESH);
 +      ec->rx_max_coalesced_frames =
 +              bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
 +                                       DMA_MBUF_DONE_THRESH);
 +      ec->rx_coalesce_usecs =
 +              bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
 +
 +      return 0;
 +}
 +
 +static int bcmgenet_set_coalesce(struct net_device *dev,
 +                               struct ethtool_coalesce *ec)
 +{
 +      struct bcmgenet_priv *priv = netdev_priv(dev);
 +      unsigned int i;
 +      u32 reg;
 +
 +      /* The base system clock is 125 MHz; the DMA timeout counter ticks at
 +       * this reference clock divided by 1024, i.e. roughly every 8.192 us.
 +       * The programmed value must fit in DMA_TIMEOUT_MASK (16 bits).
 +       */
 +      if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
 +          ec->tx_max_coalesced_frames == 0 ||
 +          ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
 +          ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
 +              return -EINVAL;
 +
 +      if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
 +              return -EINVAL;
 +
 +      /* GENET TDMA hardware does not support a configurable timeout, but will
 +       * always generate an interrupt either after MBDONE packets have been
 +       * transmitted, or when the ring is empty.
 +       */
 +      if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
 +          ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low)
 +              return -EOPNOTSUPP;
 +
 +      /* Program all TX queues with the same values, as there is no
 +       * ethtool knob to do coalescing on a per-queue basis
 +       */
 +      for (i = 0; i < priv->hw_params->tx_queues; i++)
 +              bcmgenet_tdma_ring_writel(priv, i,
 +                                        ec->tx_max_coalesced_frames,
 +                                        DMA_MBUF_DONE_THRESH);
 +      bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
 +                                ec->tx_max_coalesced_frames,
 +                                DMA_MBUF_DONE_THRESH);
 +
 +      for (i = 0; i < priv->hw_params->rx_queues; i++) {
 +              bcmgenet_rdma_ring_writel(priv, i,
 +                                        ec->rx_max_coalesced_frames,
 +                                        DMA_MBUF_DONE_THRESH);
 +
 +              reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
 +              reg &= ~DMA_TIMEOUT_MASK;
 +              reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
 +              bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
 +      }
 +
 +      bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
 +                                ec->rx_max_coalesced_frames,
 +                                DMA_MBUF_DONE_THRESH);
 +
 +      reg = bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT);
 +      reg &= ~DMA_TIMEOUT_MASK;
 +      reg |= DIV_ROUND_UP(ec->rx_coalesce_usecs * 1000, 8192);
 +      bcmgenet_rdma_writel(priv, reg, DMA_RING16_TIMEOUT);
 +
 +      return 0;
 +}
 +
  /* standard ethtool support functions. */
  enum bcmgenet_stat_type {
        BCMGENET_STAT_NETDEV = -1,
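
The microsecond conversions in bcmgenet_get_coalesce()/bcmgenet_set_coalesce() above follow from the clock comment: one DMA timeout tick is 1024 cycles of the 125 MHz reference clock, i.e. 8.192 us (8192 ns). A small sketch of the two conversions, with hypothetical helper names:

#include <stdint.h>

#define GENET_NS_PER_DMA_TICK	8192	/* 1024 / 125 MHz, in nanoseconds */

/* usecs -> register ticks, rounded up so the timeout is never shorter
 * than requested (mirrors DIV_ROUND_UP(usecs * 1000, 8192) above). */
static uint32_t genet_usecs_to_ticks(uint32_t usecs)
{
	return (usecs * 1000 + GENET_NS_PER_DMA_TICK - 1) / GENET_NS_PER_DMA_TICK;
}

/* register ticks -> usecs, truncating (mirrors reg * 8192 / 1000 above). */
static uint32_t genet_ticks_to_usecs(uint32_t ticks)
{
	return ticks * GENET_NS_PER_DMA_TICK / 1000;
}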
@@@ -793,6 -646,7 +793,6 @@@ static void bcmgenet_get_drvinfo(struc
  {
        strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
        strlcpy(info->version, "v2.0", sizeof(info->version));
 -      info->n_stats = BCMGENET_STATS_LEN;
  }
  
  static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@@ -990,8 -844,6 +990,8 @@@ static struct ethtool_ops bcmgenet_etht
        .get_eee                = bcmgenet_get_eee,
        .set_eee                = bcmgenet_set_eee,
        .nway_reset             = bcmgenet_nway_reset,
 +      .get_coalesce           = bcmgenet_get_coalesce,
 +      .set_coalesce           = bcmgenet_set_coalesce,
  };
  
  /* Power down the unimac, based on mode. */
@@@ -1831,6 -1683,24 +1831,24 @@@ static void bcmgenet_intr_disable(struc
        bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
  }
  
+ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
+ {
+       u32 int0_enable = 0;
+       /* Monitor cable plug/unplugged event for internal PHY, external PHY
+        * and MoCA PHY
+        */
+       if (priv->internal_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->ext_phy) {
+               int0_enable |= UMAC_IRQ_LINK_EVENT;
+       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
+                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+       }
+       bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
+ }
+
  static int init_umac(struct bcmgenet_priv *priv)
  {
        struct device *kdev = &priv->pdev->dev;
        /* Enable Tx default queue 16 interrupts */
        int0_enable |= UMAC_IRQ_TXDMA_DONE;
  
-       /* Monitor cable plug/unplugged event for internal PHY */
-       if (priv->internal_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->ext_phy) {
-               int0_enable |= UMAC_IRQ_LINK_EVENT;
-       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
-               if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
-                       int0_enable |= UMAC_IRQ_LINK_EVENT;
+       /* Configure backpressure vectors for MoCA */
+       if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
  
@@@ -2793,6 -2656,9 +2804,9 @@@ static void bcmgenet_netif_start(struc
  
        netif_tx_start_all_queues(dev);
  
+       /* Monitor link interrupts now */
+       bcmgenet_link_intr_enable(priv);
        phy_start(priv->phydev);
  }
  
index 5c950e20f2ee2d615979a32380dff9136db60f1c,c0e943aecd1394262757e790b04a6ce1eccda72a..0ff8f01e57ee5a4a7de890485e5987fd9eec7f8a
@@@ -386,7 -386,6 +386,6 @@@ static i40e_status i40e_init_asq(struc
  
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
  
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
                goto init_adminq_free_rings;
  
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
  
  init_adminq_free_rings:
@@@ -445,7 -445,6 +445,6 @@@ static i40e_status i40e_init_arq(struc
  
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
  
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
                goto init_adminq_free_rings;
  
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
  
  init_adminq_free_rings:
@@@ -482,12 -482,8 +482,12 @@@ static i40e_status i40e_shutdown_asq(st
  {
        i40e_status ret_code = 0;
  
 -      if (hw->aq.asq.count == 0)
 -              return I40E_ERR_NOT_READY;
 +      mutex_lock(&hw->aq.asq_mutex);
 +
 +      if (hw->aq.asq.count == 0) {
 +              ret_code = I40E_ERR_NOT_READY;
 +              goto shutdown_asq_out;
 +      }
  
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);
  
 -      /* make sure lock is available */
 -      mutex_lock(&hw->aq.asq_mutex);
 -
        hw->aq.asq.count = 0; /* to indicate uninitialized queue */
  
        /* free ring buffers */
        i40e_free_asq_bufs(hw);
  
 +shutdown_asq_out:
        mutex_unlock(&hw->aq.asq_mutex);
 -
        return ret_code;
  }
  
@@@ -516,12 -515,8 +516,12 @@@ static i40e_status i40e_shutdown_arq(st
  {
        i40e_status ret_code = 0;
  
 -      if (hw->aq.arq.count == 0)
 -              return I40E_ERR_NOT_READY;
 +      mutex_lock(&hw->aq.arq_mutex);
 +
 +      if (hw->aq.arq.count == 0) {
 +              ret_code = I40E_ERR_NOT_READY;
 +              goto shutdown_arq_out;
 +      }
  
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);
  
 -      /* make sure lock is available */
 -      mutex_lock(&hw->aq.arq_mutex);
 -
        hw->aq.arq.count = 0; /* to indicate uninitialized queue */
  
        /* free ring buffers */
        i40e_free_arq_bufs(hw);
  
 +shutdown_arq_out:
        mutex_unlock(&hw->aq.arq_mutex);
 -
        return ret_code;
  }
  
   **/
  i40e_status i40e_init_adminq(struct i40e_hw *hw)
  {
 -      i40e_status ret_code;
 +      u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
 +      i40e_status ret_code;
        int retry = 0;
  
        /* verify input for valid configuration */
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
 +      i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
 +      i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
 +                         &oem_hi);
 +      i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
 +                         &oem_lo);
 +      hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
  
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
@@@ -666,9 -657,6 +666,9 @@@ i40e_status i40e_shutdown_adminq(struc
  
        /* destroy the locks */
  
 +      if (hw->nvm_buff.va)
 +              i40e_free_virt_mem(hw, &hw->nvm_buff);
 +
        return ret_code;
  }
  
@@@ -690,7 -678,8 +690,7 @@@ static u16 i40e_clean_asq(struct i40e_h
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "%s: ntc %d head %d.\n", __func__, ntc,
 -                         rd32(hw, hw->aq.asq.head));
 +                         "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
  
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
@@@ -753,23 -742,19 +753,23 @@@ i40e_status i40e_asq_send_command(struc
        u16  retval = 0;
        u32  val = 0;
  
 -      val = rd32(hw, hw->aq.asq.head);
 -      if (val >= hw->aq.num_asq_entries) {
 +      mutex_lock(&hw->aq.asq_mutex);
 +
 +      if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "AQTX: head overrun at %d\n", val);
 +                         "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
 -              goto asq_send_command_exit;
 +              goto asq_send_command_error;
        }
  
 -      if (hw->aq.asq.count == 0) {
 +      hw->aq.asq_last_status = I40E_AQ_RC_OK;
 +
 +      val = rd32(hw, hw->aq.asq.head);
 +      if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "AQTX: Admin queue not initialized.\n");
 +                         "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
 -              goto asq_send_command_exit;
 +              goto asq_send_command_error;
        }
  
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);
  
 -      mutex_lock(&hw->aq.asq_mutex);
 -
        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
  
 +      /* save writeback aq if requested */
 +      if (details->wb_desc)
 +              *details->wb_desc = *desc_on_ring;
 +
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
  
  asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
 -asq_send_command_exit:
        return status;
  }
  
@@@ -1039,19 -1023,6 +1039,19 @@@ clean_arq_element_err
                        i40e_release_nvm(hw);
                        hw->aq.nvm_release_on_done = false;
                }
 +
 +              switch (hw->nvmupd_state) {
 +              case I40E_NVMUPD_STATE_INIT_WAIT:
 +                      hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 +                      break;
 +
 +              case I40E_NVMUPD_STATE_WRITE_WAIT:
 +                      hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
 +                      break;
 +
 +              default:
 +                      break;
 +              }
        }
  
        return ret_code;
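
The i40e_shutdown_asq()/i40e_shutdown_arq() hunks above are a lock-ordering fix: the mutex is now taken before the count == 0 test, so checking whether the queue is initialized and tearing it down happen atomically, instead of testing an unlocked value and only acquiring the lock later. A minimal sketch of the resulting pattern, with hypothetical names (struct aq_ring, aq_free_bufs) standing in for the driver's types:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct aq_ring {
	struct mutex lock;
	u16 count;			/* 0 means "not initialized" */
};

static void aq_free_bufs(struct aq_ring *q);

static int aq_shutdown(struct aq_ring *q)
{
	int ret = 0;

	mutex_lock(&q->lock);
	if (q->count == 0) {		/* now tested under the lock */
		ret = -EBUSY;		/* stands in for I40E_ERR_NOT_READY */
		goto out;
	}

	q->count = 0;			/* mark the queue uninitialized */
	aq_free_bufs(q);		/* free ring buffers */
out:
	mutex_unlock(&q->lock);
	return ret;
}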
index 87a5d09cb087c08859af0b1344e2c11dd76de1e2,dd44fafd8798613be8c440c36c2366233d0958d0..f7ed3131d037c2cc8ada1ecb946c4d68e0c3ad4d
@@@ -39,7 -39,7 +39,7 @@@ static const char i40e_driver_string[] 
  
  #define DRV_VERSION_MAJOR 1
  #define DRV_VERSION_MINOR 3
 -#define DRV_VERSION_BUILD 9
 +#define DRV_VERSION_BUILD 34
  #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@@ -75,13 -75,10 +75,13 @@@ static const struct pci_device_id i40e_
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
 +      {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
 +      {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
 +      {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
        /* required last entry */
        {0, }
  };
@@@ -216,10 -213,10 +216,10 @@@ static int i40e_get_lump(struct i40e_p
                        ret = i;
                        pile->search_hint = i + j;
                        break;
 -              } else {
 -                      /* not enough, so skip over it and continue looking */
 -                      i += j;
                }
 +
 +              /* not enough, so skip over it and continue looking */
 +              i += j;
        }
  
        return ret;
@@@ -302,69 -299,25 +302,69 @@@ static void i40e_tx_timeout(struct net_
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
 +      struct i40e_ring *tx_ring = NULL;
 +      unsigned int i, hung_queue = 0;
 +      u32 head, val;
  
        pf->tx_timeout_count++;
  
 +      /* find the stopped queue the same way the stack does */
 +      for (i = 0; i < netdev->num_tx_queues; i++) {
 +              struct netdev_queue *q;
 +              unsigned long trans_start;
 +
 +              q = netdev_get_tx_queue(netdev, i);
 +              trans_start = q->trans_start ? : netdev->trans_start;
 +              if (netif_xmit_stopped(q) &&
 +                  time_after(jiffies,
 +                             (trans_start + netdev->watchdog_timeo))) {
 +                      hung_queue = i;
 +                      break;
 +              }
 +      }
 +
 +      if (i == netdev->num_tx_queues) {
 +              netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
 +      } else {
 +              /* now that we have an index, find the tx_ring struct */
 +              for (i = 0; i < vsi->num_queue_pairs; i++) {
 +                      if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
 +                              if (hung_queue ==
 +                                  vsi->tx_rings[i]->queue_index) {
 +                                      tx_ring = vsi->tx_rings[i];
 +                                      break;
 +                              }
 +                      }
 +              }
 +      }
 +
        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
 -              pf->tx_timeout_recovery_level = 1;
 +              pf->tx_timeout_recovery_level = 1;  /* reset after some time */
 +      else if (time_before(jiffies,
 +                    (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
 +              return;   /* don't do any new action before the next timeout */
 +
 +      if (tx_ring) {
 +              head = i40e_get_head(tx_ring);
 +              /* Read interrupt register */
 +              if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 +                      val = rd32(&pf->hw,
 +                           I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
 +                                              tx_ring->vsi->base_vector - 1));
 +              else
 +                      val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 +
 +              netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
 +                          vsi->seid, hung_queue, tx_ring->next_to_clean,
 +                          head, tx_ring->next_to_use,
 +                          readl(tx_ring->tail), val);
 +      }
 +
        pf->tx_timeout_last_recovery = jiffies;
 -      netdev_info(netdev, "tx_timeout recovery level %d\n",
 -                  pf->tx_timeout_recovery_level);
 +      netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
 +                  pf->tx_timeout_recovery_level, hung_queue);
  
        switch (pf->tx_timeout_recovery_level) {
 -      case 0:
 -              /* disable and re-enable queues for the VSI */
 -              if (in_interrupt()) {
 -                      set_bit(__I40E_REINIT_REQUESTED, &pf->state);
 -                      set_bit(__I40E_REINIT_REQUESTED, &vsi->state);
 -              } else {
 -                      i40e_vsi_reinit_locked(vsi);
 -              }
 -              break;
        case 1:
                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
                break;
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
 -              set_bit(__I40E_DOWN_REQUESTED, &pf->state);
 -              set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
 +
        i40e_service_event_schedule(pf);
        pf->tx_timeout_recovery_level++;
  }
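
The rewritten timeout handler above locates the stalled queue with the same criterion the core stack's watchdog uses: a queue counts as hung when it is stopped and its last transmission started more than watchdog_timeo jiffies ago. Distilled into one sketch function (hypothetical name, same kernel APIs as the hunk):

#include <linux/jiffies.h>
#include <linux/netdevice.h>

/* True if TX queue i of netdev looks hung by the watchdog criterion. */
static bool i40e_queue_looks_hung(struct net_device *netdev, unsigned int i)
{
	struct netdev_queue *q = netdev_get_tx_queue(netdev, i);
	unsigned long trans_start = q->trans_start ? : netdev->trans_start;

	return netif_xmit_stopped(q) &&
	       time_after(jiffies, trans_start + netdev->watchdog_timeo);
}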
@@@ -477,7 -431,6 +477,7 @@@ static struct rtnl_link_stats64 *i40e_g
        stats->tx_errors        = vsi_stats->tx_errors;
        stats->tx_dropped       = vsi_stats->tx_dropped;
        stats->rx_errors        = vsi_stats->rx_errors;
 +      stats->rx_dropped       = vsi_stats->rx_dropped;
        stats->rx_crc_errors    = vsi_stats->rx_crc_errors;
        stats->rx_length_errors = vsi_stats->rx_length_errors;
  
@@@ -503,11 -456,11 +503,11 @@@ void i40e_vsi_reset_stats(struct i40e_v
        memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
        if (vsi->rx_rings && vsi->rx_rings[0]) {
                for (i = 0; i < vsi->num_queue_pairs; i++) {
 -                      memset(&vsi->rx_rings[i]->stats, 0 ,
 +                      memset(&vsi->rx_rings[i]->stats, 0,
                               sizeof(vsi->rx_rings[i]->stats));
 -                      memset(&vsi->rx_rings[i]->rx_stats, 0 ,
 +                      memset(&vsi->rx_rings[i]->rx_stats, 0,
                               sizeof(vsi->rx_rings[i]->rx_stats));
 -                      memset(&vsi->tx_rings[i]->stats, 0 ,
 +                      memset(&vsi->tx_rings[i]->stats, 0,
                               sizeof(vsi->tx_rings[i]->stats));
                        memset(&vsi->tx_rings[i]->tx_stats, 0,
                               sizeof(vsi->tx_rings[i]->tx_stats));
@@@ -801,6 -754,7 +801,6 @@@ static void i40e_update_link_xoff_rx(st
        struct i40e_hw_port_stats *nsd = &pf->stats;
        struct i40e_hw *hw = &pf->hw;
        u64 xoff = 0;
 -      u16 i, v;
  
        if ((hw->fc.current_mode != I40E_FC_FULL) &&
            (hw->fc.current_mode != I40E_FC_RX_PAUSE))
        if (!(nsd->link_xoff_rx - xoff))
                return;
  
 -      /* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
 -      for (v = 0; v < pf->num_alloc_vsi; v++) {
 -              struct i40e_vsi *vsi = pf->vsi[v];
 -
 -              if (!vsi || !vsi->tx_rings[0])
 -                      continue;
 -
 -              for (i = 0; i < vsi->num_queue_pairs; i++) {
 -                      struct i40e_ring *ring = vsi->tx_rings[i];
 -                      clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
 -              }
 -      }
  }
  
  /**
@@@ -830,7 -796,7 +830,7 @@@ static void i40e_update_prio_xoff_rx(st
        bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
        struct i40e_dcbx_config *dcb_cfg;
        struct i40e_hw *hw = &pf->hw;
 -      u16 i, v;
 +      u16 i;
        u8 tc;
  
        dcb_cfg = &hw->local_dcbx_config;
  
        for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
                u64 prio_xoff = nsd->priority_xoff_rx[i];
 +
                i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
                                   pf->stat_offsets_loaded,
                                   &osd->priority_xoff_rx[i],
                tc = dcb_cfg->etscfg.prioritytable[i];
                xoff[tc] = true;
        }
 -
 -      /* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
 -      for (v = 0; v < pf->num_alloc_vsi; v++) {
 -              struct i40e_vsi *vsi = pf->vsi[v];
 -
 -              if (!vsi || !vsi->tx_rings[0])
 -                      continue;
 -
 -              for (i = 0; i < vsi->num_queue_pairs; i++) {
 -                      struct i40e_ring *ring = vsi->tx_rings[i];
 -
 -                      tc = ring->dcb_tc;
 -                      if (xoff[tc])
 -                              clear_bit(__I40E_HANG_CHECK_ARMED,
 -                                        &ring->state);
 -              }
 -      }
  }
  
  /**
@@@ -880,7 -862,6 +880,7 @@@ static void i40e_update_vsi_stats(struc
        u32 rx_page, rx_buf;
        u64 bytes, packets;
        unsigned int start;
 +      u64 tx_linearize;
        u64 rx_p, rx_b;
        u64 tx_p, tx_b;
        u16 q;
         */
        rx_b = rx_p = 0;
        tx_b = tx_p = 0;
 -      tx_restart = tx_busy = 0;
 +      tx_restart = tx_busy = tx_linearize = 0;
        rx_page = 0;
        rx_buf = 0;
        rcu_read_lock();
                tx_p += packets;
                tx_restart += p->tx_stats.restart_queue;
                tx_busy += p->tx_stats.tx_busy;
 +              tx_linearize += p->tx_stats.tx_linearize;
  
                /* Rx queue is part of the same block as Tx queue */
                p = &p[1];
        rcu_read_unlock();
        vsi->tx_restart = tx_restart;
        vsi->tx_busy = tx_busy;
 +      vsi->tx_linearize = tx_linearize;
        vsi->rx_page_failed = rx_page;
        vsi->rx_buf_failed = rx_buf;
  
@@@ -1277,7 -1256,7 +1277,7 @@@ bool i40e_is_vsi_in_vlan(struct i40e_vs
         * so we have to go through all the list in order to make sure
         */
        list_for_each_entry(f, &vsi->mac_filter_list, list) {
 -              if (f->vlan >= 0)
 +              if (f->vlan >= 0 || vsi->info.pvid)
                        return true;
        }
  
@@@ -1440,7 -1419,6 +1440,7 @@@ void i40e_del_filter(struct i40e_vsi *v
        } else {
                /* make sure we don't remove a filter in use by VF or netdev */
                int min_f = 0;
 +
                min_f += (f->is_vf ? 1 : 0);
                min_f += (f->is_netdev ? 1 : 0);
  
@@@ -1499,7 -1477,6 +1499,7 @@@ static int i40e_set_mac(struct net_devi
  
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
 +
                ret = i40e_aq_mac_address_write(&vsi->back->hw,
                                                I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                        f->is_laa = true;
        }
  
 -      i40e_sync_vsi_filters(vsi);
 +      i40e_sync_vsi_filters(vsi, false);
        ether_addr_copy(netdev->dev_addr, addr->sa_data);
  
        return 0;
@@@ -1732,27 -1709,36 +1732,27 @@@ static void i40e_set_rx_mode(struct net
  
        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
 -              bool found = false;
  
                if (!f->is_netdev)
                        continue;
  
 -              if (is_multicast_ether_addr(f->macaddr)) {
 -                      netdev_for_each_mc_addr(mca, netdev) {
 -                              if (ether_addr_equal(mca->addr, f->macaddr)) {
 -                                      found = true;
 -                                      break;
 -                              }
 -                      }
 -              } else {
 -                      netdev_for_each_uc_addr(uca, netdev) {
 -                              if (ether_addr_equal(uca->addr, f->macaddr)) {
 -                                      found = true;
 -                                      break;
 -                              }
 -                      }
 +              netdev_for_each_mc_addr(mca, netdev)
 +                      if (ether_addr_equal(mca->addr, f->macaddr))
 +                              goto bottom_of_search_loop;
  
 -                      for_each_dev_addr(netdev, ha) {
 -                              if (ether_addr_equal(ha->addr, f->macaddr)) {
 -                                      found = true;
 -                                      break;
 -                              }
 -                      }
 -              }
 -              if (!found)
 -                      i40e_del_filter(
 -                         vsi, f->macaddr, I40E_VLAN_ANY, false, true);
 +              netdev_for_each_uc_addr(uca, netdev)
 +                      if (ether_addr_equal(uca->addr, f->macaddr))
 +                              goto bottom_of_search_loop;
 +
 +              for_each_dev_addr(netdev, ha)
 +                      if (ether_addr_equal(ha->addr, f->macaddr))
 +                              goto bottom_of_search_loop;
 +
 +              /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
 +              i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
 +
 +bottom_of_search_loop:
 +              continue;
        }
  
        /* check for other flag changes */
  /**
   * i40e_sync_vsi_filters - Update the VSI filter list to the HW
   * @vsi: ptr to the VSI
 + * @grab_rtnl: whether RTNL needs to be grabbed
   *
   * Push any outstanding VSI filter changes through the AdminQ.
   *
   * Returns 0 or error value
   **/
 -int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 +int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
  {
        struct i40e_mac_filter *f, *ftmp;
        bool promisc_forced_on = false;
        /* check for changes in promiscuous modes */
        if (changed_flags & IFF_ALLMULTI) {
                bool cur_multipromisc;
 +
                cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
                ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
                                                            vsi->seid,
        }
        if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
                bool cur_promisc;
 +
                cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
                               test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
                                        &vsi->state));
                         */
                        if (pf->cur_promisc != cur_promisc) {
                                pf->cur_promisc = cur_promisc;
 -                              i40e_do_reset_safe(pf,
 +                              if (grab_rtnl)
 +                                      i40e_do_reset_safe(pf,
 +                                              BIT(__I40E_PF_RESET_REQUESTED));
 +                              else
 +                                      i40e_do_reset(pf,
                                                BIT(__I40E_PF_RESET_REQUESTED));
                        }
                } else {
@@@ -2017,7 -1996,7 +2017,7 @@@ static void i40e_sync_filters_subtask(s
        for (v = 0; v < pf->num_alloc_vsi; v++) {
                if (pf->vsi[v] &&
                    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
 -                      i40e_sync_vsi_filters(pf->vsi[v]);
 +                      i40e_sync_vsi_filters(pf->vsi[v], true);
        }
  }
  
@@@ -2224,7 -2203,7 +2224,7 @@@ int i40e_vsi_add_vlan(struct i40e_vsi *
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
  
 -      return i40e_sync_vsi_filters(vsi);
 +      return i40e_sync_vsi_filters(vsi, false);
  }
  
  /**
@@@ -2296,7 -2275,7 +2296,7 @@@ int i40e_vsi_kill_vlan(struct i40e_vsi 
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return 0;
  
 -      return i40e_sync_vsi_filters(vsi);
 +      return i40e_sync_vsi_filters(vsi, false);
  }
  
  /**
@@@ -2630,6 -2609,8 +2630,6 @@@ static int i40e_configure_tx_ring(struc
        wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
        i40e_flush(hw);
  
 -      clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
 -
        /* cache tail off for easier writes later */
        ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
  
@@@ -2901,9 -2882,11 +2901,9 @@@ static int i40e_vsi_configure(struct i4
  static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
  {
        struct i40e_pf *pf = vsi->back;
 -      struct i40e_q_vector *q_vector;
        struct i40e_hw *hw = &pf->hw;
        u16 vector;
        int i, q;
 -      u32 val;
        u32 qp;
  
        /* The interrupt indexing is offset by 1 in the PFINT_ITRn
        qp = vsi->base_queue;
        vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
 -              q_vector = vsi->q_vectors[i];
 +              struct i40e_q_vector *q_vector = vsi->q_vectors[i];
 +
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
                q_vector->rx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
                q_vector->tx.latency_range = I40E_LOW_LATENCY;
                wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
                     q_vector->tx.itr);
 +              wr32(hw, I40E_PFINT_RATEN(vector - 1),
 +                   INTRL_USEC_TO_REG(vsi->int_rate_limit));
  
                /* Linked list for the queuepairs assigned to this vector */
                wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
                for (q = 0; q < q_vector->num_ringpairs; q++) {
 +                      u32 val;
 +
                        val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
                              (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
                              (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
@@@ -3067,6 -3045,24 +3067,6 @@@ void i40e_irq_dynamic_enable_icr0(struc
        i40e_flush(hw);
  }
  
 -/**
 - * i40e_irq_dynamic_enable - Enable default interrupt generation settings
 - * @vsi: pointer to a vsi
 - * @vector: enable a particular Hw Interrupt vector
 - **/
 -void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 -{
 -      struct i40e_pf *pf = vsi->back;
 -      struct i40e_hw *hw = &pf->hw;
 -      u32 val;
 -
 -      val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
 -            I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
 -            (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
 -      wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
 -      /* skip the flush */
 -}
 -
  /**
   * i40e_irq_dynamic_disable - Disable default interrupt generation settings
   * @vsi: pointer to a vsi
@@@ -3140,7 -3136,8 +3140,7 @@@ static int i40e_vsi_request_irq_msix(st
                                  q_vector);
                if (err) {
                        dev_info(&pf->pdev->dev,
 -                               "%s: request_irq failed, error: %d\n",
 -                               __func__, err);
 +                               "MSIX request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* assign the mask for this irq */
@@@ -3205,7 -3202,8 +3205,7 @@@ static int i40e_vsi_enable_irq(struct i
        int i;
  
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
 -              for (i = vsi->base_vector;
 -                   i < (vsi->num_q_vectors + vsi->base_vector); i++)
 +              for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_irq_dynamic_enable(vsi, i);
        } else {
                i40e_irq_dynamic_enable_icr0(pf);
@@@ -3267,7 -3265,6 +3267,7 @@@ static irqreturn_t i40e_intr(int irq, v
  
                /* temporarily disable queue cause for NAPI processing */
                u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
 +
                qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
                wr32(hw, I40E_QINT_RQCTL(0), qval);
  
@@@ -3437,9 -3434,10 +3437,9 @@@ static bool i40e_clean_fdir_tx_irq(stru
        i += tx_ring->count;
        tx_ring->next_to_clean = i;
  
 -      if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
 -              i40e_irq_dynamic_enable(vsi,
 -                              tx_ring->q_vector->v_idx + vsi->base_vector);
 -      }
 +      if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
 +              i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
 +
        return budget > 0;
  }
  
@@@ -3577,12 -3575,14 +3577,12 @@@ static void i40e_netpoll(struct net_dev
        if (test_bit(__I40E_DOWN, &vsi->state))
                return;
  
 -      pf->flags |= I40E_FLAG_IN_NETPOLL;
        if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
                for (i = 0; i < vsi->num_q_vectors; i++)
                        i40e_msix_clean_rings(0, vsi->q_vectors[i]);
        } else {
                i40e_intr(pf->pdev->irq, netdev);
        }
 -      pf->flags &= ~I40E_FLAG_IN_NETPOLL;
  }
  #endif
  
@@@ -3663,8 -3663,9 +3663,8 @@@ static int i40e_vsi_control_tx(struct i
                ret = i40e_pf_txq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
 -                               "%s: VSI seid %d Tx ring %d %sable timeout\n",
 -                               __func__, vsi->seid, pf_q,
 -                               (enable ? "en" : "dis"));
 +                               "VSI seid %d Tx ring %d %sable timeout\n",
 +                               vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }
@@@ -3740,8 -3741,9 +3740,8 @@@ static int i40e_vsi_control_rx(struct i
                ret = i40e_pf_rxq_wait(pf, pf_q, enable);
                if (ret) {
                        dev_info(&pf->pdev->dev,
 -                               "%s: VSI seid %d Rx ring %d %sable timeout\n",
 -                               __func__, vsi->seid, pf_q,
 -                               (enable ? "en" : "dis"));
 +                               "VSI seid %d Rx ring %d %sable timeout\n",
 +                               vsi->seid, pf_q, (enable ? "en" : "dis"));
                        break;
                }
        }
@@@ -4036,15 -4038,17 +4036,15 @@@ static void i40e_quiesce_vsi(struct i40
        if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
            vsi->type == I40E_VSI_FCOE) {
                dev_dbg(&vsi->back->pdev->dev,
 -                      "%s: VSI seid %d skipping FCoE VSI disable\n",
 -                       __func__, vsi->seid);
 +                       "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
                return;
        }
  
        set_bit(__I40E_NEEDS_RESTART, &vsi->state);
 -      if (vsi->netdev && netif_running(vsi->netdev)) {
 +      if (vsi->netdev && netif_running(vsi->netdev))
                vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
 -      } else {
 +      else
                i40e_vsi_close(vsi);
 -      }
  }
  
  /**
@@@ -4109,8 -4113,8 +4109,8 @@@ static int i40e_vsi_wait_txq_disabled(s
                ret = i40e_pf_txq_wait(pf, pf_q, false);
                if (ret) {
                        dev_info(&pf->pdev->dev,
 -                               "%s: VSI seid %d Tx ring %d disable timeout\n",
 -                               __func__, vsi->seid, pf_q);
 +                               "VSI seid %d Tx ring %d disable timeout\n",
 +                               vsi->seid, pf_q);
                        return ret;
                }
        }
@@@ -4142,108 -4146,6 +4142,108 @@@ static int i40e_pf_wait_txq_disabled(st
  }
  
  #endif
 +
 +/**
 + * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 + * @q_idx: TX queue number
 + * @vsi: Pointer to VSI struct
 + *
 + * This function checks the specified queue of the given VSI and detects a
 + * hung condition. Detection is a two-step process: this pass sets the 'hung'
 + * bit; if napi_poll runs before the next service-task pass, it clears the
 + * bit for the respective q_vector. If not, the hung condition is unchanged
 + * on the subsequent run and this function issues a SW interrupt to recover.
 + **/
 +static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 +{
 +      struct i40e_ring *tx_ring = NULL;
 +      struct i40e_pf  *pf;
 +      u32 head, val, tx_pending;
 +      int i;
 +
 +      pf = vsi->back;
 +
 +      /* now that we have an index, find the tx_ring struct */
 +      for (i = 0; i < vsi->num_queue_pairs; i++) {
 +              if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
 +                      if (q_idx == vsi->tx_rings[i]->queue_index) {
 +                              tx_ring = vsi->tx_rings[i];
 +                              break;
 +                      }
 +              }
 +      }
 +
 +      if (!tx_ring)
 +              return;
 +
 +      /* Read interrupt register */
 +      if (pf->flags & I40E_FLAG_MSIX_ENABLED)
 +              val = rd32(&pf->hw,
 +                         I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
 +                                             tx_ring->vsi->base_vector - 1));
 +      else
 +              val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 +
 +      head = i40e_get_head(tx_ring);
 +
 +      tx_pending = i40e_get_tx_pending(tx_ring);
 +
 +      /* If interrupts are disabled and TX work is still pending,
 +       * trigger the SW interrupt without waiting. Worst case is
 +       * one extra interrupt that finds nothing to clean because
 +       * the queues were already cleaned.
 +       */
 +      if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
 +              i40e_force_wb(vsi, tx_ring->q_vector);
 +}
 +
 +/**
 + * i40e_detect_recover_hung - Function to detect and recover hung_queues
 + * @pf:  pointer to PF struct
 + *
 + * LAN VSI has netdev and netdev has TX queues. This function is to check
 + * each of those TX queues if they are hung, trigger recovery by issuing
 + * SW interrupt.
 + **/
 +static void i40e_detect_recover_hung(struct i40e_pf *pf)
 +{
 +      struct net_device *netdev;
 +      struct i40e_vsi *vsi;
 +      int i;
 +
 +      /* Only for LAN VSI */
 +      vsi = pf->vsi[pf->lan_vsi];
 +
 +      if (!vsi)
 +              return;
 +
 +      /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
 +      if (test_bit(__I40E_DOWN, &vsi->back->state) ||
 +          test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
 +              return;
 +
 +      /* Make sure type is MAIN VSI */
 +      if (vsi->type != I40E_VSI_MAIN)
 +              return;
 +
 +      netdev = vsi->netdev;
 +      if (!netdev)
 +              return;
 +
 +      /* Bail out if netif_carrier is not OK */
 +      if (!netif_carrier_ok(netdev))
 +              return;
 +
 +      /* Go thru' TX queues for netdev */
 +      for (i = 0; i < netdev->num_tx_queues; i++) {
 +              struct netdev_queue *q;
 +
 +              q = netdev_get_tx_queue(netdev, i);
 +              if (q)
 +                      i40e_detect_recover_hung_queue(i, vsi);
 +      }
 +}
 +
  /**
   * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
   * @pf: pointer to PF
   * i40e_print_link_message - print link up or down
   * @vsi: the VSI for which link needs a message
   */
 -static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 +void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
  {
 -      char speed[SPEED_SIZE] = "Unknown";
 -      char fc[FC_SIZE] = "RX/TX";
 +      char *speed = "Unknown";
 +      char *fc = "Unknown";
  
 +      if (vsi->current_isup == isup)
 +              return;
 +      vsi->current_isup = isup;
        if (!isup) {
                netdev_info(vsi->netdev, "NIC Link is Down\n");
                return;
        }
  
        switch (vsi->back->hw.phy.link_info.link_speed) {
        case I40E_LINK_SPEED_40GB:
 -              strlcpy(speed, "40 Gbps", SPEED_SIZE);
 +              speed = "40 G";
                break;
        case I40E_LINK_SPEED_20GB:
 -              strncpy(speed, "20 Gbps", SPEED_SIZE);
 +              speed = "20 G";
                break;
        case I40E_LINK_SPEED_10GB:
 -              strlcpy(speed, "10 Gbps", SPEED_SIZE);
 +              speed = "10 G";
                break;
        case I40E_LINK_SPEED_1GB:
 -              strlcpy(speed, "1000 Mbps", SPEED_SIZE);
 +              speed = "1000 M";
                break;
        case I40E_LINK_SPEED_100MB:
 -              strncpy(speed, "100 Mbps", SPEED_SIZE);
 +              speed = "100 M";
                break;
        default:
                break;
        }
  
        switch (vsi->back->hw.fc.current_mode) {
        case I40E_FC_FULL:
 -              strlcpy(fc, "RX/TX", FC_SIZE);
 +              fc = "RX/TX";
                break;
        case I40E_FC_TX_PAUSE:
 -              strlcpy(fc, "TX", FC_SIZE);
 +              fc = "TX";
                break;
        case I40E_FC_RX_PAUSE:
 -              strlcpy(fc, "RX", FC_SIZE);
 +              fc = "RX";
                break;
        default:
 -              strlcpy(fc, "None", FC_SIZE);
 +              fc = "None";
                break;
        }
  
 -      netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
 +      netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
                    speed, fc);
  }
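
The rewrite above does two independent things: it caches the last link state in `current_isup` so the message is printed only on a transition, and it swaps fixed-size buffers filled by strlcpy/strncpy for pointers to string literals, which cannot overflow. A minimal sketch of the change-only logging idea, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static bool last_isup;
static bool last_valid;

/* print the link state only when it actually changed */
static void print_link_message(bool isup)
{
	if (last_valid && last_isup == isup)
		return;		/* no transition: stay quiet */
	last_isup = isup;
	last_valid = true;
	printf("NIC Link is %s\n", isup ? "Up" : "Down");
}

int main(void)
{
	print_link_message(true);	/* prints */
	print_link_message(true);	/* suppressed */
	print_link_message(false);	/* prints */
	return 0;
}
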
  
@@@ -5319,13 -5218,15 +5319,13 @@@ void i40e_do_reset(struct i40e_pf *pf, 
                         "VSI reinit requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
 +
                        if (vsi != NULL &&
                            test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
                                i40e_vsi_reinit_locked(pf->vsi[v]);
                                clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
                        }
                }
 -
 -              /* no further action needed, so return now */
 -              return;
        } else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
                int v;
  
                dev_info(&pf->pdev->dev, "VSI down requested\n");
                for (v = 0; v < pf->num_alloc_vsi; v++) {
                        struct i40e_vsi *vsi = pf->vsi[v];
 +
                        if (vsi != NULL &&
                            test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
                                set_bit(__I40E_DOWN, &vsi->state);
                                clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                        }
                }
 -
 -              /* no further action needed, so return now */
 -              return;
        } else {
                dev_info(&pf->pdev->dev,
                         "bad reset request 0x%08x\n", reset_flags);
 -              return;
        }
  }
  
@@@ -5399,7 -5303,8 +5399,7 @@@ bool i40e_dcb_need_reconfig(struct i40e
                dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
        }
  
 -      dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__,
 -              need_reconfig);
 +      dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
        return need_reconfig;
  }
  
@@@ -5426,14 -5331,16 +5426,14 @@@ static int i40e_handle_lldp_event(struc
        /* Ignore if event is not for Nearest Bridge */
        type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
                & I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
 -      dev_dbg(&pf->pdev->dev,
 -              "%s: LLDP event mib bridge type 0x%x\n", __func__, type);
 +      dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
        if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
                return ret;
  
        /* Check MIB Type and return if event for Remote MIB update */
        type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
        dev_dbg(&pf->pdev->dev,
 -              "%s: LLDP event mib type %s\n", __func__,
 -              type ? "remote" : "local");
 +              "LLDP event mib type %s\n", type ? "remote" : "local");
        if (type == I40E_AQ_LLDP_MIB_REMOTE) {
                /* Update the remote cached instance and return */
                ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
@@@ -5618,9 -5525,7 +5618,9 @@@ u32 i40e_get_global_fd_count(struct i40
   **/
  void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
  {
 +      struct i40e_fdir_filter *filter;
        u32 fcnt_prog, fcnt_avail;
 +      struct hlist_node *node;
  
        if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
                return;
                                dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
                }
        }
 +
 +      /* if hw had a problem adding a filter, delete it */
 +      if (pf->fd_inv > 0) {
 +              hlist_for_each_entry_safe(filter, node,
 +                                        &pf->fdir_filter_list, fdir_node) {
 +                      if (filter->fd_id == pf->fd_inv) {
 +                              hlist_del(&filter->fdir_node);
 +                              kfree(filter);
 +                              pf->fdir_pf_active_filters--;
 +                      }
 +              }
 +      }
  }
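
The filter-removal loop above uses hlist_for_each_entry_safe because it frees nodes while walking the list; a plain iterator would read the next pointer out of freed memory. The same idea on an ordinary singly linked list, as a hedged userspace sketch:

#include <stdlib.h>
#include <stdio.h>

struct filter {
	int fd_id;
	struct filter *next;
};

/* delete every node matching id; 'next' is saved before the free */
static void delete_matching(struct filter **head, int id)
{
	struct filter **pp = head;

	while (*pp) {
		struct filter *cur = *pp;
		struct filter *next = cur->next;	/* save before free */

		if (cur->fd_id == id) {
			*pp = next;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

int main(void)
{
	struct filter *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct filter *f = malloc(sizeof(*f));
		if (!f)
			return 1;
		f->fd_id = i % 2;	/* ids 0, 1, 0 */
		f->next = head;
		head = f;
	}
	delete_matching(&head, 0);
	for (struct filter *f = head; f; f = f->next)
		printf("kept id %d\n", f->fd_id);
	return 0;
}
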
  
  #define I40E_MIN_FD_FLUSH_INTERVAL 10
@@@ -5680,51 -5573,49 +5680,51 @@@ static void i40e_fdir_flush_and_replay(
        if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
                return;
  
 -      if (time_after(jiffies, pf->fd_flush_timestamp +
 -                              (I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
 -              /* If the flush is happening too quick and we have mostly
 -               * SB rules we should not re-enable ATR for some time.
 -               */
 -              min_flush_time = pf->fd_flush_timestamp
 -                              + (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
 -              fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
 +      if (!time_after(jiffies, pf->fd_flush_timestamp +
 +                               (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
 +              return;
  
 -              if (!(time_after(jiffies, min_flush_time)) &&
 -                  (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
 -                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 -                              dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
 -                      disable_atr = true;
 -              }
 +      /* If the flush is happening too quickly and we have mostly SB rules,
 +       * we should not re-enable ATR for some time.
 +       */
 +      min_flush_time = pf->fd_flush_timestamp +
 +                       (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
 +      fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
  
 -              pf->fd_flush_timestamp = jiffies;
 -              pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 -              /* flush all filters */
 -              wr32(&pf->hw, I40E_PFQF_CTL_1,
 -                   I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
 -              i40e_flush(&pf->hw);
 -              pf->fd_flush_cnt++;
 -              pf->fd_add_err = 0;
 -              do {
 -                      /* Check FD flush status every 5-6msec */
 -                      usleep_range(5000, 6000);
 -                      reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
 -                      if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
 -                              break;
 -              } while (flush_wait_retry--);
 -              if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
 -                      dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
 -              } else {
 -                      /* replay sideband filters */
 -                      i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
 -                      if (!disable_atr)
 -                              pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 -                      clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
 -                      if (I40E_DEBUG_FD & pf->hw.debug_mask)
 -                              dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
 -              }
 +      if (!(time_after(jiffies, min_flush_time)) &&
 +          (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
 +              if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                      dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
 +              disable_atr = true;
        }
 +
 +      pf->fd_flush_timestamp = jiffies;
 +      pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
 +      /* flush all filters */
 +      wr32(&pf->hw, I40E_PFQF_CTL_1,
 +           I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
 +      i40e_flush(&pf->hw);
 +      pf->fd_flush_cnt++;
 +      pf->fd_add_err = 0;
 +      do {
 +              /* Check FD flush status every 5-6msec */
 +              usleep_range(5000, 6000);
 +              reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
 +              if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
 +                      break;
 +      } while (flush_wait_retry--);
 +      if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
 +              dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
 +      } else {
 +              /* replay sideband filters */
 +              i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
 +              if (!disable_atr)
 +                      pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
 +              clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
 +              if (I40E_DEBUG_FD & pf->hw.debug_mask)
 +                      dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
 +      }
  }
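
Besides flattening the function into guard clauses, the body above keeps the bounded poll loop: write the flush bit, then re-read the register every few milliseconds until the bit clears or the retry budget runs out. A generic sketch of that pattern with a simulated register; all names are hypothetical:

#include <stdio.h>
#include <unistd.h>

#define FLUSH_BIT 0x1u

static unsigned int fake_reg = FLUSH_BIT;

/* pretend the hardware clears the bit after a couple of reads */
static unsigned int read_reg(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reg &= ~FLUSH_BIT;
	return fake_reg;
}

int main(void)
{
	int retries = 50;
	unsigned int reg;

	do {
		usleep(5000);		/* poll every ~5 ms */
		reg = read_reg();
		if (!(reg & FLUSH_BIT))
			break;
	} while (retries--);

	if (reg & FLUSH_BIT)
		fprintf(stderr, "flush did not complete, needs more time\n");
	else
		printf("flush completed, safe to replay filters\n");
	return 0;
}
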
  
  /**
@@@ -5832,23 -5723,15 +5832,23 @@@ static void i40e_veb_link_event(struct 
   **/
  static void i40e_link_event(struct i40e_pf *pf)
  {
 -      bool new_link, old_link;
        struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
        u8 new_link_speed, old_link_speed;
 +      i40e_status status;
 +      bool new_link, old_link;
  
        /* set this to force the get_link_status call to refresh state */
        pf->hw.phy.get_link_info = true;
  
        old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
 -      new_link = i40e_get_link_status(&pf->hw);
 +
 +      status = i40e_get_link_status(&pf->hw, &new_link);
 +      if (status) {
 +              dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
 +                      status);
 +              return;
 +      }
 +
        old_link_speed = pf->hw.phy.link_info_old.link_speed;
        new_link_speed = pf->hw.phy.link_info.link_speed;
  
                i40e_ptp_set_increment(pf);
  }
  
 -/**
 - * i40e_check_hang_subtask - Check for hung queues and dropped interrupts
 - * @pf: board private structure
 - *
 - * Set the per-queue flags to request a check for stuck queues in the irq
 - * clean functions, then force interrupts to be sure the irq clean is called.
 - **/
 -static void i40e_check_hang_subtask(struct i40e_pf *pf)
 -{
 -      int i, v;
 -
 -      /* If we're down or resetting, just bail */
 -      if (test_bit(__I40E_DOWN, &pf->state) ||
 -          test_bit(__I40E_CONFIG_BUSY, &pf->state))
 -              return;
 -
 -      /* for each VSI/netdev
 -       *     for each Tx queue
 -       *         set the check flag
 -       *     for each q_vector
 -       *         force an interrupt
 -       */
 -      for (v = 0; v < pf->num_alloc_vsi; v++) {
 -              struct i40e_vsi *vsi = pf->vsi[v];
 -              int armed = 0;
 -
 -              if (!pf->vsi[v] ||
 -                  test_bit(__I40E_DOWN, &vsi->state) ||
 -                  (vsi->netdev && !netif_carrier_ok(vsi->netdev)))
 -                      continue;
 -
 -              for (i = 0; i < vsi->num_queue_pairs; i++) {
 -                      set_check_for_tx_hang(vsi->tx_rings[i]);
 -                      if (test_bit(__I40E_HANG_CHECK_ARMED,
 -                                   &vsi->tx_rings[i]->state))
 -                              armed++;
 -              }
 -
 -              if (armed) {
 -                      if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
 -                              wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0,
 -                                   (I40E_PFINT_DYN_CTL0_INTENA_MASK |
 -                                    I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
 -                                    I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
 -                                    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
 -                                    I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK));
 -                      } else {
 -                              u16 vec = vsi->base_vector - 1;
 -                              u32 val = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
 -                                    I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
 -                                    I40E_PFINT_DYN_CTLN_ITR_INDX_MASK |
 -                                    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK |
 -                                    I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK);
 -                              for (i = 0; i < vsi->num_q_vectors; i++, vec++)
 -                                      wr32(&vsi->back->hw,
 -                                           I40E_PFINT_DYN_CTLN(vec), val);
 -                      }
 -                      i40e_flush(&vsi->back->hw);
 -              }
 -      }
 -}
 -
  /**
   * i40e_watchdog_subtask - periodic checks not using event driven response
   * @pf: board private structure
@@@ -5895,8 -5840,8 +5895,8 @@@ static void i40e_watchdog_subtask(struc
                return;
        pf->service_timer_previous = jiffies;
  
 -      i40e_check_hang_subtask(pf);
 -      i40e_link_event(pf);
 +      if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
 +              i40e_link_event(pf);
  
        /* Update the stats for active netdevs so the network stack
         * can look at updated numbers whenever it cares to
                if (pf->vsi[i] && pf->vsi[i]->netdev)
                        i40e_update_stats(pf->vsi[i]);
  
 -      /* Update the stats for the active switching components */
 -      for (i = 0; i < I40E_MAX_VEB; i++)
 -              if (pf->veb[i])
 -                      i40e_update_veb_stats(pf->veb[i]);
 +      if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
 +              /* Update the stats for the active switching components */
 +              for (i = 0; i < I40E_MAX_VEB; i++)
 +                      if (pf->veb[i])
 +                              i40e_update_veb_stats(pf->veb[i]);
 +      }
  
        i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
  }
@@@ -6221,9 -6164,8 +6221,9 @@@ static void i40e_config_bridge_mode(str
  {
        struct i40e_pf *pf = veb->pf;
  
 -      dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
 -               veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
 +      if (pf->hw.debug_mask & I40E_DEBUG_LAN)
 +              dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
 +                       veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
        if (veb->bridge_mode & BRIDGE_MODE_VEPA)
                i40e_disable_pf_switch_lb(pf);
        else
@@@ -6290,7 -6232,6 +6290,7 @@@ static int i40e_reconstitute_veb(struc
  
                if (pf->vsi[v]->veb_idx == veb->idx) {
                        struct i40e_vsi *vsi = pf->vsi[v];
 +
                        vsi->uplink_seid = veb->seid;
                        ret = i40e_add_vsi(vsi);
                        if (ret) {
@@@ -6355,6 -6296,12 +6355,6 @@@ static int i40e_get_capabilities(struc
                }
        } while (err);
  
 -      if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
 -          (pf->hw.aq.fw_maj_ver < 2)) {
 -              pf->hw.func_caps.num_msix_vectors++;
 -              pf->hw.func_caps.num_msix_vectors_vf++;
 -      }
 -
        if (pf->hw.debug_mask & I40E_DEBUG_USER)
                dev_info(&pf->pdev->dev,
                         "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
@@@ -6567,7 -6514,9 +6567,7 @@@ static void i40e_reset_and_rebuild(stru
        }
  #endif /* CONFIG_I40E_DCB */
  #ifdef I40E_FCOE
 -      ret = i40e_init_pf_fcoe(pf);
 -      if (ret)
 -              dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
 +      i40e_init_pf_fcoe(pf);
  
  #endif
        /* do basic switch setup */
        /* make sure our flow control settings are restored */
        ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
        if (ret)
 -              dev_info(&pf->pdev->dev, "set fc fail, err %s aq_err %s\n",
 -                       i40e_stat_str(&pf->hw, ret),
 -                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 +              dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
 +                      i40e_stat_str(&pf->hw, ret),
 +                      i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
  
        /* Rebuild the VSIs and VEBs that existed before reset.
         * They are still in our local switch element arrays, so only
@@@ -6859,7 -6808,6 +6859,7 @@@ static void i40e_service_task(struct wo
                return;
        }
  
 +      i40e_detect_recover_hung(pf);
        i40e_reset_subtask(pf);
        i40e_handle_mdd_event(pf);
        i40e_vc_process_vflr_event(pf);
@@@ -7043,7 -6991,6 +7043,7 @@@ static int i40e_vsi_mem_alloc(struct i4
        vsi->idx = vsi_idx;
        vsi->rx_itr_setting = pf->rx_itr_default;
        vsi->tx_itr_setting = pf->tx_itr_default;
 +      vsi->int_rate_limit = 0;
        vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
                                pf->rss_table_size : 64;
        vsi->netdev_registered = false;
@@@ -7619,7 -7566,7 +7619,7 @@@ static int i40e_config_rss_aq(struct i4
                         "Cannot set RSS key, err %s aq_err %s\n",
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 -              return ret;
 +              goto config_rss_aq_out;
        }
  
        if (vsi->type == I40E_VSI_MAIN)
                         i40e_stat_str(&pf->hw, ret),
                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
  
 +config_rss_aq_out:
 +      kfree(rss_lut);
        return ret;
  }
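
The new config_rss_aq_out label above fixes a leak: the early `return ret` skipped the `kfree(rss_lut)` at the bottom of the function. Routing every failure through one exit guarantees the buffer is freed on success and error alike. A minimal userspace analogue of the pattern, stubs and names hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* stubs standing in for the firmware calls; the second one fails */
static int set_key(unsigned char *key) { (void)key; return 0; }
static int set_lut(unsigned char *lut) { (void)lut; return -1; }

static int config_rss(void)
{
	unsigned char *lut;
	int ret;

	lut = malloc(512);
	if (!lut)
		return -1;
	memset(lut, 0, 512);

	ret = set_key(lut);
	if (ret)
		goto out;	/* an early 'return ret' here leaked 'lut' */

	ret = set_lut(lut);
out:
	free(lut);		/* the single exit frees on every path */
	return ret;
}

int main(void)
{
	printf("config_rss: %d\n", config_rss());
	return 0;
}
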
  
@@@ -7909,7 -7854,6 +7909,7 @@@ static int i40e_sw_init(struct i40e_pf 
        /* Set default capability flags */
        pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
                    I40E_FLAG_MSI_ENABLED     |
 +                  I40E_FLAG_LINK_POLLING_ENABLED |
                    I40E_FLAG_MSIX_ENABLED;
  
        if (iommu_present(&pci_bus_type))
            (pf->hw.func_caps.fd_filters_best_effort > 0)) {
                pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
                pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
 -              if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 -                      pf->flags |= I40E_FLAG_FD_SB_ENABLED;
 -              } else {
 +              if (pf->flags & I40E_FLAG_MFP_ENABLED &&
 +                  pf->hw.num_partitions > 1)
                        dev_info(&pf->pdev->dev,
                                 "Flow Director Sideband mode Disabled in MFP mode\n");
 -              }
 +              else
 +                      pf->flags |= I40E_FLAG_FD_SB_ENABLED;
                pf->fdir_pf_filter_count =
                                 pf->hw.func_caps.fd_filters_guaranteed;
                pf->hw.fdir_shared_filter_count =
        }
  
  #ifdef I40E_FCOE
 -      err = i40e_init_pf_fcoe(pf);
 -      if (err)
 -              dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
 +      i40e_init_pf_fcoe(pf);
  
  #endif /* I40E_FCOE */
  #ifdef CONFIG_PCI_IOV
        pf->lan_veb = I40E_NO_VEB;
        pf->lan_vsi = I40E_NO_VSI;
  
 +      /* By default FW has this off for performance reasons */
 +      pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
 +
        /* set up queue assignment tracking */
        size = sizeof(struct i40e_lump_tracking)
                + (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
@@@ -8176,6 -8119,9 +8176,6 @@@ static void i40e_del_vxlan_port(struct 
                pf->vxlan_ports[idx] = 0;
                pf->pending_vxlan_bitmap |= BIT_ULL(idx);
                pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
 -
 -              dev_info(&pf->pdev->dev, "deleting vxlan port %d\n",
 -                       ntohs(port));
        } else {
                netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
@@@ -8327,15 -8273,13 +8327,15 @@@ static int i40e_ndo_bridge_setlink(stru
   * @seq: RTNL message seq #
   * @dev: the netdev being configured
   * @filter_mask: unused
 + * @nlflags: netlink flags passed in
   *
   * Return the mode in which the hardware bridge is operating,
   * i.e. VEB or VEPA.
   **/
  static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
                                   struct net_device *dev,
 -                                 u32 filter_mask, int nlflags)
 +                                 u32 __always_unused filter_mask,
 +                                 int nlflags)
  {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_vsi *vsi = np->vsi;
@@@ -8445,6 -8389,7 +8445,7 @@@ static int i40e_config_netdev(struct i4
  
        netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
                                  NETIF_F_GSO_UDP_TUNNEL |
+                                 NETIF_F_GSO_GRE        |
                                  NETIF_F_TSO;
  
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_SCTP_CSUM           |
                           NETIF_F_HIGHDMA             |
                           NETIF_F_GSO_UDP_TUNNEL      |
+                          NETIF_F_GSO_GRE             |
                           NETIF_F_HW_VLAN_CTAG_TX     |
                           NETIF_F_HW_VLAN_CTAG_RX     |
                           NETIF_F_HW_VLAN_CTAG_FILTER |
@@@ -8827,7 -8773,7 +8829,7 @@@ int i40e_vsi_release(struct i40e_vsi *v
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
                i40e_del_filter(vsi, f->macaddr, f->vlan,
                                f->is_vf, f->is_netdev);
 -      i40e_sync_vsi_filters(vsi);
 +      i40e_sync_vsi_filters(vsi, false);
  
        i40e_vsi_delete(vsi);
        i40e_vsi_free_q_vectors(vsi);
@@@ -9052,7 -8998,8 +9054,7 @@@ struct i40e_vsi *i40e_vsi_setup(struct 
                if (veb) {
                        if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
                                dev_info(&vsi->back->pdev->dev,
 -                                       "%s: New VSI creation error, uplink seid of LAN VSI expected.\n",
 -                                       __func__);
 +                                       "New VSI creation error, uplink seid of LAN VSI expected.\n");
                                return NULL;
                        }
                        /* We come up by default in VEPA mode if SRIOV is not
@@@ -9702,7 -9649,6 +9704,7 @@@ static int i40e_setup_pf_switch(struct 
        } else {
                /* force a reset of TC and queue layout configurations */
                u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
 +
                pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
                pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
                i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
                i40e_config_rss(pf);
  
        /* fill in link information and enable LSE reporting */
 -      i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
 +      i40e_update_link_info(&pf->hw);
        i40e_link_event(pf);
  
        /* Initialize user-specific link properties */
@@@ -9844,14 -9790,8 +9846,14 @@@ static void i40e_determine_queue_usage(
        }
  
        pf->queues_left = queues_left;
 +      dev_dbg(&pf->pdev->dev,
 +              "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
 +              pf->hw.func_caps.num_tx_qp,
 +              !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
 +              pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
 +              pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
  #ifdef I40E_FCOE
 -      dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 +      dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
  #endif
  }
  
@@@ -9919,19 -9859,12 +9921,19 @@@ static void i40e_print_features(struct 
        }
        if (pf->flags & I40E_FLAG_DCB_CAPABLE)
                buf += sprintf(buf, "DCB ");
 +#if IS_ENABLED(CONFIG_VXLAN)
 +      buf += sprintf(buf, "VxLAN ");
 +#endif
        if (pf->flags & I40E_FLAG_PTP)
                buf += sprintf(buf, "PTP ");
  #ifdef I40E_FCOE
        if (pf->flags & I40E_FLAG_FCOE_ENABLED)
                buf += sprintf(buf, "FCOE ");
  #endif
 +      if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
 +              buf += sprintf(buf, "VEB ");
 +      else
 +              buf += sprintf(buf, "VEPA ");
  
        BUG_ON(buf > (string + INFO_STRING_LEN));
        dev_info(&pf->pdev->dev, "%s\n", string);
  static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        struct i40e_aq_get_phy_abilities_resp abilities;
 -      unsigned long ioremap_len;
        struct i40e_pf *pf;
        struct i40e_hw *hw;
        static u16 pfs_found;
 +      u16 wol_nvm_bits;
        u16 link_status;
        int err = 0;
        u32 len;
        hw = &pf->hw;
        hw->back = pf;
  
 -      ioremap_len = min_t(unsigned long, pci_resource_len(pdev, 0),
 -                          I40E_MAX_CSR_SPACE);
 +      pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
 +                              I40E_MAX_CSR_SPACE);
  
 -      hw->hw_addr = ioremap(pci_resource_start(pdev, 0), ioremap_len);
 +      hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
        if (!hw->hw_addr) {
                err = -EIO;
                dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
                         (unsigned int)pci_resource_start(pdev, 0),
 -                       (unsigned int)pci_resource_len(pdev, 0), err);
 +                       pf->ioremap_len, err);
                goto err_ioremap;
        }
        hw->vendor_id = pdev->vendor;
        pf->hw.fc.requested_mode = I40E_FC_NONE;
  
        err = i40e_init_adminq(hw);
 -      dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
 +
 +      /* provide nvm, fw, api versions */
 +      dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
 +               hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
 +               hw->aq.api_maj_ver, hw->aq.api_min_ver,
 +               i40e_nvm_version_str(hw));
 +
        if (err) {
                dev_info(&pdev->dev,
                         "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
        INIT_WORK(&pf->service_task, i40e_service_task);
        clear_bit(__I40E_SERVICE_SCHED, &pf->state);
        pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;
 -      pf->link_check_timeout = jiffies;
  
 -      /* WoL defaults to disabled */
 -      pf->wol_en = false;
 +      /* NVM bit on means WoL disabled for the port */
 +      i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
 +      if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
 +              pf->wol_en = false;
 +      else
 +              pf->wol_en = true;
        device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
  
        /* set up the main switch operations */
        i40e_fcoe_vsi_setup(pf);
  
  #endif
 -      /* Get the negotiated link width and speed from PCI config space */
 -      pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
 +#define PCI_SPEED_SIZE 8
 +#define PCI_WIDTH_SIZE 8
 +      /* Devices on the IOSF bus do not have this information
 +       * and will report PCI Gen 1 x 1 by default so don't bother
 +       * checking them.
 +       */
 +      if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
 +              char speed[PCI_SPEED_SIZE] = "Unknown";
 +              char width[PCI_WIDTH_SIZE] = "Unknown";
  
 -      i40e_set_pci_config_data(hw, link_status);
 +              /* Get the negotiated link width and speed from PCI config
 +               * space
 +               */
 +              pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
 +                                        &link_status);
 +
 +              i40e_set_pci_config_data(hw, link_status);
 +
 +              switch (hw->bus.speed) {
 +              case i40e_bus_speed_8000:
 +                      strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
 +              case i40e_bus_speed_5000:
 +                      strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
 +              case i40e_bus_speed_2500:
 +                      strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
 +              default:
 +                      break;
 +              }
 +              switch (hw->bus.width) {
 +              case i40e_bus_width_pcie_x8:
 +                      strncpy(width, "8", PCI_WIDTH_SIZE); break;
 +              case i40e_bus_width_pcie_x4:
 +                      strncpy(width, "4", PCI_WIDTH_SIZE); break;
 +              case i40e_bus_width_pcie_x2:
 +                      strncpy(width, "2", PCI_WIDTH_SIZE); break;
 +              case i40e_bus_width_pcie_x1:
 +                      strncpy(width, "1", PCI_WIDTH_SIZE); break;
 +              default:
 +                      break;
 +              }
  
 -      dev_info(&pdev->dev, "PCI-Express: %s %s\n",
 -              (hw->bus.speed == i40e_bus_speed_8000 ? "Speed 8.0GT/s" :
 -               hw->bus.speed == i40e_bus_speed_5000 ? "Speed 5.0GT/s" :
 -               hw->bus.speed == i40e_bus_speed_2500 ? "Speed 2.5GT/s" :
 -               "Unknown"),
 -              (hw->bus.width == i40e_bus_width_pcie_x8 ? "Width x8" :
 -               hw->bus.width == i40e_bus_width_pcie_x4 ? "Width x4" :
 -               hw->bus.width == i40e_bus_width_pcie_x2 ? "Width x2" :
 -               hw->bus.width == i40e_bus_width_pcie_x1 ? "Width x1" :
 -               "Unknown"));
 +              dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
 +                       speed, width);
  
 -      if (hw->bus.width < i40e_bus_width_pcie_x8 ||
 -          hw->bus.speed < i40e_bus_speed_8000) {
 -              dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
 -              dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
 +              if (hw->bus.width < i40e_bus_width_pcie_x8 ||
 +                  hw->bus.speed < i40e_bus_speed_8000) {
 +                      dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
 +                      dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
 +              }
        }
  
        /* get the requested speeds from the fw */
        err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
        if (err)
 -              dev_info(&pf->pdev->dev,
 -                       "get phy capabilities failed, err %s aq_err %s, advertised speed settings may not be correct\n",
 -                       i40e_stat_str(&pf->hw, err),
 -                       i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 +              dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
 +                      i40e_stat_str(&pf->hw, err),
 +                      i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
        pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
  
 +      /* get the supported phy types from the fw */
 +      err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
 +      if (err)
 +              dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
 +                      i40e_stat_str(&pf->hw, err),
 +                      i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
 +      pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);
 +
        /* print a string summarizing features */
        i40e_print_features(pf);
  
@@@ -10551,7 -10439,7 +10553,7 @@@ static pci_ers_result_t i40e_pci_error_
        int err;
        u32 reg;
  
 -      dev_info(&pdev->dev, "%s\n", __func__);
 +      dev_dbg(&pdev->dev, "%s\n", __func__);
        if (pci_enable_device_mem(pdev)) {
                dev_info(&pdev->dev,
                         "Cannot re-enable PCI device after reset.\n");
@@@ -10591,13 -10479,13 +10593,13 @@@ static void i40e_pci_error_resume(struc
  {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
  
 -      dev_info(&pdev->dev, "%s\n", __func__);
 +      dev_dbg(&pdev->dev, "%s\n", __func__);
        if (test_bit(__I40E_SUSPENDED, &pf->state))
                return;
  
        rtnl_lock();
        i40e_handle_reset_warning(pf);
 -      rtnl_lock();
 +      rtnl_unlock();
  }
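
The one-character hunk above is a real bug fix: the error-resume path took the RTNL lock a second time instead of releasing it, which self-deadlocks because the RTNL mutex is not recursive. The discipline is mechanical, as in this hedged pthread sketch: every acquisition pairs with exactly one release on the same path.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int link_state;

static void error_resume(void)
{
	pthread_mutex_lock(&lock);
	link_state = 1;			/* critical section */
	pthread_mutex_unlock(&lock);	/* a second lock() here would
					 * self-deadlock on this
					 * non-recursive mutex */
}

int main(void)
{
	error_resume();
	printf("link_state=%d\n", link_state);
	return 0;
}
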
  
  /**
@@@ -10683,7 -10571,9 +10685,7 @@@ static int i40e_resume(struct pci_dev *
  
        err = pci_enable_device_mem(pdev);
        if (err) {
 -              dev_err(&pdev->dev,
 -                      "%s: Cannot enable PCI device from suspend\n",
 -                      __func__);
 +              dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
                return err;
        }
        pci_set_master(pdev);
index 3eba36913c1d18d98e8042a793d55411dd4f3277,a23ebfd5cd254014cd0632979aa441a2a6f6eadf..fd123ca60761e84721a02ab991ed677925338126
@@@ -373,7 -373,6 +373,6 @@@ static i40e_status i40e_init_asq(struc
  
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;
-       hw->aq.asq.count = hw->aq.num_asq_entries;
  
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
                goto init_adminq_free_rings;
  
        /* success! */
+       hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;
  
  init_adminq_free_rings:
@@@ -432,7 -432,6 +432,6 @@@ static i40e_status i40e_init_arq(struc
  
        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;
-       hw->aq.arq.count = hw->aq.num_arq_entries;
  
        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
                goto init_adminq_free_rings;
  
        /* success! */
+       hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;
  
  init_adminq_free_rings:
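
Both hunks above move the `count = num_*_entries` assignment from before the ring allocation to the success path. Since `count` doubles as the queue's "initialized" flag elsewhere in the driver, setting it early made a failed init look like a live queue. A small sketch of the initialize-last rule, names hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct ring {
	int count;	/* non-zero means 'initialized' to all other code */
	void *mem;
};

static int ring_init(struct ring *r, int entries)
{
	r->count = 0;			/* stay 'uninitialized' until done */
	r->mem = calloc(entries, 32);
	if (!r->mem)
		return -1;		/* count is still 0: safe to observe */
	r->count = entries;		/* publish only after success */
	return 0;
}

int main(void)
{
	struct ring r;

	if (ring_init(&r, 16) == 0)
		printf("ring live, count=%d\n", r.count);
	free(r.mem);
	return 0;
}
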
@@@ -469,12 -469,8 +469,12 @@@ static i40e_status i40e_shutdown_asq(st
  {
        i40e_status ret_code = 0;
  
 -      if (hw->aq.asq.count == 0)
 -              return I40E_ERR_NOT_READY;
 +      mutex_lock(&hw->aq.asq_mutex);
 +
 +      if (hw->aq.asq.count == 0) {
 +              ret_code = I40E_ERR_NOT_READY;
 +              goto shutdown_asq_out;
 +      }
  
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.bal, 0);
        wr32(hw, hw->aq.asq.bah, 0);
  
 -      /* make sure lock is available */
 -      mutex_lock(&hw->aq.asq_mutex);
 -
        hw->aq.asq.count = 0; /* to indicate uninitialized queue */
  
        /* free ring buffers */
        i40e_free_asq_bufs(hw);
  
 +shutdown_asq_out:
        mutex_unlock(&hw->aq.asq_mutex);
 -
        return ret_code;
  }
  
@@@ -503,12 -502,8 +503,12 @@@ static i40e_status i40e_shutdown_arq(st
  {
        i40e_status ret_code = 0;
  
 -      if (hw->aq.arq.count == 0)
 -              return I40E_ERR_NOT_READY;
 +      mutex_lock(&hw->aq.arq_mutex);
 +
 +      if (hw->aq.arq.count == 0) {
 +              ret_code = I40E_ERR_NOT_READY;
 +              goto shutdown_arq_out;
 +      }
  
        /* Stop firmware AdminQ processing */
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.bal, 0);
        wr32(hw, hw->aq.arq.bah, 0);
  
 -      /* make sure lock is available */
 -      mutex_lock(&hw->aq.arq_mutex);
 -
        hw->aq.arq.count = 0; /* to indicate uninitialized queue */
  
        /* free ring buffers */
        i40e_free_arq_bufs(hw);
  
 +shutdown_arq_out:
        mutex_unlock(&hw->aq.arq_mutex);
 -
        return ret_code;
  }
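
Both shutdown paths used to test `count == 0` before taking the mutex, so a concurrent caller could observe a half-torn-down queue. Moving the check inside the critical section, with a goto to the single unlock, closes that window. A hedged sketch of check-then-act done under the lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static int q_count = 8;		/* non-zero: queue is live */

static int shutdown_queue(void)
{
	int ret = 0;

	pthread_mutex_lock(&q_lock);	/* take the lock first... */
	if (q_count == 0) {
		ret = -1;		/* ...then decide, race-free */
		goto out;
	}
	q_count = 0;			/* mark uninitialized, free buffers */
out:
	pthread_mutex_unlock(&q_lock);
	return ret;
}

int main(void)
{
	printf("first: %d, second: %d\n", shutdown_queue(), shutdown_queue());
	return 0;
}
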
  
@@@ -598,9 -596,6 +598,9 @@@ i40e_status i40evf_shutdown_adminq(stru
  
        /* destroy the locks */
  
 +      if (hw->nvm_buff.va)
 +              i40e_free_virt_mem(hw, &hw->nvm_buff);
 +
        return ret_code;
  }
  
@@@ -622,7 -617,8 +622,7 @@@ static u16 i40e_clean_asq(struct i40e_h
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "%s: ntc %d head %d.\n", __func__, ntc,
 -                         rd32(hw, hw->aq.asq.head));
 +                         "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
  
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
@@@ -686,23 -682,19 +686,23 @@@ i40e_status i40evf_asq_send_command(str
        u16  retval = 0;
        u32  val = 0;
  
 -      val = rd32(hw, hw->aq.asq.head);
 -      if (val >= hw->aq.num_asq_entries) {
 +      mutex_lock(&hw->aq.asq_mutex);
 +
 +      if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "AQTX: head overrun at %d\n", val);
 +                         "AQTX: Admin queue not initialized.\n");
                status = I40E_ERR_QUEUE_EMPTY;
 -              goto asq_send_command_exit;
 +              goto asq_send_command_error;
        }
  
 -      if (hw->aq.asq.count == 0) {
 +      hw->aq.asq_last_status = I40E_AQ_RC_OK;
 +
 +      val = rd32(hw, hw->aq.asq.head);
 +      if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
 -                         "AQTX: Admin queue not initialized.\n");
 +                         "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
 -              goto asq_send_command_exit;
 +              goto asq_send_command_error;
        }
  
        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);
  
 -      mutex_lock(&hw->aq.asq_mutex);
 -
        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
        i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
                        buff_size);
  
 +      /* save the writeback descriptor if requested */
 +      if (details->wb_desc)
 +              *details->wb_desc = *desc_on_ring;
 +
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
  
  asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
 -asq_send_command_exit:
        return status;
  }
  
index bcbdfab1fe19b137f35046eddf87ae30847fd9e5,cc3a9897574c542ee368ab067c109128ce89b290..85f1b1e7e505727bcdb7f424348d9dce0c5825d3
@@@ -863,8 -863,6 +863,8 @@@ static int mlx4_slave_cap(struct mlx4_d
                return -ENODEV;
        }
  
 +      mlx4_replace_zero_macs(dev);
 +
        dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
        dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
        dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
@@@ -2671,14 -2669,11 +2671,11 @@@ static void mlx4_enable_msi_x(struct ml
  
        if (msi_x) {
                int nreq = dev->caps.num_ports * num_online_cpus() + 1;
-               bool shared_ports = false;
  
                nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                             nreq);
-               if (nreq > MAX_MSIX) {
+               if (nreq > MAX_MSIX)
                        nreq = MAX_MSIX;
-                       shared_ports = true;
-               }
  
                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
                if (!entries)
                bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
                            dev->caps.num_ports);
  
-               if (MLX4_IS_LEGACY_EQ_MODE(dev->caps))
-                       shared_ports = true;
                for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
                        if (i == MLX4_EQ_ASYNC)
                                continue;
                        priv->eq_table.eq[i].irq =
                                entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
  
-                       if (shared_ports) {
+                       if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
                                bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
                                            dev->caps.num_ports);
                                /* We don't set affinity hint when there
index ae302614e74be279d028feaffc4982153bc9e266,3b9480fa3403dee018931c16b678f5ec6c98e913..a87e773e93f3439dbd4cf93bc08a9a7563f4977b
@@@ -90,13 -90,16 +90,13 @@@ int mlx5_set_port_caps(struct mlx5_core
  {
        struct mlx5_reg_pcap in;
        struct mlx5_reg_pcap out;
 -      int err;
  
        memset(&in, 0, sizeof(in));
        in.caps_127_96 = cpu_to_be32(caps);
        in.port_num = port_num;
  
 -      err = mlx5_core_access_reg(dev, &in, sizeof(in), &out,
 -                                 sizeof(out), MLX5_REG_PCAP, 0, 1);
 -
 -      return err;
 +      return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
 +                                  sizeof(out), MLX5_REG_PCAP, 0, 1);
  }
  EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
  
@@@ -104,13 -107,16 +104,13 @@@ int mlx5_query_port_ptys(struct mlx5_co
                         int ptys_size, int proto_mask, u8 local_port)
  {
        u32 in[MLX5_ST_SZ_DW(ptys_reg)];
 -      int err;
  
        memset(in, 0, sizeof(in));
        MLX5_SET(ptys_reg, in, local_port, local_port);
        MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
  
 -      err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
 -                                 ptys_size, MLX5_REG_PTYS, 0, 0);
 -
 -      return err;
 +      return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
 +                                  ptys_size, MLX5_REG_PTYS, 0, 0);
  }
  EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
  
@@@ -193,6 -199,7 +193,6 @@@ int mlx5_set_port_proto(struct mlx5_cor
  {
        u32 in[MLX5_ST_SZ_DW(ptys_reg)];
        u32 out[MLX5_ST_SZ_DW(ptys_reg)];
 -      int err;
  
        memset(in, 0, sizeof(in));
  
        else
                MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
  
 -      err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 -                                 sizeof(out), MLX5_REG_PTYS, 0, 1);
 -      return err;
 +      return mlx5_core_access_reg(dev, in, sizeof(in), out,
 +                                  sizeof(out), MLX5_REG_PTYS, 0, 1);
  }
  EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
  
@@@ -242,7 -250,7 +242,7 @@@ int mlx5_query_port_admin_status(struc
                return err;
  
        *status = MLX5_GET(paos_reg, out, admin_status);
 -      return err;
 +      return 0;
  }
  EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
  
@@@ -300,12 -308,15 +300,12 @@@ static int mlx5_query_port_pvlc(struct 
                                int pvlc_size,  u8 local_port)
  {
        u32 in[MLX5_ST_SZ_DW(pvlc_reg)];
 -      int err;
  
        memset(in, 0, sizeof(in));
-       MLX5_SET(ptys_reg, in, local_port, local_port);
+       MLX5_SET(pvlc_reg, in, local_port, local_port);
  
 -      err = mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
 -                                 pvlc_size, MLX5_REG_PVLC, 0, 0);
 -
 -      return err;
 +      return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
 +                                  pvlc_size, MLX5_REG_PVLC, 0, 0);
  }
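
The mlx5 fix above is a copy-paste bug: the PVLC query packed `local_port` using the PTYS register layout. MLX5_SET derives a field's offset and width from the named register's layout, so naming the wrong register silently writes the wrong bits. A toy model of layout-driven field packing; every name and offset here is hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* per-register layout: byte offset of the local_port field */
struct layout { unsigned int local_port_off; };

static const struct layout ptys_layout = { .local_port_off = 1 };
static const struct layout pvlc_layout = { .local_port_off = 2 };

static void set_local_port(const struct layout *l, uint8_t *buf, uint8_t port)
{
	buf[l->local_port_off] = port;	/* offset comes from the layout */
}

int main(void)
{
	uint8_t in[4];

	memset(in, 0, sizeof(in));
	set_local_port(&ptys_layout, in, 1);	/* the bug: PTYS layout... */
	printf("pvlc field reads %u (expected 1)\n",
	       (unsigned int)in[pvlc_layout.local_port_off]);

	memset(in, 0, sizeof(in));
	set_local_port(&pvlc_layout, in, 1);	/* the fix: PVLC layout */
	printf("pvlc field reads %u\n",
	       (unsigned int)in[pvlc_layout.local_port_off]);
	return 0;
}
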
  
  int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
@@@ -328,14 -339,16 +328,14 @@@ int mlx5_set_port_pause(struct mlx5_cor
  {
        u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
        u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
 -      int err;
  
        memset(in, 0, sizeof(in));
        MLX5_SET(pfcc_reg, in, local_port, 1);
        MLX5_SET(pfcc_reg, in, pptx, tx_pause);
        MLX5_SET(pfcc_reg, in, pprx, rx_pause);
  
 -      err = mlx5_core_access_reg(dev, in, sizeof(in), out,
 -                                 sizeof(out), MLX5_REG_PFCC, 0, 1);
 -      return err;
 +      return mlx5_core_access_reg(dev, in, sizeof(in), out,
 +                                  sizeof(out), MLX5_REG_PFCC, 0, 1);
  }
  EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
  
index 9f4a0bf013365c65880ee6f42030ae50cf257a9b,28c19cc1a17c5f44a7b15103ce168651502843d8..bd80ac714a8a0a03d81ec77942bfed253a9362fe
@@@ -374,26 -374,31 +374,31 @@@ static int __mlxsw_emad_transmit(struc
        int err;
        int ret;
  
+       mlxsw_core->emad.trans_active = true;
        err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
        if (err) {
                dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
                        mlxsw_core->emad.tid);
                dev_kfree_skb(skb);
-               return err;
+               goto trans_inactive_out;
        }
  
-       mlxsw_core->emad.trans_active = true;
        ret = wait_event_timeout(mlxsw_core->emad.wait,
                                 !(mlxsw_core->emad.trans_active),
                                 msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
        if (!ret) {
                dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
                         mlxsw_core->emad.tid);
-               mlxsw_core->emad.trans_active = false;
-               return -EIO;
+               err = -EIO;
+               goto trans_inactive_out;
        }
  
        return 0;
+ trans_inactive_out:
+       mlxsw_core->emad.trans_active = false;
+       return err;
  }
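
The reordering above matters because the EMAD response can arrive before the transmit call even returns: `trans_active` must already be true when the completion handler runs, or the wakeup is lost. A hedged pthread sketch of arming the wait flag before kicking off the asynchronous work:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool trans_active;

/* completion handler: may run immediately after transmit starts */
static void *complete(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	trans_active = false;		/* response consumed */
	pthread_cond_signal(&cv);
	pthread_mutex_unlock(&m);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&m);
	trans_active = true;		/* arm BEFORE starting the async op */
	pthread_mutex_unlock(&m);

	pthread_create(&t, NULL, complete, NULL);	/* 'transmit' */

	pthread_mutex_lock(&m);
	while (trans_active)		/* wait_event analogue */
		pthread_cond_wait(&cv, &m);
	pthread_mutex_unlock(&m);
	pthread_join(t, NULL);

	printf("transaction completed\n");
	return 0;
}
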
  
  static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
@@@ -506,6 -511,7 +511,6 @@@ static int mlxsw_emad_traps_set(struct 
                return err;
  
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 -                          MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
  }
@@@ -550,8 -556,8 +555,8 @@@ static void mlxsw_emad_fini(struct mlxs
  {
        char hpkt_pl[MLXSW_REG_HPKT_LEN];
  
 +      mlxsw_core->emad.use_emad = false;
        mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
 -                          MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
                            MLXSW_TRAP_ID_ETHEMAD);
        mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
  
index 1c5e43eb9b4f4236f3820783d5386e378564a5c6,36fb1cec53c98e9c7dad0cb7b9b59051d7e0c352..a94dbda6590b36ee8a59a90c36f1ede510a2ae14
@@@ -171,21 -171,15 +171,21 @@@ static inline void __mlxsw_item_set64(c
  }
  
  static inline void __mlxsw_item_memcpy_from(char *buf, char *dst,
 -                                          struct mlxsw_item *item)
 +                                          struct mlxsw_item *item,
 +                                          unsigned short index)
  {
 -      memcpy(dst, &buf[item->offset], item->size.bytes);
 +      unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
 +
 +      memcpy(dst, &buf[offset], item->size.bytes);
  }
  
 -static inline void __mlxsw_item_memcpy_to(char *buf, char *src,
 -                                        struct mlxsw_item *item)
 +static inline void __mlxsw_item_memcpy_to(char *buf, const char *src,
 +                                        struct mlxsw_item *item,
 +                                        unsigned short index)
  {
 -      memcpy(&buf[item->offset], src, item->size.bytes);
 +      unsigned int offset = __mlxsw_item_offset(item, index, sizeof(char));
 +
 +      memcpy(&buf[offset], src, item->size.bytes);
  }
  
  static inline u16
@@@ -193,6 -187,7 +193,7 @@@ __mlxsw_item_bit_array_offset(struct ml
  {
        u16 max_index, be_index;
        u16 offset;             /* byte offset inside the array */
+       u8 in_byte_index;
  
        BUG_ON(index && !item->element_size);
        if (item->offset % sizeof(u32) != 0 ||
        max_index = (item->size.bytes << 3) / item->element_size - 1;
        be_index = max_index - index;
        offset = be_index * item->element_size >> 3;
-       *shift = index % (BITS_PER_BYTE / item->element_size) << 1;
+       in_byte_index  = index % (BITS_PER_BYTE / item->element_size);
+       *shift = in_byte_index * item->element_size;
  
        return item->offset + offset;
  }
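
The shift fix above generalizes the bit-array accessors beyond 2-bit elements: the old `<< 1` computed `index_in_byte * 2`, which equals `index_in_byte * element_size` only when elements are 2 bits wide. A small sketch of the corrected arithmetic; it deliberately ignores the big-endian element ordering of the real helper:

#include <assert.h>
#include <stdio.h>

#define BITS_PER_BYTE 8

/* byte offset and in-byte shift for element 'index' of 'es'-bit elements */
static void locate(unsigned int index, unsigned int es,
		   unsigned int *byte, unsigned int *shift)
{
	unsigned int per_byte = BITS_PER_BYTE / es;
	unsigned int in_byte_index = index % per_byte;

	*byte = index / per_byte;
	*shift = in_byte_index * es;	/* the fix: scale by element size */
}

int main(void)
{
	unsigned int byte, shift;

	locate(3, 2, &byte, &shift);	/* 2-bit elements: old code agreed */
	assert(byte == 0 && shift == 6);
	locate(1, 4, &byte, &shift);	/* 4-bit: old '<< 1' gave shift 2 */
	assert(byte == 0 && shift == 4);
	printf("element 1 of a 4-bit array: byte %u, shift %u\n", byte, shift);
	return 0;
}
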
@@@ -377,40 -373,12 +379,40 @@@ static struct mlxsw_item __ITEM_NAME(_t
  static inline void                                                            \
  mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf, char *dst)               \
  {                                                                             \
 -      __mlxsw_item_memcpy_from(buf, dst, &__ITEM_NAME(_type, _cname, _iname));\
 +      __mlxsw_item_memcpy_from(buf, dst,                                      \
 +                               &__ITEM_NAME(_type, _cname, _iname), 0);       \
 +}                                                                             \
 +static inline void                                                            \
 +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src)   \
 +{                                                                             \
 +      __mlxsw_item_memcpy_to(buf, src,                                        \
 +                             &__ITEM_NAME(_type, _cname, _iname), 0);         \
 +}
 +
 +#define MLXSW_ITEM_BUF_INDEXED(_type, _cname, _iname, _offset, _sizebytes,    \
 +                             _step, _instepoffset)                            \
 +static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = {                       \
 +      .offset = _offset,                                                      \
 +      .step = _step,                                                          \
 +      .in_step_offset = _instepoffset,                                        \
 +      .size = {.bytes = _sizebytes,},                                         \
 +      .name = #_type "_" #_cname "_" #_iname,                                 \
 +};                                                                            \
 +static inline void                                                            \
 +mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(char *buf,                  \
 +                                                unsigned short index,         \
 +                                                char *dst)                    \
 +{                                                                             \
 +      __mlxsw_item_memcpy_from(buf, dst,                                      \
 +                               &__ITEM_NAME(_type, _cname, _iname), index);   \
  }                                                                             \
  static inline void                                                            \
 -mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, char *src)         \
 +mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf,                    \
 +                                              unsigned short index,           \
 +                                              const char *src)                \
  {                                                                             \
 -      __mlxsw_item_memcpy_to(buf, src, &__ITEM_NAME(_type, _cname, _iname));  \
 +      __mlxsw_item_memcpy_to(buf, src,                                        \
 +                             &__ITEM_NAME(_type, _cname, _iname), index);     \
  }
  
  #define MLXSW_ITEM_BIT_ARRAY(_type, _cname, _iname, _offset, _sizebytes,      \
index 879e000684c3eabeda1e862ed1bbc19eed40ff72,cef866c37648ca0c93d4f109eb9d4f3671fcd890..371ea3f56aed1c5d9f3154869e812dee47930471
@@@ -57,7 -57,6 +57,7 @@@ static const char mlxsw_pci_driver_name
  
  static const struct pci_device_id mlxsw_pci_id_table[] = {
        {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
 +      {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
        {0, }
  };
  
@@@ -68,8 -67,6 +68,8 @@@ static const char *mlxsw_pci_device_kin
        switch (id->device) {
        case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
                return MLXSW_DEVICE_KIND_SWITCHX2;
 +      case PCI_DEVICE_ID_MELLANOX_SPECTRUM:
 +              return MLXSW_DEVICE_KIND_SPECTRUM;
        default:
                BUG();
        }
@@@ -174,8 -171,8 +174,8 @@@ struct mlxsw_pci 
        struct msix_entry msix_entry;
        struct mlxsw_core *core;
        struct {
 -              u16 num_pages;
                struct mlxsw_pci_mem_item *items;
 +              unsigned int count;
        } fw_area;
        struct {
                struct mlxsw_pci_mem_item out_mbox;
@@@ -434,7 -431,8 +434,7 @@@ static int mlxsw_pci_wqe_frag_map(struc
  
        mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
        if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
 -              if (net_ratelimit())
 -                      dev_err(&pdev->dev, "failed to dma map tx frag\n");
 +              dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
                return -EIO;
        }
        mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
@@@ -499,7 -497,6 +499,7 @@@ static int mlxsw_pci_rdq_init(struct ml
                              struct mlxsw_pci_queue *q)
  {
        struct mlxsw_pci_queue_elem_info *elem_info;
 +      u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
        int i;
        int err;
  
        q->consumer_counter = 0;
  
        /* Set CQ of same number of this RDQ with base
 -       * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
 +       * above SDQ count as the lower ones are assigned to SDQs.
         */
 -      mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
 +      mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
        mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
        for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
                dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@@ -702,8 -699,8 +702,8 @@@ static void mlxsw_pci_cqe_rdq_handle(st
  put_new_skb:
        memset(wqe, 0, q->elem_size);
        err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
 -      if (err && net_ratelimit())
 -              dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
 +      if (err)
 +              dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
        /* Everything is set up, ring doorbell to pass elem to HW */
        q->producer_counter++;
        mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@@ -833,8 -830,7 +833,8 @@@ static void mlxsw_pci_eq_tasklet(unsign
  {
        struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
        struct mlxsw_pci *mlxsw_pci = q->pci;
 -      unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
 +      u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
 +      unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
        char *eqe;
        u8 cqn;
        bool cq_handle = false;
  
        if (!cq_handle)
                return;
 -      for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
 +      for_each_set_bit(cqn, active_cqns, cq_count) {
                q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
                mlxsw_pci_queue_tasklet_schedule(q);
        }
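
The EQ tasklet now sizes its bitmap by the compile-time maximum (`MLXSW_PCI_CQS_MAX`) but walks only the CQ count actually reported by the device, which is what lets one driver serve differing SwitchX-2 and Spectrum queue layouts. A small sketch of that bound-by-runtime-count iteration (values hypothetical):

```c
#include <stdio.h>

int main(void)
{
	/* Bitmap sized for the compile-time maximum ... */
	unsigned long active_cqns = 0;
	/* ... but the scan stops at the device's real CQ count. */
	unsigned int cq_count = 20;

	active_cqns |= 1UL << 3;	/* EQEs flagged CQ 3 and CQ 17 */
	active_cqns |= 1UL << 17;

	for (unsigned int cqn = 0; cqn < cq_count; cqn++)
		if (active_cqns & (1UL << cqn))
			printf("schedule tasklet for CQ %u\n", cqn);
	return 0;
}
```
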
@@@ -1071,8 -1067,10 +1071,8 @@@ static int mlxsw_pci_aqs_init(struct ml
        num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
        eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
  
 -      if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
 -          (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
 -          (num_cqs != MLXSW_PCI_CQS_COUNT) ||
 -          (num_eqs != MLXSW_PCI_EQS_COUNT)) {
 +      if (num_sdqs + num_rdqs > num_cqs ||
 +          num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
                dev_err(&pdev->dev, "Unsupported number of queues\n");
                return -EINVAL;
        }
@@@ -1217,14 -1215,6 +1217,14 @@@ static int mlxsw_pci_config_profile(str
                        mbox, profile->max_flood_tables);
                mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
                        mbox, profile->max_vid_flood_tables);
 +              mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
 +                      mbox, profile->max_fid_offset_flood_tables);
 +              mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
 +                      mbox, profile->fid_offset_flood_table_size);
 +              mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
 +                      mbox, profile->max_fid_flood_tables);
 +              mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
 +                      mbox, profile->fid_flood_table_size);
        }
        if (profile->used_flood_mode) {
                mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
@@@ -1282,7 -1272,6 +1282,7 @@@ static int mlxsw_pci_fw_area_init(struc
                                  u16 num_pages)
  {
        struct mlxsw_pci_mem_item *mem_item;
 +      int nent = 0;
        int i;
        int err;
  
                                           GFP_KERNEL);
        if (!mlxsw_pci->fw_area.items)
                return -ENOMEM;
 -      mlxsw_pci->fw_area.num_pages = num_pages;
 +      mlxsw_pci->fw_area.count = num_pages;
  
        mlxsw_cmd_mbox_zero(mbox);
        for (i = 0; i < num_pages; i++) {
                        err = -ENOMEM;
                        goto err_alloc;
                }
 -              mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
 -              mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
 +              mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
 +              mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
 +              if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
 +                      err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
 +                      if (err)
 +                              goto err_cmd_map_fa;
 +                      nent = 0;
 +                      mlxsw_cmd_mbox_zero(mbox);
 +              }
        }
  
 -      err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
 -      if (err)
 -              goto err_cmd_map_fa;
 +      if (nent) {
 +              err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
 +              if (err)
 +                      goto err_cmd_map_fa;
 +      }
  
        return 0;
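
Previously this loop filled one mailbox with every page and issued a single `MAP_FA`; since the mailbox holds at most `MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX` entries, larger firmware areas now flush in full batches inside the loop, with a final partial batch sent afterwards. The shape of that chunked-flush pattern, with a stubbed command (constant and names illustrative):

```c
#include <stdio.h>

#define ENTRIES_MAX 32	/* stand-in for the real mailbox capacity */

/* Stub for the firmware command that consumes `nent` queued entries. */
static int map_fa(int nent)
{
	printf("MAP_FA with %d entries\n", nent);
	return 0;
}

int main(void)
{
	int num_pages = 100, nent = 0, err;

	for (int i = 0; i < num_pages; i++) {
		/* ... fill mailbox slot `nent` for page i ... */
		if (++nent == ENTRIES_MAX) {
			err = map_fa(nent);	/* flush a full batch */
			if (err)
				return err;
			nent = 0;		/* and start a fresh mailbox */
		}
	}
	if (nent)
		return map_fa(nent);		/* flush the remainder */
	return 0;
}
```
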
  
@@@ -1342,7 -1322,7 +1342,7 @@@ static void mlxsw_pci_fw_area_fini(stru
  
        mlxsw_cmd_unmap_fa(mlxsw_pci->core);
  
 -      for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
 +      for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
                mem_item = &mlxsw_pci->fw_area.items[i];
  
                pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
@@@ -1602,11 -1582,11 +1602,11 @@@ static int mlxsw_pci_cmd_exec(void *bus
  
        if (in_mbox)
                memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));
  
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
-       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
+       mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));
  
        mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
        mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
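
Replacing the raw `>> 32` with `upper_32_bits()`/`lower_32_bits()` matters because `dma_addr_t` may be only 32 bits wide, where shifting a value by its full type width is undefined; the kernel helpers stay well-defined for either width. A userspace approximation (the in-tree macros are shaped slightly differently to dodge shift-count warnings):

```c
#include <stdint.h>
#include <stdio.h>

/* Widen first, then shift: well-defined even when the source
 * address type is only 32 bits. */
#define upper_32_bits(n) ((uint32_t)(((uint64_t)(n)) >> 32))
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffu))

int main(void)
{
	uint64_t mapaddr = 0x0000000123456789ULL;

	printf("HI=0x%08x LO=0x%08x\n",
	       upper_32_bits(mapaddr), lower_32_bits(mapaddr));
	return 0;
}
```
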
index 2fd2279b628e2c45f1f21a9e46930d1e0c100247,62cbbd1ada8da6a5d8343beb394b67dc5af6a81c..50e29c4879dbe7fb20967ba46f0bdb25c4e5e8ee
@@@ -57,11 -57,13 +57,11 @@@ static const char mlxsw_sx_driver_versi
  
  struct mlxsw_sx_port;
  
 -#define MLXSW_SW_HW_ID_LEN 6
 -
  struct mlxsw_sx {
        struct mlxsw_sx_port **ports;
        struct mlxsw_core *core;
        const struct mlxsw_bus_info *bus_info;
 -      u8 hw_id[MLXSW_SW_HW_ID_LEN];
 +      u8 hw_id[ETH_ALEN];
  };
  
  struct mlxsw_sx_port_pcpu_stats {
@@@ -866,7 -868,7 +866,7 @@@ static int mlxsw_sx_port_attr_get(struc
        struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
  
        switch (attr->id) {
 -      case SWITCHDEV_ATTR_PORT_PARENT_ID:
 +      case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = sizeof(mlxsw_sx->hw_id);
                memcpy(&attr->u.ppid.id, &mlxsw_sx->hw_id, attr->u.ppid.id_len);
                break;
@@@ -923,8 -925,7 +923,8 @@@ static int mlxsw_sx_port_stp_state_set(
        spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
        if (!spms_pl)
                return -ENOMEM;
 -      mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
 +      mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
 +      mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
        kfree(spms_pl);
        return err;
@@@ -1068,9 -1069,9 +1068,9 @@@ static int mlxsw_sx_port_create(struct 
        return 0;
  
  err_register_netdev:
- err_port_admin_status_set:
  err_port_mac_learning_mode_set:
  err_port_stp_state_set:
+ err_port_admin_status_set:
  err_port_mtu_set:
  err_port_speed_set:
  err_port_swid_set:
@@@ -1177,7 -1178,8 +1177,7 @@@ static int mlxsw_sx_event_register(stru
        if (err)
                return err;
  
 -      mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
 -                          MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
 +      mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
        if (err)
                goto err_event_trap_set;
@@@ -1210,8 -1212,9 +1210,8 @@@ static void mlxsw_sx_rx_listener_func(s
        struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
  
        if (unlikely(!mlxsw_sx_port)) {
 -              if (net_ratelimit())
 -                      dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
 -                               local_port);
 +              dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
 +                                   local_port);
                return;
        }
  
@@@ -1313,11 -1316,6 +1313,11 @@@ static int mlxsw_sx_traps_init(struct m
        if (err)
                return err;
  
 +      mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
 +      err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
 +      if (err)
 +              return err;
 +
        for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
                err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
                                                      &mlxsw_sx_rx_listener[i],
                        goto err_rx_listener_register;
  
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
 -                                  MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
                if (err)
@@@ -1340,6 -1339,7 +1340,6 @@@ err_rx_trap_set
  err_rx_listener_register:
        for (i--; i >= 0; i--) {
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
 -                                  MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
  
@@@ -1357,6 -1357,7 +1357,6 @@@ static void mlxsw_sx_traps_fini(struct 
  
        for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
                mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
 -                                  MLXSW_REG_HTGT_TRAP_GROUP_RX,
                                    mlxsw_sx_rx_listener[i].trap_id);
                mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
  
@@@ -1370,15 -1371,25 +1370,15 @@@ static int mlxsw_sx_flood_init(struct m
  {
        char sfgc_pl[MLXSW_REG_SFGC_LEN];
        char sgcr_pl[MLXSW_REG_SGCR_LEN];
 -      char *smid_pl;
        char *sftr_pl;
        int err;
  
 -      /* Due to FW bug, we must configure SMID. */
 -      smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
 -      if (!smid_pl)
 -              return -ENOMEM;
 -      mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
 -      err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
 -      kfree(smid_pl);
 -      if (err)
 -              return err;
 -
        /* Configure a flooding table, which includes only CPU port. */
        sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
        if (!sftr_pl)
                return -ENOMEM;
 -      mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0);
 +      mlxsw_reg_sftr_pack(sftr_pl, 0, 0, MLXSW_REG_SFGC_TABLE_TYPE_SINGLE, 0,
 +                          MLXSW_PORT_CPU_PORT, true);
        err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sftr), sftr_pl);
        kfree(sftr_pl);
        if (err)
diff --combined drivers/net/phy/Kconfig
index 9d097ae54fb2aaa7cab5040dcb1008920079f49f,11e3975485c1eaa377b7ee82a3d3eef5a3b00ffe..a7fb66580cee2b408ae1817e304cf16392a872c4
@@@ -69,39 -69,20 +69,39 @@@ config SMSC_PH
        ---help---
          Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
  
 +config BCM_NET_PHYLIB
 +      tristate
 +
  config BROADCOM_PHY
        tristate "Drivers for Broadcom PHYs"
 +      select BCM_NET_PHYLIB
        ---help---
          Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
          BCM5481 and BCM5482 PHYs.
  
 +config BCM_CYGNUS_PHY
 +      tristate "Drivers for Broadcom Cygnus SoC internal PHY"
 +      depends on ARCH_BCM_CYGNUS || COMPILE_TEST
 +      depends on MDIO_BCM_IPROC
 +      select BCM_NET_PHYLIB
 +      ---help---
 +        This PHY driver is for the 1G internal PHYs of the Broadcom
 +        Cygnus Family SoC.
 +
 +        Currently supports internal PHYs used in the BCM11300,
 +        BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
 +        BCM58303 & BCM58305 Broadcom Cygnus SoCs.
 +
  config BCM63XX_PHY
        tristate "Drivers for Broadcom 63xx SOCs internal PHY"
        depends on BCM63XX
 +      select BCM_NET_PHYLIB
        ---help---
          Currently supports the 6348 and 6358 PHYs.
  
  config BCM7XXX_PHY
        tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
 +      select BCM_NET_PHYLIB
        ---help---
          Currently supports the BCM7366, BCM7439, BCM7445, and
          40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
@@@ -187,8 -168,6 +187,6 @@@ config MDIO_OCTEO
          busses. It is required by the Octeon and ThunderX ethernet device
          drivers.
  
-         If in doubt, say Y.
  config MDIO_SUN4I
        tristate "Allwinner sun4i MDIO interface support"
        depends on ARCH_SUNXI
@@@ -244,15 -223,6 +242,15 @@@ config MDIO_BCM_UNIMA
          This hardware can be found in the Broadcom GENET Ethernet MAC
          controllers as well as some Broadcom Ethernet switches such as the
          Starfighter 2 switches.
 +
 +config MDIO_BCM_IPROC
 +      tristate "Broadcom iProc MDIO bus controller"
 +      depends on ARCH_BCM_IPROC || COMPILE_TEST
 +      depends on HAS_IOMEM && OF_MDIO
 +      help
 +        This module provides a driver for the MDIO busses found in the
 +        Broadcom iProc SoCs.
 +
  endif # PHYLIB
  
  config MICREL_KS8995MA
diff --combined drivers/net/usb/Kconfig
index 3a8a36c8ded16b4e237c1687143fa4ff2cbff57b,e66805eeffb45f014a9c61d20e0bd6f3b473098b..7f83504dfa69bba2c8db612d1d25196fa5b72d91
@@@ -109,8 -109,6 +109,8 @@@ config USB_RTL815
  config USB_LAN78XX
        tristate "Microchip LAN78XX Based USB Ethernet Adapters"
        select MII
 +      select PHYLIB
 +      select MICROCHIP_PHY
        help
          This option adds support for Microchip LAN78XX based USB 2
          & USB 3 10/100/1000 Ethernet adapters.
@@@ -166,6 -164,7 +166,7 @@@ config USB_NET_AX8817
            * Aten UC210T
            * ASIX AX88172
            * Billionton Systems, USB2AR
+           * Billionton Systems, GUSB2AM-1G-B
            * Buffalo LUA-U2-KTX
            * Corega FEther USB2-TX
            * D-Link DUB-E100
@@@ -543,7 -542,7 +544,7 @@@ config USB_NET_INT51X
  
  config USB_CDC_PHONET
        tristate "CDC Phonet support"
 -      depends on PHONET
 +      depends on PHONET && USB_USBNET
        help
          Choose this option to support the Phonet interface to a Nokia
          cellular modem, as found on most Nokia handsets with the
diff --combined drivers/net/vxlan.c
index ce704df7681bda7d364b4244e3b0dab32a98da21,afdc65fd5bc57c9101c9ffcb4aeac7cfe2431a63..cf262ccf504739c986397022f2833666a8d7d93d
@@@ -75,7 -75,8 +75,7 @@@ static struct rtnl_link_ops vxlan_link_
  
  static const u8 all_zeros_mac[ETH_ALEN];
  
 -static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 -                                       bool no_share, u32 flags);
 +static int vxlan_sock_add(struct vxlan_dev *vxlan);
  
  /* per-network namespace private data for this module */
  struct vxlan_net {
@@@ -993,30 -994,19 +993,30 @@@ static bool vxlan_snoop(struct net_devi
  static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
  {
        struct vxlan_dev *vxlan;
 +      unsigned short family = dev->default_dst.remote_ip.sa.sa_family;
  
        /* The vxlan_sock is only used by dev, leaving group has
         * no effect on other vxlan devices.
         */
 -      if (atomic_read(&dev->vn_sock->refcnt) == 1)
 +      if (family == AF_INET && dev->vn4_sock &&
 +          atomic_read(&dev->vn4_sock->refcnt) == 1)
                return false;
 +#if IS_ENABLED(CONFIG_IPV6)
 +      if (family == AF_INET6 && dev->vn6_sock &&
 +          atomic_read(&dev->vn6_sock->refcnt) == 1)
 +              return false;
 +#endif
  
        list_for_each_entry(vxlan, &vn->vxlan_list, next) {
                if (!netif_running(vxlan->dev) || vxlan == dev)
                        continue;
  
 -              if (vxlan->vn_sock != dev->vn_sock)
 +              if (family == AF_INET && vxlan->vn4_sock != dev->vn4_sock)
                        continue;
 +#if IS_ENABLED(CONFIG_IPV6)
 +              if (family == AF_INET6 && vxlan->vn6_sock != dev->vn6_sock)
 +                      continue;
 +#endif
  
                if (!vxlan_addr_equal(&vxlan->default_dst.remote_ip,
                                      &dev->default_dst.remote_ip))
        return false;
  }
  
 -static void vxlan_sock_release(struct vxlan_sock *vs)
 +static void __vxlan_sock_release(struct vxlan_sock *vs)
  {
 -      struct sock *sk = vs->sock->sk;
 -      struct net *net = sock_net(sk);
 -      struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 +      struct vxlan_net *vn;
  
 +      if (!vs)
 +              return;
        if (!atomic_dec_and_test(&vs->refcnt))
                return;
  
 +      vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
        spin_lock(&vn->sock_lock);
        hlist_del_rcu(&vs->hlist);
        vxlan_notify_del_rx_port(vs);
        queue_work(vxlan_wq, &vs->del_work);
  }
  
 +static void vxlan_sock_release(struct vxlan_dev *vxlan)
 +{
 +      __vxlan_sock_release(vxlan->vn4_sock);
 +#if IS_ENABLED(CONFIG_IPV6)
 +      __vxlan_sock_release(vxlan->vn6_sock);
 +#endif
 +}
 +
  /* Update multicast group membership when first VNI on
   * multicast address is brought up
   */
  static int vxlan_igmp_join(struct vxlan_dev *vxlan)
  {
 -      struct vxlan_sock *vs = vxlan->vn_sock;
 -      struct sock *sk = vs->sock->sk;
 +      struct sock *sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
        int ret = -EINVAL;
  
 -      lock_sock(sk);
        if (ip->sa.sa_family == AF_INET) {
                struct ip_mreqn mreq = {
                        .imr_multiaddr.s_addr   = ip->sin.sin_addr.s_addr,
                        .imr_ifindex            = ifindex,
                };
  
 +              sk = vxlan->vn4_sock->sock->sk;
 +              lock_sock(sk);
                ret = ip_mc_join_group(sk, &mreq);
 +              release_sock(sk);
  #if IS_ENABLED(CONFIG_IPV6)
        } else {
 +              sk = vxlan->vn6_sock->sock->sk;
 +              lock_sock(sk);
                ret = ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
                                                   &ip->sin6.sin6_addr);
 +              release_sock(sk);
  #endif
        }
 -      release_sock(sk);
  
        return ret;
  }
  /* Inverse of vxlan_igmp_join when last VNI is brought down */
  static int vxlan_igmp_leave(struct vxlan_dev *vxlan)
  {
 -      struct vxlan_sock *vs = vxlan->vn_sock;
 -      struct sock *sk = vs->sock->sk;
 +      struct sock *sk;
        union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
        int ifindex = vxlan->default_dst.remote_ifindex;
        int ret = -EINVAL;
  
 -      lock_sock(sk);
        if (ip->sa.sa_family == AF_INET) {
                struct ip_mreqn mreq = {
                        .imr_multiaddr.s_addr   = ip->sin.sin_addr.s_addr,
                        .imr_ifindex            = ifindex,
                };
  
 +              sk = vxlan->vn4_sock->sock->sk;
 +              lock_sock(sk);
                ret = ip_mc_leave_group(sk, &mreq);
 +              release_sock(sk);
  #if IS_ENABLED(CONFIG_IPV6)
        } else {
 +              sk = vxlan->vn6_sock->sock->sk;
 +              lock_sock(sk);
                ret = ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
                                                   &ip->sin6.sin6_addr);
 +              release_sock(sk);
  #endif
        }
 -      release_sock(sk);
  
        return ret;
  }
@@@ -1898,7 -1873,8 +1898,7 @@@ static void vxlan_xmit_one(struct sk_bu
  {
        struct ip_tunnel_info *info;
        struct vxlan_dev *vxlan = netdev_priv(dev);
 -      struct sock *sk = vxlan->vn_sock->sock->sk;
 -      unsigned short family = vxlan_get_sk_family(vxlan->vn_sock);
 +      struct sock *sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
        struct flowi4 fl4;
                                  dev->name);
                        goto drop;
                }
 -              if (family != ip_tunnel_info_af(info))
 -                      goto drop;
 -
                dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port;
                vni = be64_to_cpu(info->key.tun_id);
 -              remote_ip.sa.sa_family = family;
 -              if (family == AF_INET)
 +              remote_ip.sa.sa_family = ip_tunnel_info_af(info);
 +              if (remote_ip.sa.sa_family == AF_INET)
                        remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst;
                else
                        remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst;
        }
  
        if (dst->sa.sa_family == AF_INET) {
 +              if (!vxlan->vn4_sock)
 +                      goto drop;
 +              sk = vxlan->vn4_sock->sock->sk;
 +
                if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
                        df = htons(IP_DF);
  
                struct flowi6 fl6;
                u32 rt6i_flags;
  
 +              if (!vxlan->vn6_sock)
 +                      goto drop;
 +              sk = vxlan->vn6_sock->sock->sk;
 +
                memset(&fl6, 0, sizeof(fl6));
                fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
                fl6.daddr = dst->sin6.sin6_addr;
@@@ -2233,6 -2204,7 +2233,6 @@@ static void vxlan_vs_add_dev(struct vxl
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
        __u32 vni = vxlan->default_dst.remote_vni;
  
 -      vxlan->vn_sock = vs;
        spin_lock(&vn->sock_lock);
        hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
        spin_unlock(&vn->sock_lock);
@@@ -2272,18 -2244,22 +2272,18 @@@ static void vxlan_uninit(struct net_dev
  static int vxlan_open(struct net_device *dev)
  {
        struct vxlan_dev *vxlan = netdev_priv(dev);
 -      struct vxlan_sock *vs;
 -      int ret = 0;
 +      int ret;
  
 -      vs = vxlan_sock_add(vxlan->net, vxlan->cfg.dst_port,
 -                          vxlan->cfg.no_share, vxlan->flags);
 -      if (IS_ERR(vs))
 -              return PTR_ERR(vs);
 -
 -      vxlan_vs_add_dev(vs, vxlan);
 +      ret = vxlan_sock_add(vxlan);
 +      if (ret < 0)
 +              return ret;
  
        if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
                ret = vxlan_igmp_join(vxlan);
                if (ret == -EADDRINUSE)
                        ret = 0;
                if (ret) {
 -                      vxlan_sock_release(vs);
 +                      vxlan_sock_release(vxlan);
                        return ret;
                }
        }
@@@ -2318,6 -2294,7 +2318,6 @@@ static int vxlan_stop(struct net_devic
  {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 -      struct vxlan_sock *vs = vxlan->vn_sock;
        int ret = 0;
  
        if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
        del_timer_sync(&vxlan->age_timer);
  
        vxlan_flush(vxlan);
 -      vxlan_sock_release(vs);
 +      vxlan_sock_release(vxlan);
  
        return ret;
  }
@@@ -2563,13 -2540,14 +2563,13 @@@ static struct socket *vxlan_create_sock
  }
  
  /* Create new listen socket if needed */
 -static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 -                                            u32 flags)
 +static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
 +                                            __be16 port, u32 flags)
  {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_sock *vs;
        struct socket *sock;
        unsigned int h;
 -      bool ipv6 = !!(flags & VXLAN_F_IPV6);
        struct udp_tunnel_sock_cfg tunnel_cfg;
  
        vs = kzalloc(sizeof(*vs), GFP_KERNEL);
        return vs;
  }
  
 -static struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
 -                                       bool no_share, u32 flags)
 +static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6)
  {
 -      struct vxlan_net *vn = net_generic(net, vxlan_net_id);
 -      struct vxlan_sock *vs;
 -      bool ipv6 = flags & VXLAN_F_IPV6;
 +      struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
 +      struct vxlan_sock *vs = NULL;
  
 -      if (!no_share) {
 +      if (!vxlan->cfg.no_share) {
                spin_lock(&vn->sock_lock);
 -              vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port,
 -                                   flags);
 -              if (vs) {
 -                      if (!atomic_add_unless(&vs->refcnt, 1, 0))
 -                              vs = ERR_PTR(-EBUSY);
 +              vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
 +                                   vxlan->cfg.dst_port, vxlan->flags);
 +              if (vs && !atomic_add_unless(&vs->refcnt, 1, 0)) {
                        spin_unlock(&vn->sock_lock);
 -                      return vs;
 +                      return -EBUSY;
                }
                spin_unlock(&vn->sock_lock);
        }
 +      if (!vs)
 +              vs = vxlan_socket_create(vxlan->net, ipv6,
 +                                       vxlan->cfg.dst_port, vxlan->flags);
 +      if (IS_ERR(vs))
 +              return PTR_ERR(vs);
 +#if IS_ENABLED(CONFIG_IPV6)
 +      if (ipv6)
 +              vxlan->vn6_sock = vs;
 +      else
 +#endif
 +              vxlan->vn4_sock = vs;
 +      vxlan_vs_add_dev(vs, vxlan);
 +      return 0;
 +}
  
 -      return vxlan_socket_create(net, port, flags);
 +static int vxlan_sock_add(struct vxlan_dev *vxlan)
 +{
 +      bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
 +      bool metadata = vxlan->flags & VXLAN_F_COLLECT_METADATA;
 +      int ret = 0;
 +
 +      vxlan->vn4_sock = NULL;
 +#if IS_ENABLED(CONFIG_IPV6)
 +      vxlan->vn6_sock = NULL;
 +      if (ipv6 || metadata)
 +              ret = __vxlan_sock_add(vxlan, true);
 +#endif
 +      if (!ret && (!ipv6 || metadata))
 +              ret = __vxlan_sock_add(vxlan, false);
 +      if (ret < 0)
 +              vxlan_sock_release(vxlan);
 +      return ret;
  }
  
  static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
        struct vxlan_rdst *dst = &vxlan->default_dst;
 +      unsigned short needed_headroom = ETH_HLEN;
        int err;
        bool use_ipv6 = false;
        __be16 default_port = vxlan->cfg.dst_port;
                if (!IS_ENABLED(CONFIG_IPV6))
                        return -EPFNOSUPPORT;
                use_ipv6 = true;
 +              vxlan->flags |= VXLAN_F_IPV6;
        }
  
        if (conf->remote_ifindex) {
                                pr_info("IPv6 is disabled via sysctl\n");
                                return -EPERM;
                        }
 -                      vxlan->flags |= VXLAN_F_IPV6;
                }
  #endif
  
                if (!conf->mtu)
                        dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
  
 -              dev->needed_headroom = lowerdev->hard_header_len +
 -                                     (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 -      } else if (use_ipv6) {
 -              vxlan->flags |= VXLAN_F_IPV6;
 -              dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
 -      } else {
 -              dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 +              needed_headroom = lowerdev->hard_header_len;
        }
  
 +      if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
 +              needed_headroom += VXLAN6_HEADROOM;
 +      else
 +              needed_headroom += VXLAN_HEADROOM;
 +      dev->needed_headroom = needed_headroom;
 +
        memcpy(&vxlan->cfg, conf, sizeof(*conf));
        if (!vxlan->cfg.dst_port)
                vxlan->cfg.dst_port = default_port;
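
Across these vxlan hunks, the single `vn_sock` is replaced by a per-family pair: a plain IPv4 or IPv6 device opens only its own socket, while a metadata-collecting device opens both so the transmit path can pick by destination family. A compressed sketch of that selection logic (types and values are illustrative, not the kernel's):

```c
#include <stdbool.h>
#include <stdio.h>

struct vxlan_sock { int family; };

struct vxlan_dev {
	struct vxlan_sock *vn4_sock;
	struct vxlan_sock *vn6_sock;
	bool ipv6;	/* VXLAN_F_IPV6 */
	bool metadata;	/* VXLAN_F_COLLECT_METADATA */
};

static struct vxlan_sock v4 = { 4 }, v6 = { 6 };

static void sock_add(struct vxlan_dev *d)
{
	d->vn4_sock = NULL;
	d->vn6_sock = NULL;
	if (d->ipv6 || d->metadata)
		d->vn6_sock = &v6;	/* IPv6 socket when needed */
	if (!d->ipv6 || d->metadata)
		d->vn4_sock = &v4;	/* IPv4 socket when needed */
}

int main(void)
{
	struct vxlan_dev dev = { .metadata = true };

	sock_add(&dev);
	printf("v4=%s v6=%s\n", dev.vn4_sock ? "open" : "-",
	       dev.vn6_sock ? "open" : "-");
	return 0;
}
```
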
@@@ -2794,11 -2745,10 +2794,10 @@@ static int vxlan_newlink(struct net *sr
        struct vxlan_config conf;
        int err;
  
-       if (!data[IFLA_VXLAN_ID])
-               return -EINVAL;
        memset(&conf, 0, sizeof(conf));
-       conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
+       if (data[IFLA_VXLAN_ID])
+               conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]);
  
        if (data[IFLA_VXLAN_GROUP]) {
                conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]);
index bc421a5c535630f39b71fe016670729ec9a8945c,678d72af4a9df73976b7513d9af17888bc1ef7d1..80c174ff6b7b125b901e067208bf6548fc443b45
@@@ -337,7 -337,7 +337,7 @@@ enum ath10k_hw_rate_cck 
  #define TARGET_10X_MAX_FRAG_ENTRIES           0
  
  /* 10.2 parameters */
- #define TARGET_10_2_DMA_BURST_SIZE            1
+ #define TARGET_10_2_DMA_BURST_SIZE            0
  
  /* Target specific defines for WMI-TLV firmware */
  #define TARGET_TLV_NUM_VDEVS                  4
  
  #define TARGET_10_4_TX_DBG_LOG_SIZE           1024
  #define TARGET_10_4_NUM_WDS_ENTRIES           32
- #define TARGET_10_4_DMA_BURST_SIZE            1
+ #define TARGET_10_4_DMA_BURST_SIZE            0
  #define TARGET_10_4_MAC_AGGR_DELIM            0
  #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
  #define TARGET_10_4_VOW_CONFIG                        0
  /* Number of Copy Engines supported */
  #define CE_COUNT ar->hw_values->ce_count
  
 -/*
 - * Total number of PCIe MSI interrupts requested for all interrupt sources.
 - * PCIe standard forces this to be a power of 2.
 - * Some Host OS's limit MSI requests that can be granted to 8
 - * so for now we abide by this limit and avoid requesting more
 - * than that.
 - */
 -#define MSI_NUM_REQUEST_LOG2  3
 -#define MSI_NUM_REQUEST               (1<<MSI_NUM_REQUEST_LOG2)
 -
  /*
   * Granted MSIs are assigned as follows:
   * Firmware uses the first
index 5d532c7b813fd1d356884134db2a4ebb7fb2d7f4,90eb75012e4f4818a03481d98cb9b137a89393fa..2e2b92ba96b8ba93203f72f3ecf98d875002b5a7
@@@ -855,8 -855,7 +855,8 @@@ static void ath9k_set_hw_capab(struct a
                        BIT(NL80211_IFTYPE_STATION) |
                        BIT(NL80211_IFTYPE_ADHOC) |
                        BIT(NL80211_IFTYPE_MESH_POINT) |
 -                      BIT(NL80211_IFTYPE_WDS);
 +                      BIT(NL80211_IFTYPE_WDS) |
 +                      BIT(NL80211_IFTYPE_OCB);
  
                if (ath9k_is_chanctx_enabled())
                        hw->wiphy->interface_modes |=
        hw->max_rate_tries = 10;
        hw->sta_data_size = sizeof(struct ath_node);
        hw->vif_data_size = sizeof(struct ath_vif);
+       hw->extra_tx_headroom = 4;
  
        hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
        hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;
index d561181f2cff1f5d490285e5765cb6254ea7b8f3,3fb327d5a911aeba53e4ac71ad6bb2ad419d06b0..1a73c7a1da77d0e0fe16fbdc6fb866d6c65226ee
  #define IWL7260_UCODE_API_MAX 17
  
  /* Oldest version we won't warn about */
 -#define IWL7260_UCODE_API_OK  12
 -#define IWL3165_UCODE_API_OK  13
 +#define IWL7260_UCODE_API_OK  13
  
  /* Lowest firmware API version supported */
 -#define IWL7260_UCODE_API_MIN 12
 -#define IWL3165_UCODE_API_MIN 13
 +#define IWL7260_UCODE_API_MIN 13
  
  /* NVM versions */
  #define IWL7260_NVM_VERSION           0x0a1d
  
  static const struct iwl_base_params iwl7000_base_params = {
        .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
 -      .num_of_queues = IWLAGN_NUM_QUEUES,
 +      .num_of_queues = 31,
        .pll_cfg_val = 0,
        .shadow_ram_support = true,
        .led_compensation = 57,
@@@ -267,6 -269,11 +267,6 @@@ const struct iwl_cfg iwl3165_2ac_cfg = 
        .name = "Intel(R) Dual Band Wireless AC 3165",
        .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
 -      /* sparse doens't like the re-assignment but it is safe */
 -#ifndef __CHECKER__
 -      .ucode_api_ok = IWL3165_UCODE_API_OK,
 -      .ucode_api_min = IWL3165_UCODE_API_MIN,
 -#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@@ -341,6 -348,6 +341,6 @@@ const struct iwl_cfg iwl7265d_n_cfg = 
  };
  
  MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
- MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
+ MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
  MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
  MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 1d54355ad76a27da11268219caa09ddc23baf4ad,576187611e614adc3a92ca50c4e1f2ab3a7bb7e1..85ae902df7c08d9d1d0ad4611e7947ace08bf0f7
@@@ -274,18 -274,13 +274,13 @@@ static void iwl_mvm_wowlan_program_keys
                break;
        case WLAN_CIPHER_SUITE_CCMP:
                if (sta) {
-                       u8 *pn = seq.ccmp.pn;
+                       u64 pn64;
  
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
                        aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
  
-                       ieee80211_get_key_tx_seq(key, &seq);
-                       aes_tx_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                   ((u64)pn[4] << 8) |
-                                                   ((u64)pn[3] << 16) |
-                                                   ((u64)pn[2] << 24) |
-                                                   ((u64)pn[1] << 32) |
-                                                   ((u64)pn[0] << 40));
+                       pn64 = atomic64_read(&key->tx_pn);
+                       aes_tx_sc->pn = cpu_to_le64(pn64);
                } else {
                        aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
                }
                        u8 *pn = seq.ccmp.pn;
  
                        ieee80211_get_key_rx_seq(key, i, &seq);
-                       aes_sc->pn = cpu_to_le64((u64)pn[5] |
-                                                ((u64)pn[4] << 8) |
-                                                ((u64)pn[3] << 16) |
-                                                ((u64)pn[2] << 24) |
-                                                ((u64)pn[1] << 32) |
-                                                ((u64)pn[0] << 40));
+                       aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+                                                  ((u64)pn[4] << 8) |
+                                                  ((u64)pn[3] << 16) |
+                                                  ((u64)pn[2] << 24) |
+                                                  ((u64)pn[1] << 32) |
+                                                  ((u64)pn[0] << 40));
                }
                data->use_rsc_tsc = true;
                break;
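
On the transmit side the driver now reads the 48-bit CCMP packet number straight from the key's 64-bit atomic counter, while the receive side keeps assembling it from the six big-endian bytes mac80211 hands over. The byte-to-u64 packing, pulled out into a plain function for clarity (a sketch, not the driver's own helper):

```c
#include <stdint.h>
#include <stdio.h>

/* Assemble a 48-bit CCMP PN from its six big-endian bytes. */
static uint64_t ccmp_pn_to_u64(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	uint8_t pn[6] = { 0, 0, 0, 0, 0x01, 0x02 };

	printf("pn64 = 0x%llx\n", (unsigned long long)ccmp_pn_to_u64(pn));
	return 0;
}
```
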
@@@ -1170,9 -1165,6 +1165,9 @@@ int iwl_mvm_suspend(struct ieee80211_h
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
  
 +      /* make sure the d0i3 exit work is not pending */
 +      flush_work(&mvm->d0i3_exit_work);
 +
        ret = iwl_trans_suspend(mvm->trans);
        if (ret)
                return ret;
@@@ -1456,15 -1448,15 +1451,15 @@@ static void iwl_mvm_d3_update_gtks(stru
  
                switch (key->cipher) {
                case WLAN_CIPHER_SUITE_CCMP:
-                       iwl_mvm_aes_sc_to_seq(&sc->aes.tsc, &seq);
                        iwl_mvm_set_aes_rx_seq(sc->aes.unicast_rsc, key);
+                       atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
                        break;
                case WLAN_CIPHER_SUITE_TKIP:
                        iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
                        iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+                       ieee80211_set_key_tx_seq(key, &seq);
                        break;
                }
-               ieee80211_set_key_tx_seq(key, &seq);
  
                /* that's it for this key */
                return;
index 834641e250fb2ace9b386eaf2edd0f867fa3ce08,5c7f7cc9ffcc2aa1d81cbf1ff5b2d33f46133727..d906fa13ba9710a3e9250cd481d735aa71f767de
@@@ -616,8 -616,12 +616,8 @@@ static int iwl_mvm_load_ucode_wait_aliv
         * will be empty.
         */
  
 -      for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
 -              if (i < mvm->first_agg_queue && i != IWL_MVM_CMD_QUEUE)
 -                      mvm->queue_to_mac80211[i] = i;
 -              else
 -                      mvm->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
 -      }
 +      memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
 +      mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
  
        for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
                atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@@ -699,7 -703,7 +699,7 @@@ int iwl_run_init_mvm_ucode(struct iwl_m
         * abort after reading the nvm in case RF Kill is on, we will complete
         * the init seq later when RF kill will switch to off
         */
-       if (iwl_mvm_is_radio_killed(mvm)) {
+       if (iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm,
                                  "jump over all phy activities due to RF kill\n");
                iwl_remove_notification(&mvm->notif_wait, &calib_wait);
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);
  
-       if (ret && iwl_mvm_is_radio_killed(mvm)) {
+       if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
                ret = 1;
        }
@@@ -936,6 -940,19 +936,6 @@@ int iwl_mvm_start_fw_dbg_conf(struct iw
        return ret;
  }
  
 -static int iwl_mvm_config_ltr_v1(struct iwl_mvm *mvm)
 -{
 -      struct iwl_ltr_config_cmd_v1 cmd_v1 = {
 -              .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
 -      };
 -
 -      if (!mvm->trans->ltr_enabled)
 -              return 0;
 -
 -      return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
 -                                  sizeof(cmd_v1), &cmd_v1);
 -}
 -
  static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
  {
        struct iwl_ltr_config_cmd cmd = {
        if (!mvm->trans->ltr_enabled)
                return 0;
  
 -      if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_HDC_PHASE_0))
 -              return iwl_mvm_config_ltr_v1(mvm);
 -
        return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
                                    sizeof(cmd), &cmd);
  }
index 8443e14101cfe304247cfc868e99682a13473e71,7c2944a72470b92acdca39a6f7c13b845b34842a..a38e07bb137fcf1720d4b2d2cf9eb108c3813faa
@@@ -820,7 -820,7 +820,7 @@@ static int iwl_mvm_mac_ampdu_action(str
                                    struct ieee80211_vif *vif,
                                    enum ieee80211_ampdu_mlme_action action,
                                    struct ieee80211_sta *sta, u16 tid,
 -                                  u16 *ssn, u8 buf_size)
 +                                  u16 *ssn, u8 buf_size, bool amsdu)
  {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
@@@ -1577,6 -1577,20 +1577,6 @@@ static struct iwl_mvm_phy_ctxt *iwl_mvm
        return NULL;
  }
  
 -static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
 -                                  struct ieee80211_vif *vif, s8 tx_power)
 -{
 -      /* FW is in charge of regulatory enforcement */
 -      struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
 -              .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
 -              .pwr_restriction = cpu_to_le16(tx_power),
 -      };
 -
 -      return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
 -                                  sizeof(reduce_txpwr_cmd),
 -                                  &reduce_txpwr_cmd);
 -}
 -
  static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                s16 tx_power)
  {
        };
        int len = sizeof(cmd);
  
 -      if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_TX_POWER_DEV))
 -              return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
 -
        if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
                cmd.v2.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
  
@@@ -2302,8 -2319,6 +2302,8 @@@ static int iwl_mvm_start_ap_ibss(struc
        if (vif->type == NL80211_IFTYPE_AP)
                iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
  
 +      mvmvif->ap_assoc_sta_count = 0;
 +
        /* Add the mac context */
        ret = iwl_mvm_mac_ctxt_add(mvm, vif);
        if (ret)
@@@ -2373,6 -2388,7 +2373,7 @@@ static void iwl_mvm_stop_ap_ibss(struc
                iwl_mvm_remove_time_event(mvm, mvmvif,
                                          &mvmvif->time_event_data);
                RCU_INIT_POINTER(mvm->csa_vif, NULL);
+               mvmvif->csa_countdown = false;
        }
  
        if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
@@@ -2598,7 -2614,6 +2599,7 @@@ static void iwl_mvm_sta_pre_rcu_remove(
                                       struct ieee80211_sta *sta)
  {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 +      struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
  
        /*
        if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
                rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
                                   ERR_PTR(-ENOENT));
 +
 +      if (mvm_sta->vif->type == NL80211_IFTYPE_AP) {
 +              mvmvif->ap_assoc_sta_count--;
 +              iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
 +      }
 +
        mutex_unlock(&mvm->mutex);
  }
  
index 0d3aff1b4bad3ed5c6307f68b42c977ca6f52683,c754051a4ceacf138d4e6399c9cc679d8a9979c0..16c5a6d7e0c9dee0909b55cb027f2f28c5e6a282
@@@ -82,6 -82,7 +82,6 @@@
  #include "constants.h"
  #include "tof.h"
  
 -#define IWL_INVALID_MAC80211_QUEUE    0xff
  #define IWL_MVM_MAX_ADDRESSES         5
  /* RSSI offset for WkP */
  #define IWL_RSSI_OFFSET 50
@@@ -322,11 -323,11 +322,11 @@@ enum iwl_bt_force_ant_mode 
  struct iwl_mvm_vif_bf_data {
        bool bf_enabled;
        bool ba_enabled;
 -      s8 ave_beacon_signal;
 -      s8 last_cqm_event;
 -      s8 bt_coex_min_thold;
 -      s8 bt_coex_max_thold;
 -      s8 last_bt_coex_event;
 +      int ave_beacon_signal;
 +      int last_cqm_event;
 +      int bt_coex_min_thold;
 +      int bt_coex_max_thold;
 +      int last_bt_coex_event;
  };
  
  /**
   * @bssid: BSSID for this (client) interface
   * @associated: indicates that we're currently associated, used only for
   *    managing the firmware state in iwl_mvm_bss_info_changed_station()
 + * @ap_assoc_sta_count: count of stations associated to us - valid only
 + *    if VIF type is AP
   * @uploaded: indicates the MAC context has been added to the device
   * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
   *    should get quota etc.
@@@ -368,7 -367,6 +368,7 @@@ struct iwl_mvm_vif 
  
        u8 bssid[ETH_ALEN];
        bool associated;
 +      u8 ap_assoc_sta_count;
  
        bool uploaded;
        bool ap_ibss_active;
@@@ -604,14 -602,7 +604,14 @@@ struct iwl_mvm 
                u64 on_time_scan;
        } radio_stats, accu_radio_stats;
  
 -      u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
 +      struct {
 +              /* Map to HW queue */
 +              u32 hw_queue_to_mac80211;
 +              u8 hw_queue_refcount;
 +              bool setup_reserved;
 +              u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
 +      } queue_info[IWL_MAX_HW_QUEUES];
 +      spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
        atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
  
        const char *nvm_file_name;
        struct debugfs_blob_wrapper nvm_sw_blob;
        struct debugfs_blob_wrapper nvm_calib_blob;
        struct debugfs_blob_wrapper nvm_prod_blob;
 +      struct debugfs_blob_wrapper nvm_phy_sku_blob;
  
        struct iwl_mvm_frame_stats drv_rx_stats;
        spinlock_t drv_stats_lock;
@@@ -870,6 -860,11 +870,11 @@@ static inline bool iwl_mvm_is_radio_kil
               test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
  }
  
+ static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+ {
+       return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
  /* Must be called with rcu_read_lock() held and it can only be
   * released when mvmsta is not needed anymore.
   */
@@@ -917,12 -912,6 +922,12 @@@ static inline bool iwl_mvm_is_d0i3_supp
                           IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
  }
  
 +static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
 +{
 +      return fw_has_capa(&mvm->fw->ucode_capa,
 +                         IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
 +}
 +
  static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
  {
        bool nvm_lar = mvm->nvm_data->lar_enabled;
@@@ -950,6 -939,11 +955,6 @@@ static inline bool iwl_mvm_is_wifi_mcc_
                           IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
  }
  
 -static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm)
 -{
 -      return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_SCD_CFG);
 -}
 -
  static inline bool iwl_mvm_bt_is_plcr_supported(struct iwl_mvm *mvm)
  {
        return fw_has_capa(&mvm->fw->ucode_capa,
@@@ -970,12 -964,6 +975,12 @@@ static inline bool iwl_mvm_is_csum_supp
                           IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
  }
  
 +static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
 +{
 +      /* firmware flag isn't defined yet */
 +      return false;
 +}
 +
  extern const u8 iwl_mvm_ac_to_tx_fifo[];
  
  struct iwl_rate_info {
@@@ -1148,6 -1136,7 +1153,6 @@@ void iwl_mvm_mac_ctxt_recalc_tsf_id(str
                                    struct ieee80211_vif *vif);
  unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
                                         struct ieee80211_vif *exclude_vif);
 -
  /* Bindings */
  int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
  int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@@ -1360,20 -1349,14 +1365,20 @@@ static inline bool iwl_mvm_vif_low_late
  }
  
  /* hw scheduler queue config */
 -void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, u16 ssn,
 -                      const struct iwl_trans_txq_scd_cfg *cfg,
 +void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 +                      u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
                        unsigned int wdg_timeout);
 -void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, u8 flags);
 +/*
 + * Disable a TXQ.
 + * Note that in non-DQA mode the %mac80211_queue and %tid params are ignored.
 + */
 +void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 +                       u8 tid, u8 flags);
 +int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq);
  
  static inline
 -void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue,
 -                         u8 fifo, unsigned int wdg_timeout)
 +void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
 +                         u8 fifo, u16 ssn, unsigned int wdg_timeout)
  {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
                .frame_limit = IWL_FRAME_LIMIT,
        };
  
 -      iwl_mvm_enable_txq(mvm, queue, 0, &cfg, wdg_timeout);
 +      iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
  }
  
  static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue,
 -                                        int fifo, int sta_id, int tid,
 -                                        int frame_limit, u16 ssn,
 -                                        unsigned int wdg_timeout)
 +                                        int mac80211_queue, int fifo,
 +                                        int sta_id, int tid, int frame_limit,
 +                                        u16 ssn, unsigned int wdg_timeout)
  {
        struct iwl_trans_txq_scd_cfg cfg = {
                .fifo = fifo,
                .aggregate = true,
        };
  
 -      iwl_mvm_enable_txq(mvm, queue, ssn, &cfg, wdg_timeout);
 +      iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout);
  }
  
  /* Thermal management and CT-kill */
index 064c100e45fe5bf6e71c590f7c08877f636b52c7,f0cb092f980ec26b26e9ca3c6cadf813f39de853..7fcd2c24a0a4c682241a4d69c066d20fb61e7a76
@@@ -89,7 -89,6 +89,7 @@@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUT
  MODULE_LICENSE("GPL");
  
  static const struct iwl_op_mode_ops iwl_mvm_ops;
 +static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
  
  struct iwl_mvm_mod_params iwlmvm_mod_params = {
        .power_scheme = IWL_POWER_SCHEME_BPS,
@@@ -223,6 -222,7 +223,6 @@@ struct iwl_rx_handlers 
   * called from a worker with mvm->mutex held.
   */
  static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 -      RX_HANDLER(REPLY_RX_PHY_CMD, iwl_mvm_rx_rx_phy_cmd, false),
        RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false),
        RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false),
  
        RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
                   iwl_mvm_power_uapsd_misbehaving_ap_notif, false),
        RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true),
 +      RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
 +                     iwl_mvm_temp_notif, true),
  
        RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
                   true),
  static const char *const iwl_mvm_cmd_strings[REPLY_MAX + 1] = {
        CMD(MVM_ALIVE),
        CMD(REPLY_ERROR),
 +      CMD(ECHO_CMD),
        CMD(INIT_COMPLETE_NOTIF),
        CMD(PHY_CONTEXT_CMD),
        CMD(MGMT_MCAST_KEY),
@@@ -425,6 -422,7 +425,6 @@@ iwl_op_mode_mvm_start(struct iwl_trans 
                hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
  
        op_mode = hw->priv;
 -      op_mode->ops = &iwl_mvm_ops;
  
        mvm = IWL_OP_MODE_GET_MVM(op_mode);
        mvm->dev = trans->dev;
        mvm->fw = fw;
        mvm->hw = hw;
  
 +      if (iwl_mvm_has_new_rx_api(mvm)) {
 +              op_mode->ops = &iwl_mvm_ops_mq;
 +      } else {
 +              op_mode->ops = &iwl_mvm_ops;
 +
 +              if (WARN_ON(trans->num_rx_queues > 1))
 +                      goto out_free;
 +      }
 +
        mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
  
        mvm->aux_queue = 15;
        INIT_LIST_HEAD(&mvm->aux_roc_te_list);
        INIT_LIST_HEAD(&mvm->async_handlers_list);
        spin_lock_init(&mvm->time_event_lock);
 +      spin_lock_init(&mvm->queue_info_lock);
  
        INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
        INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
        ieee80211_unregister_hw(mvm->hw);
        iwl_mvm_leds_exit(mvm);
   out_free:
+       flush_delayed_work(&mvm->fw_dump_wk);
        iwl_phy_db_free(mvm->phy_db);
        kfree(mvm->scan_cmd);
        if (!cfg->no_power_up_nic_in_init || !mvm->nvm_file_name)
@@@ -728,11 -717,18 +729,11 @@@ static inline void iwl_mvm_rx_check_tri
        }
  }
  
 -static void iwl_mvm_rx_dispatch(struct iwl_op_mode *op_mode,
 -                              struct napi_struct *napi,
 -                              struct iwl_rx_cmd_buffer *rxb)
 +static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
 +                            struct iwl_rx_cmd_buffer *rxb,
 +                            struct iwl_rx_packet *pkt)
  {
 -      struct iwl_rx_packet *pkt = rxb_addr(rxb);
 -      struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 -      u8 i;
 -
 -      if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD)) {
 -              iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
 -              return;
 -      }
 +      int i;
  
        iwl_mvm_rx_check_trigger(mvm, pkt);
  
        }
  }
  
 +static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
 +                     struct napi_struct *napi,
 +                     struct iwl_rx_cmd_buffer *rxb)
 +{
 +      struct iwl_rx_packet *pkt = rxb_addr(rxb);
 +      struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 +
 +      if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
 +              iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
 +      else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
 +              iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
 +      else
 +              iwl_mvm_rx_common(mvm, rxb, pkt);
 +}
 +
 +static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 +                        struct napi_struct *napi,
 +                        struct iwl_rx_cmd_buffer *rxb)
 +{
 +      struct iwl_rx_packet *pkt = rxb_addr(rxb);
 +      struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 +
 +      if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
 +              iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
 +      else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
 +              iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
 +      else
 +              iwl_mvm_rx_common(mvm, rxb, pkt);
 +}
 +
  static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
  {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 -      int mq = mvm->queue_to_mac80211[queue];
 +      unsigned long mq;
 +      int q;
  
 -      if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
 -              return;
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      mq = mvm->queue_info[queue].hw_queue_to_mac80211;
 +      spin_unlock_bh(&mvm->queue_info_lock);
  
 -      if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
 -              IWL_DEBUG_TX_QUEUES(mvm,
 -                                  "queue %d (mac80211 %d) already stopped\n",
 -                                  queue, mq);
 +      if (WARN_ON_ONCE(!mq))
                return;
 -      }
  
 -      ieee80211_stop_queue(mvm->hw, mq);
 +      for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
 +              if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
 +                      IWL_DEBUG_TX_QUEUES(mvm,
 +                                          "queue %d (mac80211 %d) already stopped\n",
 +                                          queue, q);
 +                      continue;
 +              }
 +
 +              ieee80211_stop_queue(mvm->hw, q);
 +      }
  }
  
  static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
  {
        struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 -      int mq = mvm->queue_to_mac80211[queue];
 +      unsigned long mq;
 +      int q;
  
 -      if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
 -              return;
 +      spin_lock_bh(&mvm->queue_info_lock);
 +      mq = mvm->queue_info[queue].hw_queue_to_mac80211;
 +      spin_unlock_bh(&mvm->queue_info_lock);
  
 -      if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
 -              IWL_DEBUG_TX_QUEUES(mvm,
 -                                  "queue %d (mac80211 %d) still stopped\n",
 -                                  queue, mq);
 +      if (WARN_ON_ONCE(!mq))
                return;
 -      }
  
 -      ieee80211_wake_queue(mvm->hw, mq);
 +      for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
 +              if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
 +                      IWL_DEBUG_TX_QUEUES(mvm,
 +                                          "queue %d (mac80211 %d) still stopped\n",
 +                                          queue, q);
 +                      continue;
 +              }
 +
 +              ieee80211_wake_queue(mvm->hw, q);
 +      }
  }
  
  void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
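
With the DQA-style mapping, one hardware queue can back several mac80211 queues, so the per-queue byte becomes a bitmap read under a spinlock, and the existing per-mac80211-queue counter absorbs nested stop/wake calls. A sketch of the refcounted fan-out (locking omitted, sizes hypothetical):

```c
#include <stdio.h>

#define MAC_QUEUES 16

static unsigned long hw_to_mac80211[4];	/* bitmap per hardware queue */
static int stop_count[MAC_QUEUES];	/* nesting counter per mac80211 queue */

static void stop_hw_queue(int hwq)
{
	for (int q = 0; q < MAC_QUEUES; q++)
		if ((hw_to_mac80211[hwq] & (1UL << q)) && ++stop_count[q] == 1)
			printf("stop mac80211 queue %d\n", q);
}

static void wake_hw_queue(int hwq)
{
	for (int q = 0; q < MAC_QUEUES; q++)
		if ((hw_to_mac80211[hwq] & (1UL << q)) && --stop_count[q] == 0)
			printf("wake mac80211 queue %d\n", q);
}

int main(void)
{
	hw_to_mac80211[0] = (1UL << 2) | (1UL << 5);

	stop_hw_queue(0);
	stop_hw_queue(0);	/* nested stop: counters go to 2, no output */
	wake_hw_queue(0);	/* back to 1: still stopped */
	wake_hw_queue(0);	/* hits 0: queues 2 and 5 wake */
	return 0;
}
```
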
@@@ -1194,17 -1146,12 +1195,17 @@@ int iwl_mvm_enter_d0i3(struct iwl_op_mo
        /* make sure we have no running tx while configuring the seqno */
        synchronize_net();
  
 -      iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
 -      ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 -                                 sizeof(wowlan_config_cmd),
 -                                 &wowlan_config_cmd);
 -      if (ret)
 -              return ret;
 +      /* configure wowlan configuration only if needed */
 +      if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
 +              iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
 +                                      &d0i3_iter_data);
 +
 +              ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
 +                                         sizeof(wowlan_config_cmd),
 +                                         &wowlan_config_cmd);
 +              if (ret)
 +                      return ret;
 +      }
  
        return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
                                    flags | CMD_MAKE_TRANS_IDLE,
@@@ -1311,7 -1258,7 +1312,7 @@@ static void iwl_mvm_d0i3_exit_work(stru
        };
        struct iwl_wowlan_status *status;
        int ret;
 -      u32 handled_reasons, wakeup_reasons;
 +      u32 handled_reasons, wakeup_reasons = 0;
        __le16 *qos_seq = NULL;
  
        mutex_lock(&mvm->mutex);
  out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
  
 +      IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
 +                     wakeup_reasons);
 +
        /* qos_seq might point inside resp_pkt, so free it only now */
        if (get_status_cmd.resp_pkt)
                iwl_free_resp(&get_status_cmd);
@@@ -1395,38 -1339,17 +1396,38 @@@ int iwl_mvm_exit_d0i3(struct iwl_op_mod
        return _iwl_mvm_exit_d0i3(mvm);
  }
  
 +#define IWL_MVM_COMMON_OPS                                    \
 +      /* these could be differentiated */                     \
 +      .queue_full = iwl_mvm_stop_sw_queue,                    \
 +      .queue_not_full = iwl_mvm_wake_sw_queue,                \
 +      .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,              \
 +      .free_skb = iwl_mvm_free_skb,                           \
 +      .nic_error = iwl_mvm_nic_error,                         \
 +      .cmd_queue_full = iwl_mvm_cmd_queue_full,               \
 +      .nic_config = iwl_mvm_nic_config,                       \
 +      .enter_d0i3 = iwl_mvm_enter_d0i3,                       \
 +      .exit_d0i3 = iwl_mvm_exit_d0i3,                         \
 +      /* as we only register one, these MUST be common! */    \
 +      .start = iwl_op_mode_mvm_start,                         \
 +      .stop = iwl_op_mode_mvm_stop
 +
  static const struct iwl_op_mode_ops iwl_mvm_ops = {
 -      .start = iwl_op_mode_mvm_start,
 -      .stop = iwl_op_mode_mvm_stop,
 -      .rx = iwl_mvm_rx_dispatch,
 -      .queue_full = iwl_mvm_stop_sw_queue,
 -      .queue_not_full = iwl_mvm_wake_sw_queue,
 -      .hw_rf_kill = iwl_mvm_set_hw_rfkill_state,
 -      .free_skb = iwl_mvm_free_skb,
 -      .nic_error = iwl_mvm_nic_error,
 -      .cmd_queue_full = iwl_mvm_cmd_queue_full,
 -      .nic_config = iwl_mvm_nic_config,
 -      .enter_d0i3 = iwl_mvm_enter_d0i3,
 -      .exit_d0i3 = iwl_mvm_exit_d0i3,
 +      IWL_MVM_COMMON_OPS,
 +      .rx = iwl_mvm_rx,
 +};
 +
 +static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
 +                            struct napi_struct *napi,
 +                            struct iwl_rx_cmd_buffer *rxb,
 +                            unsigned int queue)
 +{
 +      struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 +
 +      iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
 +}
 +
 +static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
 +      IWL_MVM_COMMON_OPS,
 +      .rx = iwl_mvm_rx_mq,
 +      .rx_rss = iwl_mvm_rx_mq_rss,
  };
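
IWL_MVM_COMMON_OPS lets the two ops tables share one set of designated initializers so that only .rx (and .rx_rss) differ. A hedged, stand-alone illustration of the pattern; every name below is invented for the example:

#include <stdio.h>

struct ops {
	void (*start)(void);
	void (*rx)(const char *);
};

static void common_start(void) { puts("start"); }
static void rx_legacy(const char *s) { printf("legacy rx: %s\n", s); }
static void rx_mq(const char *s)     { printf("mq rx: %s\n", s); }

/* Shared initializers expanded into each table, as IWL_MVM_COMMON_OPS does. */
#define COMMON_OPS .start = common_start

static const struct ops ops_legacy = { COMMON_OPS, .rx = rx_legacy };
static const struct ops ops_mq     = { COMMON_OPS, .rx = rx_mq };

int main(void)
{
	ops_legacy.start();
	ops_legacy.rx("frame");
	ops_mq.rx("frame");
	return 0;
}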
index e581fc69129dceebe796bf7e27ccaa48dd7af0ff,fc19376986259de8faf9c2be4c5c3376780e54ff..c9b3eb70f340d48ffe60105622bee367c7ea848f
@@@ -70,7 -70,6 +70,7 @@@ struct inet_timewait_sock 
  #define tw_dport              __tw_common.skc_dport
  #define tw_num                        __tw_common.skc_num
  #define tw_cookie             __tw_common.skc_cookie
 +#define tw_dr                 __tw_common.skc_tw_dr
  
        int                     tw_timeout;
        volatile unsigned char  tw_substate;
@@@ -89,6 -88,7 +89,6 @@@
        kmemcheck_bitfield_end(flags);
        struct timer_list       tw_timer;
        struct inet_bind_bucket *tw_tb;
 -      struct inet_timewait_death_row *tw_dr;
  };
  #define tw_tclass tw_tos
  
@@@ -113,12 -113,12 +113,12 @@@ void __inet_twsk_hashdance(struct inet_
  void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo,
                          bool rearm);
  
- static void inline inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
+ static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
  {
        __inet_twsk_schedule(tw, timeo, false);
  }
  
- static void inline inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
+ static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
  {
        __inet_twsk_schedule(tw, timeo, true);
  }
diff --combined include/net/sock.h
index 64a75458d22cc2fc4e972601972cf9ff9cf0a57a,e23717013a4e6cb1ef84e6b8ba6654b011ee8670..aeed5c95f3caedcdb4c10668c67764d8557e9369
@@@ -150,10 -150,6 +150,10 @@@ typedef __u64 __bitwise __addrpair
   *    @skc_node: main hash linkage for various protocol lookup tables
   *    @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
   *    @skc_tx_queue_mapping: tx queue number for this connection
 + *    @skc_flags: placeholder for sk_flags
 + *            %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 + *            %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 + *    @skc_incoming_cpu: record/match cpu processing incoming packets
   *    @skc_refcnt: reference count
   *
   *    This is the minimal network layer representation of sockets, the header
@@@ -204,16 -200,6 +204,16 @@@ struct sock_common 
  
        atomic64_t              skc_cookie;
  
 +      /* The following fields are padding to force
 +       * offset(struct sock, sk_refcnt) == 128 on 64bit arches,
 +       * assuming IPV6 is enabled. We use this padding differently
 +       * for different kinds of 'sockets'.
 +       */
 +      union {
 +              unsigned long   skc_flags;
 +              struct sock     *skc_listener; /* request_sock */
 +              struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
 +      };
        /*
         * fields between dontcopy_begin/dontcopy_end
         * are not copied in sock_copy()
                struct hlist_nulls_node skc_nulls_node;
        };
        int                     skc_tx_queue_mapping;
 +      union {
 +              int             skc_incoming_cpu;
 +              u32             skc_rcv_wnd;
 +              u32             skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
 +      };
 +
        atomic_t                skc_refcnt;
        /* private: */
        int                     skc_dontcopy_end[0];
 +      union {
 +              u32             skc_rxhash;
 +              u32             skc_window_clamp;
 +              u32             skc_tw_snd_nxt; /* struct tcp_timewait_sock */
 +      };
        /* public: */
  };
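
The unions above let each socket flavour reinterpret the same padding slot (full sockets store skc_incoming_cpu, request sockets skc_rcv_wnd, timewait sockets skc_tw_rcv_nxt), so sk_refcnt stays at its target offset without growing the struct. A small C11 sketch of the idea, with invented field names:

#include <stdio.h>
#include <stddef.h>

/* Different 'socket' flavours reuse one slot, as in sock_common. */
struct common {
	long tx_queue;
	union {
		int incoming_cpu;	/* full sockets */
		unsigned int rcv_wnd;	/* request sockets */
		unsigned int tw_rcv_nxt;/* timewait sockets */
	};
};

int main(void)
{
	struct common c = { .incoming_cpu = 7 };

	/* All union members share one offset, so the struct does not grow. */
	printf("offset=%zu size=%zu cpu=%d\n",
	       offsetof(struct common, incoming_cpu), sizeof(struct common),
	       c.incoming_cpu);
	return 0;
}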
  
@@@ -268,6 -243,8 +268,6 @@@ struct cg_proto
    *   @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
    *   @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
    *   @sk_sndbuf: size of send buffer in bytes
 -  *   @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 -  *              %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
    *   @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
    *   @sk_no_check_rx: allow zero checksum in RX packets
    *   @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
    *   @sk_rcvlowat: %SO_RCVLOWAT setting
    *   @sk_rcvtimeo: %SO_RCVTIMEO setting
    *   @sk_sndtimeo: %SO_SNDTIMEO setting
 -  *   @sk_rxhash: flow hash received from netif layer
 -  *   @sk_incoming_cpu: record cpu processing incoming packets
    *   @sk_txhash: computed flow hash for use on transmit
    *   @sk_filter: socket filtering instructions
    *   @sk_timer: sock cleanup timer
@@@ -352,9 -331,6 +352,9 @@@ struct sock 
  #define sk_v6_daddr           __sk_common.skc_v6_daddr
  #define sk_v6_rcv_saddr       __sk_common.skc_v6_rcv_saddr
  #define sk_cookie             __sk_common.skc_cookie
 +#define sk_incoming_cpu               __sk_common.skc_incoming_cpu
 +#define sk_flags              __sk_common.skc_flags
 +#define sk_rxhash             __sk_common.skc_rxhash
  
        socket_lock_t           sk_lock;
        struct sk_buff_head     sk_receive_queue;
        } sk_backlog;
  #define sk_rmem_alloc sk_backlog.rmem_alloc
        int                     sk_forward_alloc;
 -#ifdef CONFIG_RPS
 -      __u32                   sk_rxhash;
 -#endif
 -      u16                     sk_incoming_cpu;
 -      /* 16bit hole
 -       * Warned : sk_incoming_cpu can be set from softirq,
 -       * Do not use this hole without fully understanding possible issues.
 -       */
  
        __u32                   sk_txhash;
  #ifdef CONFIG_NET_RX_BUSY_POLL
  #ifdef CONFIG_XFRM
        struct xfrm_policy      *sk_policy[2];
  #endif
 -      unsigned long           sk_flags;
        struct dst_entry        *sk_rx_dst;
        struct dst_entry __rcu  *sk_dst_cache;
        spinlock_t              sk_dst_lock;
@@@ -774,7 -759,7 +774,7 @@@ static inline int sk_memalloc_socks(voi
  
  #endif
  
 -static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
 +static inline gfp_t sk_gfp_atomic(const struct sock *sk, gfp_t gfp_mask)
  {
        return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
  }
@@@ -843,6 -828,14 +843,14 @@@ static inline __must_check int sk_add_b
        if (sk_rcvqueues_full(sk, limit))
                return -ENOBUFS;
  
+       /*
+        * If the skb was allocated from pfmemalloc reserves, only
+        * allow SOCK_MEMALLOC sockets to use it as this socket is
+        * helping free memory
+        */
+       if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
+               return -ENOMEM;
        __sk_add_backlog(sk, skb);
        sk->sk_backlog.len += skb->truesize;
        return 0;
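
The added check keeps emergency (pfmemalloc) pages from being consumed by sockets that are not themselves helping memory reclaim. A toy illustration of the gate, with simplified stand-in types rather than the real sk_buff/sock:

#include <stdio.h>

struct pkt  { int pfmemalloc; };	/* stand-in for sk_buff */
struct sock { int memalloc; };		/* stand-in for SOCK_MEMALLOC flag */

/* Reserve-backed packets may only feed sockets that help reclaim. */
static int backlog_admit(const struct sock *sk, const struct pkt *p)
{
	if (p->pfmemalloc && !sk->memalloc)
		return -12;	/* mirrors the -ENOMEM above */
	return 0;
}

int main(void)
{
	struct pkt reserve_pkt = { .pfmemalloc = 1 };
	struct sock plain = { 0 }, swap_sock = { .memalloc = 1 };

	printf("%d %d\n", backlog_admit(&plain, &reserve_pkt),
	       backlog_admit(&swap_sock, &reserve_pkt));
	return 0;
}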
@@@ -1529,13 -1522,6 +1537,13 @@@ void sock_kfree_s(struct sock *sk, voi
  void sock_kzfree_s(struct sock *sk, void *mem, int size);
  void sk_send_sigurg(struct sock *sk);
  
 +struct sockcm_cookie {
 +      u32 mark;
 +};
 +
 +int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
 +                 struct sockcm_cookie *sockc);
 +
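
sockcm_cookie gathers per-message send options parsed out of control messages into a single out-parameter, so callers receive one struct instead of a growing argument list. A user-space sketch of the cookie pattern; parse_cmsg and its text format are invented for the example:

#include <stdio.h>

/* Out-params collected in a small cookie struct, as with sockcm_cookie. */
struct sendmsg_cookie {
	unsigned int mark;
};

/* Hypothetical parser: fills the cookie from control-message input. */
static int parse_cmsg(const char *cmsg, struct sendmsg_cookie *c)
{
	return sscanf(cmsg, "mark=%u", &c->mark) == 1 ? 0 : -1;
}

int main(void)
{
	struct sendmsg_cookie c = { 0 };

	if (!parse_cmsg("mark=42", &c))
		printf("mark=%u\n", c.mark);
	return 0;
}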
  /*
   * Functions to fill in entries in struct proto_ops when a protocol
   * does not implement a particular function.
@@@ -1676,16 -1662,12 +1684,16 @@@ static inline void sock_graft(struct so
  kuid_t sock_i_uid(struct sock *sk);
  unsigned long sock_i_ino(struct sock *sk);
  
 -static inline void sk_set_txhash(struct sock *sk)
 +static inline u32 net_tx_rndhash(void)
  {
 -      sk->sk_txhash = prandom_u32();
 +      u32 v = prandom_u32();
  
 -      if (unlikely(!sk->sk_txhash))
 -              sk->sk_txhash = 1;
 +      return v ?: 1;
 +}
 +
 +static inline void sk_set_txhash(struct sock *sk)
 +{
 +      sk->sk_txhash = net_tx_rndhash();
  }
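
net_tx_rndhash() maps a zero random draw to 1, so 0 can keep meaning "no hash computed yet"; the GNU 'v ?: 1' shorthand is equivalent to the portable form in this sketch:

#include <stdio.h>
#include <stdlib.h>

/* A zero draw becomes 1, keeping 0 reserved for "unset". */
static unsigned int tx_rndhash(void)
{
	unsigned int v = (unsigned int)rand();

	return v ? v : 1;	/* the kernel spells this v ?: 1 */
}

int main(void)
{
	printf("txhash=%u\n", tx_rndhash());
	return 0;
}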
  
  static inline void sk_rethink_txhash(struct sock *sk)
@@@ -2223,14 -2205,6 +2231,14 @@@ static inline bool sk_fullsock(const st
        return (1 << sk->sk_state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV);
  }
  
 +/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV socket.
 + * SYNACK messages can be attached to either one (depending on SYNCOOKIE).
 + */
 +static inline bool sk_listener(const struct sock *sk)
 +{
 +      return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 +}
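
Both helpers test membership in a set of states with one shift and mask instead of chained equality tests. A stand-alone sketch of the technique, using an invented state enum:

#include <stdio.h>

enum state { ST_ESTABLISHED, ST_LISTEN, ST_TIME_WAIT, ST_NEW_SYN_RECV };

#define F(s) (1u << (s))

/* One shift and mask replaces a chain of comparisons. */
static int is_listener(enum state s)
{
	return ((1u << s) & (F(ST_LISTEN) | F(ST_NEW_SYN_RECV))) != 0;
}

int main(void)
{
	printf("%d %d\n", is_listener(ST_LISTEN), is_listener(ST_TIME_WAIT));
	return 0;
}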
 +
  void sock_enable_timestamp(struct sock *sk, int flag);
  int sock_get_timestamp(struct sock *, struct timeval __user *);
  int sock_get_timestampns(struct sock *, struct timespec __user *);
index 4036e1b1980ff2b113104315bd1bf49db30a5fcb,036f73bc54cd692763dc04954151d45a28f5e98d..fef125e2d7774aec9c3299be0cc8df6145608be8
@@@ -323,10 -323,10 +323,10 @@@ enum ovs_key_attr 
        OVS_KEY_ATTR_MPLS,      /* array of struct ovs_key_mpls.
                                 * The implementation may restrict
                                 * the accepted length of the array. */
-       OVS_KEY_ATTR_CT_STATE,  /* u8 bitmask of OVS_CS_F_* */
+       OVS_KEY_ATTR_CT_STATE,  /* u32 bitmask of OVS_CS_F_* */
        OVS_KEY_ATTR_CT_ZONE,   /* u16 connection tracking zone. */
        OVS_KEY_ATTR_CT_MARK,   /* u32 connection tracking mark */
-       OVS_KEY_ATTR_CT_LABEL /* 16-octet connection tracking label */
+       OVS_KEY_ATTR_CT_LABELS, /* 16-octet connection tracking labels */
  
  #ifdef __KERNEL__
        OVS_KEY_ATTR_TUNNEL_INFO,  /* struct ip_tunnel_info */
@@@ -349,8 -349,6 +349,8 @@@ enum ovs_tunnel_key_attr 
        OVS_TUNNEL_KEY_ATTR_TP_SRC,             /* be16 src Transport Port. */
        OVS_TUNNEL_KEY_ATTR_TP_DST,             /* be16 dst Transport Port. */
        OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS,         /* Nested OVS_VXLAN_EXT_* */
 +      OVS_TUNNEL_KEY_ATTR_IPV6_SRC,           /* struct in6_addr src IPv6 address. */
 +      OVS_TUNNEL_KEY_ATTR_IPV6_DST,           /* struct in6_addr dst IPv6 address. */
        __OVS_TUNNEL_KEY_ATTR_MAX
  };
  
@@@ -441,9 -439,9 +441,9 @@@ struct ovs_key_nd 
        __u8    nd_tll[ETH_ALEN];
  };
  
- #define OVS_CT_LABEL_LEN      16
- struct ovs_key_ct_label {
-       __u8    ct_label[OVS_CT_LABEL_LEN];
+ #define OVS_CT_LABELS_LEN     16
+ struct ovs_key_ct_labels {
+       __u8    ct_labels[OVS_CT_LABELS_LEN];
  };
  
  /* OVS_KEY_ATTR_CT_STATE flags */
  #define OVS_CS_F_ESTABLISHED       0x02 /* Part of an existing connection. */
  #define OVS_CS_F_RELATED           0x04 /* Related to an established
                                         * connection. */
- #define OVS_CS_F_INVALID           0x20 /* Could not track connection. */
- #define OVS_CS_F_REPLY_DIR         0x40 /* Flow is in the reply direction. */
- #define OVS_CS_F_TRACKED           0x80 /* Conntrack has occurred. */
+ #define OVS_CS_F_REPLY_DIR         0x08 /* Flow is in the reply direction. */
+ #define OVS_CS_F_INVALID           0x10 /* Could not track connection. */
+ #define OVS_CS_F_TRACKED           0x20 /* Conntrack has occurred. */
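
The conntrack state is a single u32 with these bits OR-ed together. A minimal usage sketch; OVS_CS_F_NEW (0x01) is assumed from context since its definition falls outside the hunk:

#include <stdio.h>

#define OVS_CS_F_NEW          0x01	/* assumed, not shown in the hunk */
#define OVS_CS_F_ESTABLISHED  0x02
#define OVS_CS_F_RELATED      0x04
#define OVS_CS_F_REPLY_DIR    0x08
#define OVS_CS_F_INVALID      0x10
#define OVS_CS_F_TRACKED      0x20

int main(void)
{
	unsigned int st = OVS_CS_F_TRACKED | OVS_CS_F_ESTABLISHED;

	/* A flow counts as 'established' only once conntrack has run. */
	if ((st & OVS_CS_F_TRACKED) && (st & OVS_CS_F_ESTABLISHED))
		puts("established");
	return 0;
}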
  
  /**
   * enum ovs_flow_attr - attributes for %OVS_FLOW_* commands.
@@@ -620,22 -618,24 +620,24 @@@ struct ovs_action_hash 
  
  /**
   * enum ovs_ct_attr - Attributes for %OVS_ACTION_ATTR_CT action.
-  * @OVS_CT_ATTR_FLAGS: u32 connection tracking flags.
+  * @OVS_CT_ATTR_COMMIT: If present, commits the connection to the conntrack
+  * table. This allows future packets for the same connection to be identified
+  * as 'established' or 'related'.
   * @OVS_CT_ATTR_ZONE: u16 connection tracking zone.
   * @OVS_CT_ATTR_MARK: u32 value followed by u32 mask. For each bit set in the
   * mask, the corresponding bit in the value is copied to the connection
   * tracking mark field in the connection.
-  * @OVS_CT_ATTR_LABEL: %OVS_CT_LABEL_LEN value followed by %OVS_CT_LABEL_LEN
+  * @OVS_CT_ATTR_LABELS: %OVS_CT_LABELS_LEN value followed by %OVS_CT_LABELS_LEN
   * mask. For each bit set in the mask, the corresponding bit in the value is
   * copied to the connection tracking label field in the connection.
   * @OVS_CT_ATTR_HELPER: variable length string defining conntrack ALG.
   */
  enum ovs_ct_attr {
        OVS_CT_ATTR_UNSPEC,
-       OVS_CT_ATTR_FLAGS,      /* u8 bitmask of OVS_CT_F_*. */
+       OVS_CT_ATTR_COMMIT,     /* No argument, commits connection. */
        OVS_CT_ATTR_ZONE,       /* u16 zone id. */
        OVS_CT_ATTR_MARK,       /* mark to associate with this connection. */
-       OVS_CT_ATTR_LABEL,      /* label to associate with this connection. */
+       OVS_CT_ATTR_LABELS,     /* labels to associate with this connection. */
        OVS_CT_ATTR_HELPER,     /* netlink helper to assist detection of
                                   related connections. */
        __OVS_CT_ATTR_MAX
  
  #define OVS_CT_ATTR_MAX (__OVS_CT_ATTR_MAX - 1)
  
- /*
-  * OVS_CT_ATTR_FLAGS flags - bitmask of %OVS_CT_F_*
-  * @OVS_CT_F_COMMIT: Commits the flow to the conntrack table. This allows
-  * future packets for the same connection to be identified as 'established'
-  * or 'related'.
-  */
- #define OVS_CT_F_COMMIT               0x01
  /**
   * enum ovs_action_attr - Action types.
   *
@@@ -707,7 -699,7 +701,7 @@@ enum ovs_action_attr 
                                       * data immediately followed by a mask.
                                       * The data must be zero for the unmasked
                                       * bits. */
-       OVS_ACTION_ATTR_CT,           /* One nested OVS_CT_ATTR_* . */
+       OVS_ACTION_ATTR_CT,           /* Nested OVS_CT_ATTR_* . */
  
        __OVS_ACTION_ATTR_MAX,        /* Nothing past this will be accepted
                                       * from userspace. */
index 4db0b3ccb497ec7558b36b13ff28792ad3d61ee9,9d8f5d10c1e553122be08d3407434ea24d19c136..123a5af4e8bb54b0cf33d9558d7fa0b1bb8a31f6
@@@ -160,7 -160,7 +160,7 @@@ struct rtattr 
  
  /* Macros to handle rtattributes */
  
- #define RTA_ALIGNTO   4
+ #define RTA_ALIGNTO   4U
  #define RTA_ALIGN(len) ( ((len)+RTA_ALIGNTO-1) & ~(RTA_ALIGNTO-1) )
  #define RTA_OK(rta,len) ((len) >= (int)sizeof(struct rtattr) && \
                         (rta)->rta_len >= sizeof(struct rtattr) && \
@@@ -270,7 -270,6 +270,7 @@@ enum rt_scope_t 
  #define RTM_F_CLONED          0x200   /* This route is cloned         */
  #define RTM_F_EQUALIZE                0x400   /* Multipath equalizer: NI      */
  #define RTM_F_PREFIX          0x800   /* Prefix addresses             */
 +#define RTM_F_LOOKUP_TABLE    0x1000  /* set rtm_table to FIB lookup result */
  
  /* Reserved table identifiers */
  
@@@ -667,7 -666,6 +667,7 @@@ struct tcamsg 
  #define RTEXT_FILTER_VF               (1 << 0)
  #define RTEXT_FILTER_BRVLAN   (1 << 1)
  #define RTEXT_FILTER_BRVLAN_COMPRESSED        (1 << 2)
 +#define       RTEXT_FILTER_SKIP_STATS (1 << 3)
  
  /* End of information exported to user level */
  
diff --combined net/bluetooth/hci_core.c
index d2b3dd32d6cf1c6469b9fc728c62a625ac1c9b67,e837539452fb0e2880d8335da7769752a3b08110..e4e53bd663dfb2392f912cc3b92f11dfb213ee55
@@@ -134,66 -134,6 +134,66 @@@ static const struct file_operations dut
        .llseek         = default_llseek,
  };
  
 +static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
 +                              size_t count, loff_t *ppos)
 +{
 +      struct hci_dev *hdev = file->private_data;
 +      char buf[3];
 +
 +      buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
 +      buf[1] = '\n';
 +      buf[2] = '\0';
 +      return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 +}
 +
 +static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
 +                               size_t count, loff_t *ppos)
 +{
 +      struct hci_dev *hdev = file->private_data;
 +      char buf[32];
 +      size_t buf_size = min(count, (sizeof(buf)-1));
 +      bool enable;
 +      int err;
 +
 +      if (copy_from_user(buf, user_buf, buf_size))
 +              return -EFAULT;
 +
 +      buf[buf_size] = '\0';
 +      if (strtobool(buf, &enable))
 +              return -EINVAL;
 +
 +      hci_req_lock(hdev);
 +      err = hdev->set_diag(hdev, enable);
 +      hci_req_unlock(hdev);
 +
 +      if (err < 0)
 +              return err;
 +
 +      if (enable)
 +              hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
 +      else
 +              hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
 +
 +      return count;
 +}
 +
 +static const struct file_operations vendor_diag_fops = {
 +      .open           = simple_open,
 +      .read           = vendor_diag_read,
 +      .write          = vendor_diag_write,
 +      .llseek         = default_llseek,
 +};
 +
 +static void hci_debugfs_create_basic(struct hci_dev *hdev)
 +{
 +      debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
 +                          &dut_mode_fops);
 +
 +      if (hdev->set_diag)
 +              debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
 +                                  &vendor_diag_fops);
 +}
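
vendor_diag_write follows the usual debugfs boolean-toggle shape: copy a short text buffer from user space, NUL-terminate it, parse it as a bool, then set or clear the flag. A user-space approximation of the strtobool step; the accepted spellings below are a simplified subset, not the kernel's exact behaviour:

#include <stdio.h>
#include <string.h>

/* Simplified strtobool(): "1"/"y"/"Y" true, "0"/"n"/"N" false. */
static int parse_bool(const char *buf, int *enable)
{
	if (!strcmp(buf, "1") || !strcmp(buf, "y") || !strcmp(buf, "Y")) {
		*enable = 1;
		return 0;
	}
	if (!strcmp(buf, "0") || !strcmp(buf, "n") || !strcmp(buf, "N")) {
		*enable = 0;
		return 0;
	}
	return -1;	/* the kernel handler returns -EINVAL here */
}

int main(void)
{
	int enable;

	if (!parse_bool("y", &enable))
		printf("enable=%d\n", enable);
	return 0;
}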
 +
  /* ---- HCI requests ---- */
  
  static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
@@@ -753,8 -693,7 +753,8 @@@ static void hci_init3_req(struct hci_re
  
        hci_setup_event_mask(req);
  
 -      if (hdev->commands[6] & 0x20) {
 +      if (hdev->commands[6] & 0x20 &&
 +          !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;
  
                bacpy(&cp.bdaddr, BDADDR_ANY);
@@@ -910,8 -849,13 +910,8 @@@ static int __hci_init(struct hci_dev *h
        if (err < 0)
                return err;
  
 -      /* The Device Under Test (DUT) mode is special and available for
 -       * all controller types. So just create it early on.
 -       */
 -      if (hci_dev_test_flag(hdev, HCI_SETUP)) {
 -              debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
 -                                  &dut_mode_fops);
 -      }
 +      if (hci_dev_test_flag(hdev, HCI_SETUP))
 +              hci_debugfs_create_basic(hdev);
  
        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
@@@ -988,9 -932,6 +988,9 @@@ static int __hci_unconf_init(struct hci
        if (err < 0)
                return err;
  
 +      if (hci_dev_test_flag(hdev, HCI_SETUP))
 +              hci_debugfs_create_basic(hdev);
 +
        return 0;
  }
  
@@@ -1443,9 -1384,6 +1443,9 @@@ static int hci_dev_do_open(struct hci_d
                goto done;
        }
  
 +      set_bit(HCI_RUNNING, &hdev->flags);
 +      hci_notify(hdev, HCI_DEV_OPEN);
 +
        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);
  
                        hdev->sent_cmd = NULL;
                }
  
 +              clear_bit(HCI_RUNNING, &hdev->flags);
 +              hci_notify(hdev, HCI_DEV_CLOSE);
 +
                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }
@@@ -1613,10 -1548,8 +1613,10 @@@ static void hci_pend_le_actions_clear(s
        BT_DBG("All LE pending actions cleared");
  }
  
 -static int hci_dev_do_close(struct hci_dev *hdev)
 +int hci_dev_do_close(struct hci_dev *hdev)
  {
 +      bool auto_off;
 +
        BT_DBG("%s %p", hdev->name, hdev);
  
        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
  
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
  
 -      if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
 -              if (hdev->dev_type == HCI_BREDR)
 -                      mgmt_powered(hdev, 0);
 -      }
 +      auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
 +
 +      if (!auto_off && hdev->dev_type == HCI_BREDR)
 +              mgmt_powered(hdev, 0);
  
        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
 -      if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
 -          !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
 -          test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
 +      if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
 +          !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
                hdev->sent_cmd = NULL;
        }
  
 +      clear_bit(HCI_RUNNING, &hdev->flags);
 +      hci_notify(hdev, HCI_DEV_CLOSE);
 +
        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);
@@@ -2930,13 -2861,6 +2930,6 @@@ struct hci_conn_params *hci_explicit_co
                        return param;
        }
  
-       list_for_each_entry(param, &hdev->pend_le_reports, action) {
-               if (bacmp(&param->addr, addr) == 0 &&
-                   param->addr_type == addr_type &&
-                   param->explicit_connect)
-                       return param;
-       }
        return NULL;
  }
  
@@@ -3539,13 -3463,6 +3532,13 @@@ int hci_recv_frame(struct hci_dev *hdev
                return -ENXIO;
        }
  
 +      if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
 +          bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
 +          bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
 +              kfree_skb(skb);
 +              return -EINVAL;
 +      }
 +
        /* Incoming skb */
        bt_cb(skb)->incoming = 1;
  
  }
  EXPORT_SYMBOL(hci_recv_frame);
  
 +/* Receive diagnostic message from HCI drivers */
 +int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
 +{
 +      /* Time stamp */
 +      __net_timestamp(skb);
 +
 +      /* Mark as diagnostic packet and send to monitor */
 +      bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
 +      hci_send_to_monitor(hdev, skb);
 +
 +      kfree_skb(skb);
 +      return 0;
 +}
 +EXPORT_SYMBOL(hci_recv_diag);
 +
  /* ---- Interface to upper protocols ---- */
  
  int hci_register_cb(struct hci_cb *cb)
@@@ -3620,11 -3522,6 +3613,11 @@@ static void hci_send_frame(struct hci_d
        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);
  
 +      if (!test_bit(HCI_RUNNING, &hdev->flags)) {
 +              kfree_skb(skb);
 +              return;
 +      }
 +
        err = hdev->send(hdev, skb);
        if (err < 0) {
                BT_ERR("%s sending frame failed (%d)", hdev->name, err);
@@@ -3675,25 -3572,6 +3668,25 @@@ void *hci_sent_cmd_data(struct hci_dev 
        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
  }
  
 +/* Send HCI command and wait for command complete event */
 +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
 +                           const void *param, u32 timeout)
 +{
 +      struct sk_buff *skb;
 +
 +      if (!test_bit(HCI_UP, &hdev->flags))
 +              return ERR_PTR(-ENETDOWN);
 +
 +      bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
 +
 +      hci_req_lock(hdev);
 +      skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
 +      hci_req_unlock(hdev);
 +
 +      return skb;
 +}
 +EXPORT_SYMBOL(hci_cmd_sync);
 +
  /* Send ACL data */
  static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
  {
index 8acec932123ade906192ebbfd2daecee9b51f697,bc31099d3b5bd113a8496b2ccad9e3e40364187f..b4571d84cafa809777f9c7ba3a0a21f60595ed9d
@@@ -55,7 -55,12 +55,12 @@@ static void hci_cc_inquiry_cancel(struc
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
  
        hci_dev_lock(hdev);
-       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       /* Set discovery state to stopped if we're not doing LE active
+        * scanning.
+        */
+       if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+           hdev->le_scan_type != LE_SCAN_ACTIVE)
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
  
        hci_conn_check_pending(hdev);
@@@ -4648,8 -4653,8 +4653,8 @@@ static struct hci_conn *check_pending_l
        /* If we're not connectable only connect devices that we have in
         * our pend_le_conns list.
         */
-       params = hci_explicit_connect_lookup(hdev, addr, addr_type);
+       params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
+                                          addr_type);
        if (!params)
                return NULL;
  
@@@ -4719,27 -4724,6 +4724,27 @@@ static void process_adv_report(struct h
        struct hci_conn *conn;
        bool match;
        u32 flags;
 +      u8 *ptr, real_len;
 +
 +      /* Find the end of the data in case the report contains zero padding
 +       * bytes at the end, which would make the length value invalid.
 +       *
 +       * When data is NULL, len is 0, so no extra ptr check is needed:
 +       * 'ptr < data + 0' is already false in that case.
 +       */
 +      for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
 +              if (ptr + 1 + *ptr > data + len)
 +                      break;
 +      }
 +
 +      real_len = ptr - data;
 +
 +      /* Adjust for actual length */
 +      if (len != real_len) {
 +              BT_ERR_RATELIMITED("%s advertising data length corrected",
 +                                 hdev->name);
 +              len = real_len;
 +      }
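
Advertising data is a sequence of length-prefixed records, so the loop walks [len][type][payload] units until it hits zero padding or a record that would overrun the buffer. A runnable user-space sketch of the same scan:

#include <stdio.h>

/* Walk length-prefixed records and stop at zero padding or truncation,
 * mirroring the real_len computation above (simplified types). */
static unsigned int ad_real_len(const unsigned char *data, unsigned int len)
{
	const unsigned char *ptr;

	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)
			break;	/* record claims more bytes than remain */
	}
	return (unsigned int)(ptr - data);
}

int main(void)
{
	/* One 3-byte record (len = 2), then zero padding. */
	unsigned char report[] = { 0x02, 0x01, 0x06, 0x00, 0x00, 0x00 };

	printf("real_len=%u of %zu\n",
	       ad_real_len(report, sizeof(report)), sizeof(report));
	return 0;
}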
  
        /* If the direct address is present, then this report is from
         * a LE Direct Advertising Report event. In that case it is
diff --combined net/core/filter.c
index 0b00094932ab46f20b9c1ae6948322c5dd130afc,bb18c368000129ebea752a0a78785524848eaab8..672eefbfbe99fff2ade1bd2a095fb2366a2d2c0b
  #include <net/sch_generic.h>
  #include <net/cls_cgroup.h>
  #include <net/dst_metadata.h>
 +#include <net/dst.h>
  
  /**
   *    sk_filter - run a packet through a socket filter
   *    @sk: sock associated with &sk_buff
   *    @skb: buffer to filter
   *
 - * Run the filter code and then cut skb->data to correct size returned by
 - * SK_RUN_FILTER. If pkt_len is 0 we toss packet. If skb->len is smaller
 + * Run the eBPF program and then cut skb->data to the correct size returned
 + * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
   * than pkt_len we keep whole skb->data. This is the socket level
 - * wrapper to SK_RUN_FILTER. It returns 0 if the packet should
 + * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
   * be accepted or -EPERM if the packet should be tossed.
   *
   */
@@@ -83,7 -82,7 +83,7 @@@ int sk_filter(struct sock *sk, struct s
        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
 -              unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
 +              unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
  
                err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
        }
@@@ -149,6 -148,12 +149,6 @@@ static u64 __get_raw_cpu_id(u64 ctx, u6
        return raw_smp_processor_id();
  }
  
 -/* note that this only generates 32-bit random numbers */
 -static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 -{
 -      return prandom_u32();
 -}
 -
  static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                              struct bpf_insn *insn_buf)
  {
@@@ -307,8 -312,7 +307,8 @@@ static bool convert_bpf_extensions(stru
                        *insn = BPF_EMIT_CALL(__get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
 -                      *insn = BPF_EMIT_CALL(__get_random_u32);
 +                      *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
 +                      bpf_user_rnd_init_once();
                        break;
                }
                break;
@@@ -997,7 -1001,7 +997,7 @@@ static struct bpf_prog *bpf_prepare_fil
        int err;
  
        fp->bpf_func = NULL;
 -      fp->jited = false;
 +      fp->jited = 0;
  
        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
@@@ -1079,18 -1083,16 +1079,18 @@@ EXPORT_SYMBOL_GPL(bpf_prog_create)
   *    @pfp: the unattached filter that is created
   *    @fprog: the filter program
   *    @trans: post-classic verifier transformation handler
 + *    @save_orig: save classic BPF program
   *
   * This function effectively does the same as bpf_prog_create(), only
   * that it builds up its insns buffer from user space provided buffer.
   * It also allows for passing a bpf_aux_classic_check_t handler.
   */
  int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
 -                            bpf_aux_classic_check_t trans)
 +                            bpf_aux_classic_check_t trans, bool save_orig)
  {
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct bpf_prog *fp;
 +      int err;
  
        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL)
        }
  
        fp->len = fprog->len;
 -      /* Since unattached filters are not copied back to user
 -       * space through sk_get_filter(), we do not need to hold
 -       * a copy here, and can spare us the work.
 -       */
        fp->orig_prog = NULL;
  
 +      if (save_orig) {
 +              err = bpf_prog_store_orig_filter(fp, fprog);
 +              if (err) {
 +                      __bpf_prog_free(fp);
 +                      return -ENOMEM;
 +              }
 +      }
 +
        /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
@@@ -1406,6 -1404,9 +1406,6 @@@ static u64 bpf_clone_redirect(u64 r1, u
        if (unlikely(!dev))
                return -EINVAL;
  
 -      if (unlikely(!(dev->flags & IFF_UP)))
 -              return -EINVAL;
 -
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!skb2))
                return -ENOMEM;
                return dev_forward_skb(dev, skb2);
  
        skb2->dev = dev;
+       skb_sender_cpu_clear(skb2);
        return dev_queue_xmit(skb2);
  }
  
@@@ -1426,49 -1428,6 +1427,49 @@@ const struct bpf_func_proto bpf_clone_r
        .arg3_type      = ARG_ANYTHING,
  };
  
 +struct redirect_info {
 +      u32 ifindex;
 +      u32 flags;
 +};
 +
 +static DEFINE_PER_CPU(struct redirect_info, redirect_info);
 +static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +
 +      ri->ifindex = ifindex;
 +      ri->flags = flags;
 +      return TC_ACT_REDIRECT;
 +}
 +
 +int skb_do_redirect(struct sk_buff *skb)
 +{
 +      struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 +      struct net_device *dev;
 +
 +      dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
 +      ri->ifindex = 0;
 +      if (unlikely(!dev)) {
 +              kfree_skb(skb);
 +              return -EINVAL;
 +      }
 +
 +      if (BPF_IS_REDIRECT_INGRESS(ri->flags))
 +              return dev_forward_skb(dev, skb);
 +
 +      skb->dev = dev;
 +      skb_sender_cpu_clear(skb);
 +      return dev_queue_xmit(skb);
 +}
 +
 +const struct bpf_func_proto bpf_redirect_proto = {
 +      .func           = bpf_redirect,
 +      .gpl_only       = false,
 +      .ret_type       = RET_INTEGER,
 +      .arg1_type      = ARG_ANYTHING,
 +      .arg2_type      = ARG_ANYTHING,
 +};
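
bpf_redirect() only records the target in per-CPU scratch and returns TC_ACT_REDIRECT; skb_do_redirect() then consumes that record on the same CPU, clearing it so a stale entry cannot be reused. A single-threaded sketch of the record-then-consume split, with illustrative constants:

#include <stdio.h>

/* Per-CPU scratch in the kernel; one slot suffices for this sketch. */
struct redirect_info_s {
	unsigned int ifindex;
	unsigned int flags;
};

static struct redirect_info_s ri;

/* Record the target and defer the work (the bpf_redirect() side). */
static int redirect(unsigned int ifindex, unsigned int flags)
{
	ri.ifindex = ifindex;
	ri.flags = flags;
	return 7;	/* stands in for TC_ACT_REDIRECT */
}

/* Consume the recorded target exactly once (the skb_do_redirect() side). */
static int do_redirect(void)
{
	unsigned int ifindex = ri.ifindex;

	ri.ifindex = 0;
	if (!ifindex)
		return -1;	/* mirrors the -EINVAL path above */
	printf("xmit via ifindex %u\n", ifindex);
	return 0;
}

int main(void)
{
	redirect(4, 0);
	return do_redirect();
}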
 +
  static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
  {
        return task_get_classid((struct sk_buff *) (unsigned long) r1);
@@@ -1481,25 -1440,6 +1482,25 @@@ static const struct bpf_func_proto bpf_
        .arg1_type      = ARG_PTR_TO_CTX,
  };
  
 +static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 +{
 +#ifdef CONFIG_IP_ROUTE_CLASSID
 +      const struct dst_entry *dst;
 +
 +      dst = skb_dst((struct sk_buff *) (unsigned long) r1);
 +      if (dst)
 +              return dst->tclassid;
 +#endif
 +      return 0;
 +}
 +
 +static const struct bpf_func_proto bpf_get_route_realm_proto = {
 +      .func           = bpf_get_route_realm,
 +      .gpl_only       = false,
 +      .ret_type       = RET_INTEGER,
 +      .arg1_type      = ARG_PTR_TO_CTX,
 +};
 +
  static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
  {
        struct sk_buff *skb = (struct sk_buff *) (long) r1;
@@@ -1640,8 -1580,7 +1641,8 @@@ sk_filter_func_proto(enum bpf_func_id f
        case BPF_FUNC_ktime_get_ns:
                return &bpf_ktime_get_ns_proto;
        case BPF_FUNC_trace_printk:
 -              return bpf_get_trace_printk_proto();
 +              if (capable(CAP_SYS_ADMIN))
 +                      return bpf_get_trace_printk_proto();
        default:
                return NULL;
        }
@@@ -1669,10 -1608,6 +1670,10 @@@ tc_cls_act_func_proto(enum bpf_func_id 
                return &bpf_skb_get_tunnel_key_proto;
        case BPF_FUNC_skb_set_tunnel_key:
                return bpf_get_skb_set_tunnel_key_proto();
 +      case BPF_FUNC_redirect:
 +              return &bpf_redirect_proto;
 +      case BPF_FUNC_get_route_realm:
 +              return &bpf_get_route_realm_proto;
        default:
                return sk_filter_func_proto(func_id);
        }
@@@ -1698,9 -1633,6 +1699,9 @@@ static bool __is_valid_access(int off, 
  static bool sk_filter_is_valid_access(int off, int size,
                                      enum bpf_access_type type)
  {
 +      if (off == offsetof(struct __sk_buff, tc_classid))
 +              return false;
 +
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, cb[0]) ...
  static bool tc_cls_act_is_valid_access(int off, int size,
                                       enum bpf_access_type type)
  {
 +      if (off == offsetof(struct __sk_buff, tc_classid))
 +              return type == BPF_WRITE ? true : false;
 +
        if (type == BPF_WRITE) {
                switch (off) {
                case offsetof(struct __sk_buff, mark):
                case offsetof(struct __sk_buff, tc_index):
 +              case offsetof(struct __sk_buff, priority):
                case offsetof(struct __sk_buff, cb[0]) ...
                        offsetof(struct __sk_buff, cb[4]):
                        break;
  
  static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                                      int src_reg, int ctx_off,
 -                                    struct bpf_insn *insn_buf)
 +                                    struct bpf_insn *insn_buf,
 +                                    struct bpf_prog *prog)
  {
        struct bpf_insn *insn = insn_buf;
  
        case offsetof(struct __sk_buff, priority):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
  
 -              *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
 -                                    offsetof(struct sk_buff, priority));
 +              if (type == BPF_WRITE)
 +                      *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
 +                                            offsetof(struct sk_buff, priority));
 +              else
 +                      *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
 +                                            offsetof(struct sk_buff, priority));
                break;
  
        case offsetof(struct __sk_buff, ingress_ifindex):
                offsetof(struct __sk_buff, cb[4]):
                BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
  
 +              prog->cb_access = 1;
                ctx_off -= offsetof(struct __sk_buff, cb[0]);
                ctx_off += offsetof(struct sk_buff, cb);
                ctx_off += offsetof(struct qdisc_skb_cb, data);
                        *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
                break;
  
 +      case offsetof(struct __sk_buff, tc_classid):
 +              ctx_off -= offsetof(struct __sk_buff, tc_classid);
 +              ctx_off += offsetof(struct sk_buff, cb);
 +              ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
 +              WARN_ON(type != BPF_WRITE);
 +              *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
 +              break;
 +
        case offsetof(struct __sk_buff, tc_index):
  #ifdef CONFIG_NET_SCHED
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
@@@ -1941,9 -1855,13 +1942,13 @@@ int sk_get_filter(struct sock *sk, stru
                goto out;
  
        /* We're copying the filter that has been originally attached,
-        * so no conversion/decode needed anymore.
+        * so no conversion/decode needed anymore. eBPF programs that
+        * have no original program cannot be dumped through this.
         */
+       ret = -EACCES;
        fprog = filter->prog->orig_prog;
+       if (!fprog)
+               goto out;
  
        ret = fprog->len;
        if (!len)
diff --combined net/dsa/dsa.c
index aa398bcef9e30f39774afc2f4bdb060071150489,adb5325f49348412efb50f0eeee5a5f0fa1e50ed..1eba07feb34adb451734e18e1c73031c9b7b2e35
@@@ -22,6 -22,7 +22,7 @@@
  #include <linux/of_platform.h>
  #include <linux/of_net.h>
  #include <linux/sysfs.h>
+ #include <linux/phy_fixed.h>
  #include "dsa_priv.h"
  
  char dsa_driver_version[] = "0.1";
@@@ -305,7 -306,7 +306,7 @@@ static int dsa_switch_setup_one(struct 
        if (ret < 0)
                goto out;
  
-       ds->slave_mii_bus = mdiobus_alloc();
+       ds->slave_mii_bus = devm_mdiobus_alloc(parent);
        if (ds->slave_mii_bus == NULL) {
                ret = -ENOMEM;
                goto out;
  
        ret = mdiobus_register(ds->slave_mii_bus);
        if (ret < 0)
-               goto out_free;
+               goto out;
  
  
        /*
  
                ret = dsa_slave_create(ds, parent, i, pd->port_names[i]);
                if (ret < 0) {
 -                      netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n",
 -                                 index, i, pd->port_names[i]);
 +                      netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s): %d\n",
 +                                 index, i, pd->port_names[i], ret);
                        ret = 0;
                }
        }
  
        return ret;
  
- out_free:
-       mdiobus_free(ds->slave_mii_bus);
  out:
-       kfree(ds);
        return ret;
  }
  
@@@ -400,7 -398,7 +398,7 @@@ dsa_switch_setup(struct dsa_switch_tre
        /*
         * Allocate and initialise switch state.
         */
-       ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
+       ds = devm_kzalloc(parent, sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
                return ERR_PTR(-ENOMEM);
  
  
  static void dsa_switch_destroy(struct dsa_switch *ds)
  {
+       struct device_node *port_dn;
+       struct phy_device *phydev;
+       struct dsa_chip_data *cd = ds->pd;
+       int port;
  #ifdef CONFIG_NET_DSA_HWMON
        if (ds->hwmon_dev)
                hwmon_device_unregister(ds->hwmon_dev);
  #endif
+       /* Disable configuration of the CPU and DSA ports */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
+                       continue;
+               port_dn = cd->port_dn[port];
+               if (of_phy_is_fixed_link(port_dn)) {
+                       phydev = of_phy_find_device(port_dn);
+                       if (phydev) {
+                               int addr = phydev->addr;
+                               phy_device_free(phydev);
+                               of_node_put(port_dn);
+                               fixed_phy_del(addr);
+                       }
+               }
+       }
+       /* Destroy network devices for physical switch ports. */
+       for (port = 0; port < DSA_MAX_PORTS; port++) {
+               if (!(ds->phys_port_mask & (1 << port)))
+                       continue;
+               if (!ds->ports[port])
+                       continue;
+               unregister_netdev(ds->ports[port]);
+               free_netdev(ds->ports[port]);
+       }
+       mdiobus_unregister(ds->slave_mii_bus);
  }
  
  #ifdef CONFIG_PM_SLEEP
@@@ -802,10 -837,11 +837,11 @@@ static inline void dsa_of_remove(struc
  }
  #endif
  
- static void dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
-                         struct device *parent, struct dsa_platform_data *pd)
+ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
+                        struct device *parent, struct dsa_platform_data *pd)
  {
        int i;
+       unsigned configured = 0;
  
        dst->pd = pd;
        dst->master_netdev = dev;
                dst->ds[i] = ds;
                if (ds->drv->poll_link != NULL)
                        dst->link_poll_needed = 1;
+               ++configured;
        }
  
+       /*
+        * If no switch was found, exit cleanly
+        */
+       if (!configured)
+               return -EPROBE_DEFER;
        /*
         * If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
                dst->link_poll_timer.expires = round_jiffies(jiffies + HZ);
                add_timer(&dst->link_poll_timer);
        }
+       return 0;
  }
  
  static int dsa_probe(struct platform_device *pdev)
                goto out;
        }
  
-       dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+       dst = devm_kzalloc(&pdev->dev, sizeof(*dst), GFP_KERNEL);
        if (dst == NULL) {
                dev_put(dev);
                ret = -ENOMEM;
  
        platform_set_drvdata(pdev, dst);
  
-       dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       ret = dsa_setup_dst(dst, dev, &pdev->dev, pd);
+       if (ret)
+               goto out;
  
        return 0;
  
@@@ -914,7 -962,7 +962,7 @@@ static void dsa_remove_dst(struct dsa_s
        for (i = 0; i < dst->pd->nr_chips; i++) {
                struct dsa_switch *ds = dst->ds[i];
  
-               if (ds != NULL)
+               if (ds)
                        dsa_switch_destroy(ds);
        }
  }
diff --combined net/ipv4/arp.c
index 01308e6e612735aee02b71b460f9a02e93f8673f,0c9c3482e41997a671c23be7dc0387edb1403916..59b3e0e8fd5110031eff0303b5f75622bdd7d22a
@@@ -312,7 -312,7 +312,7 @@@ static void arp_send_dst(int type, int 
        if (!skb)
                return;
  
-       skb_dst_set(skb, dst);
+       skb_dst_set(skb, dst_clone(dst));
        arp_xmit(skb);
  }
  
@@@ -384,7 -384,7 +384,7 @@@ static void arp_solicit(struct neighbou
        }
  
        if (skb && !(dev->priv_flags & IFF_XMIT_DST_RELEASE))
-               dst = dst_clone(skb_dst(skb));
+               dst = skb_dst(skb);
        arp_send_dst(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
                     dst_hw, dev->dev_addr, NULL, dst);
  }
@@@ -624,20 -624,14 +624,20 @@@ out
  }
  EXPORT_SYMBOL(arp_create);
  
 +static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 +{
 +      return dev_queue_xmit(skb);
 +}
 +
  /*
   *    Send an arp packet.
   */
  void arp_xmit(struct sk_buff *skb)
  {
        /* Send it off, maybe filter it using firewalling first.  */
 -      NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, NULL, skb,
 -              NULL, skb->dev, dev_queue_xmit_sk);
 +      NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
 +              dev_net(skb->dev), NULL, skb, NULL, skb->dev,
 +              arp_xmit_finish);
  }
  EXPORT_SYMBOL(arp_xmit);
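
The NF_HOOK refactor visible here threads struct net explicitly to the continuation instead of each callback re-deriving it from skb->dev per packet. A compact sketch of passing context through a hook continuation; all names are invented:

#include <stdio.h>

struct net { const char *name; };

/* The continuation receives the net context explicitly. */
typedef int (*okfn_t)(struct net *net, const char *pkt);

static int xmit_finish(struct net *net, const char *pkt)
{
	printf("[%s] xmit %s\n", net->name, pkt);
	return 0;
}

/* Stand-in for NF_HOOK: run a verdict, then hand off with the context. */
static int hook(struct net *net, const char *pkt, okfn_t okfn)
{
	/* a real hook would consult filter rules before continuing */
	return okfn(net, pkt);
}

int main(void)
{
	struct net init_net = { "init_net" };

	return hook(&init_net, "arp-request", xmit_finish);
}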
  
   *    Process an arp request.
   */
  
 -static int arp_process(struct sock *sk, struct sk_buff *skb)
 +static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        struct net_device *dev = skb->dev;
        struct in_device *in_dev = __in_dev_get_rcu(dev);
        u16 dev_type = dev->type;
        int addr_type;
        struct neighbour *n;
 -      struct net *net = dev_net(dev);
        struct dst_entry *reply_dst = NULL;
        bool is_garp = false;
  
                                } else {
                                        pneigh_enqueue(&arp_tbl,
                                                       in_dev->arp_parms, skb);
-                                       return 0;
+                                       goto out_free_dst;
                                }
                                goto out;
                        }
  
  out:
        consume_skb(skb);
+ out_free_dst:
+       dst_release(reply_dst);
        return 0;
  }
  
  static void parp_redo(struct sk_buff *skb)
  {
 -      arp_process(NULL, skb);
 +      arp_process(dev_net(skb->dev), NULL, skb);
  }
  
  
@@@ -908,9 -905,8 +910,9 @@@ static int arp_rcv(struct sk_buff *skb
  
        memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
  
 -      return NF_HOOK(NFPROTO_ARP, NF_ARP_IN, NULL, skb,
 -                     dev, NULL, arp_process);
 +      return NF_HOOK(NFPROTO_ARP, NF_ARP_IN,
 +                     dev_net(dev), NULL, skb, dev, NULL,
 +                     arp_process);
  
  consumeskb:
        consume_skb(skb);
diff --combined net/ipv6/addrconf.c
index f0326aae7a02dee3a37c3f49f99a67abe251f2d6,36b85bd05ac8a320b1b910c7d3454da1139ecd3c..d135350495e8deea6ab10363f9899ac6dc701142
@@@ -81,7 -81,6 +81,7 @@@
  #include <net/ip.h>
  #include <net/netlink.h>
  #include <net/pkt_sched.h>
 +#include <net/l3mdev.h>
  #include <linux/if_tunnel.h>
  #include <linux/rtnetlink.h>
  #include <linux/netconf.h>
@@@ -2147,7 -2146,7 +2147,7 @@@ addrconf_prefix_route(struct in6_addr *
                      unsigned long expires, u32 flags)
  {
        struct fib6_config cfg = {
 -              .fc_table = RT6_TABLE_PREFIX,
 +              .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_expires = expires,
@@@ -2180,9 -2179,8 +2180,9 @@@ static struct rt6_info *addrconf_get_pr
        struct fib6_node *fn;
        struct rt6_info *rt = NULL;
        struct fib6_table *table;
 +      u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
  
 -      table = fib6_get_table(dev_net(dev), RT6_TABLE_PREFIX);
 +      table = fib6_get_table(dev_net(dev), tb_id);
        if (!table)
                return NULL;
  
@@@ -2213,7 -2211,7 +2213,7 @@@ out
  static void addrconf_add_mroute(struct net_device *dev)
  {
        struct fib6_config cfg = {
 -              .fc_table = RT6_TABLE_LOCAL,
 +              .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
                .fc_metric = IP6_RT_PRIO_ADDRCONF,
                .fc_ifindex = dev->ifindex,
                .fc_dst_len = 8,
@@@ -3031,10 -3029,6 +3031,10 @@@ static void addrconf_addr_gen(struct in
  {
        struct in6_addr addr;
  
 +      /* no link local addresses on L3 master devices */
 +      if (netif_is_l3_master(idev->dev))
 +              return;
 +
        ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
  
        if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY) {
@@@ -3125,6 -3119,8 +3125,8 @@@ static void addrconf_gre_config(struct 
        }
  
        addrconf_addr_gen(idev, true);
+       if (dev->flags & IFF_POINTOPOINT)
+               addrconf_add_mroute(dev);
  }
  #endif
  
@@@ -3631,7 -3627,7 +3633,7 @@@ static void addrconf_dad_work(struct wo
  
        /* send a neighbour solicitation for our addr */
        addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
 -      ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any, NULL);
 +      ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any, NULL);
  out:
        in6_ifa_put(ifp);
        rtnl_unlock();
@@@ -4735,8 -4731,7 +4737,8 @@@ static void snmp6_fill_stats(u64 *stats
        }
  }
  
 -static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
 +static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
 +                                u32 ext_filter_mask)
  {
        struct nlattr *nla;
        struct ifla_cacheinfo ci;
  
        /* XXX - MC not implemented */
  
 +      if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
 +              return 0;
 +
        nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
        if (!nla)
                goto nla_put_failure;
@@@ -4794,15 -4786,14 +4796,15 @@@ static size_t inet6_get_link_af_size(co
        return inet6_ifla6_size();
  }
  
 -static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 +static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
 +                            u32 ext_filter_mask)
  {
        struct inet6_dev *idev = __in6_dev_get(dev);
  
        if (!idev)
                return -ENODATA;
  
 -      if (inet6_fill_ifla6_attrs(skb, idev) < 0)
 +      if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
                return -EMSGSIZE;
  
        return 0;
@@@ -4957,7 -4948,7 +4959,7 @@@ static int inet6_fill_ifinfo(struct sk_
        if (!protoinfo)
                goto nla_put_failure;
  
 -      if (inet6_fill_ifla6_attrs(skb, idev) < 0)
 +      if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
                goto nla_put_failure;
  
        nla_nest_end(skb, protoinfo);
diff --combined net/ipv6/ip6_output.c
index 23f97c4783bbaf7a810eb77992bceab20ddb6925,61d403ee1031caa5536d9298abd4604606dff6f5..0c89671e0767e5debe909d654cd9d089bcf8fa19
@@@ -55,9 -55,8 +55,9 @@@
  #include <net/xfrm.h>
  #include <net/checksum.h>
  #include <linux/mroute6.h>
 +#include <net/l3mdev.h>
  
 -static int ip6_finish_output2(struct sock *sk, struct sk_buff *skb)
 +static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *dev = dst->dev;
@@@ -72,7 -71,7 +72,7 @@@
                struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
  
                if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
 -                  ((mroute6_socket(dev_net(dev), skb) &&
 +                  ((mroute6_socket(net, skb) &&
                     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
                     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
                                         &ipv6_hdr(skb)->saddr))) {
                         */
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 -                                      sk, newskb, NULL, newskb->dev,
 +                                      net, sk, newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
  
                        if (ipv6_hdr(skb)->hop_limit == 0) {
 -                              IP6_INC_STATS(dev_net(dev), idev,
 +                              IP6_INC_STATS(net, idev,
                                              IPSTATS_MIB_OUTDISCARDS);
                                kfree_skb(skb);
                                return 0;
                        }
                }
  
 -              IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
 -                              skb->len);
 +              IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
  
                if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
                    IPV6_ADDR_SCOPE_NODELOCAL &&
        }
        rcu_read_unlock_bh();
  
 -      IP6_INC_STATS(dev_net(dst->dev),
 -                    ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
 +      IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EINVAL;
  }
  
 -static int ip6_finish_output(struct sock *sk, struct sk_buff *skb)
 +static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
            dst_allfrag(skb_dst(skb)) ||
            (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
 -              return ip6_fragment(sk, skb, ip6_finish_output2);
 +              return ip6_fragment(net, sk, skb, ip6_finish_output2);
        else
 -              return ip6_finish_output2(sk, skb);
 +              return ip6_finish_output2(net, sk, skb);
  }
  
 -int ip6_output(struct sock *sk, struct sk_buff *skb)
 +int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        struct net_device *dev = skb_dst(skb)->dev;
        struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
 +
        if (unlikely(idev->cnf.disable_ipv6)) {
 -              IP6_INC_STATS(dev_net(dev), idev,
 -                            IPSTATS_MIB_OUTDISCARDS);
 +              IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
                kfree_skb(skb);
                return 0;
        }
  
 -      return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, sk, skb,
 -                          NULL, dev,
 +      return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 +                          net, sk, skb, NULL, dev,
                            ip6_finish_output,
                            !(IP6CB(skb)->flags & IP6SKB_REROUTED));
  }
  
  /*
 - *    xmit an sk_buff (used by TCP, SCTP and DCCP)
 + * xmit an sk_buff (used by TCP, SCTP and DCCP)
 + * Note: the socket lock is not held for SYNACK packets, but the socket
 + * might still be modified by calls to skb_set_owner_w() and
 + * ipv6_local_error(), which use proper atomic operations or spinlocks.
   */
 -
 -int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 +int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
             struct ipv6_txoptions *opt, int tclass)
  {
        struct net *net = sock_net(sk);
 -      struct ipv6_pinfo *np = inet6_sk(sk);
 +      const struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
        struct ipv6hdr *hdr;
                        }
                        consume_skb(skb);
                        skb = skb2;
 -                      skb_set_owner_w(skb, sk);
 +                      /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
 +                       * it is safe to call in our context (socket lock not held)
 +                       */
 +                      skb_set_owner_w(skb, (struct sock *)sk);
                }
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
        if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
                IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
                              IPSTATS_MIB_OUT, skb->len);
 -              return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, sk, skb,
 -                             NULL, dst->dev, dst_output_sk);
 +              /* hooks should never assume socket lock is held.
 +               * we promote our socket to non const
 +               */
 +              return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
 +                             net, (struct sock *)sk, skb, NULL, dst->dev,
 +                             dst_output);
        }
  
        skb->dev = dst->dev;
 -      ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
 +      /* ipv6_local_error() does not require the socket lock,
 +       * so we promote our socket to non-const here
 +       */
 +      ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
 +
        IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
        kfree_skb(skb);
        return -EMSGSIZE;
@@@ -329,11 -317,10 +329,11 @@@ static int ip6_forward_proxy_check(stru
        return 0;
  }
  
 -static inline int ip6_forward_finish(struct sock *sk, struct sk_buff *skb)
 +static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 +                                   struct sk_buff *skb)
  {
        skb_sender_cpu_clear(skb);
 -      return dst_output_sk(sk, skb);
 +      return dst_output(net, sk, skb);
  }
  
  static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@@ -389,6 -376,9 +389,9 @@@ int ip6_forward(struct sk_buff *skb
        if (skb->pkt_type != PACKET_HOST)
                goto drop;
  
+       if (unlikely(skb->sk))
+               goto drop;
+
        if (skb_warn_if_lro(skb))
                goto drop;
  
  
        IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
        IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 -      return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, NULL, skb,
 -                     skb->dev, dst->dev,
 +      return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 +                     net, NULL, skb, skb->dev, dst->dev,
                       ip6_forward_finish);
  
  error:
@@@ -553,8 -543,8 +556,8 @@@ static void ip6_copy_metadata(struct sk
        skb_copy_secmark(to, from);
  }
  
 -int ip6_fragment(struct sock *sk, struct sk_buff *skb,
 -               int (*output)(struct sock *, struct sk_buff *))
 +int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 +               int (*output)(struct net *, struct sock *, struct sk_buff *))
  {
        struct sk_buff *frag;
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        __be32 frag_id;
        int ptr, offset = 0, err = 0;
        u8 *prevhdr, nexthdr = 0;
 -      struct net *net = dev_net(skb_dst(skb)->dev);
  
        hlen = ip6_find_1stfragopt(skb, &prevhdr);
        nexthdr = *prevhdr;
                                ip6_copy_metadata(frag, skb);
                        }
  
 -                      err = output(sk, skb);
 +                      err = output(net, sk, skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
                                              IPSTATS_MIB_FRAGCREATES);
@@@ -814,7 -805,7 +817,7 @@@ slow_path
                /*
                 *      Put this fragment into the sending queue.
                 */
 -              err = output(sk, frag);
 +              err = output(net, sk, frag);
                if (err)
                        goto fail;
  
@@@ -886,8 -877,7 +889,8 @@@ static struct dst_entry *ip6_sk_dst_che
  #ifdef CONFIG_IPV6_SUBTREES
            ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
  #endif
 -          (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
 +         (!(fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF) &&
 +            (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex))) {
                dst_release(dst);
                dst = NULL;
        }
@@@ -896,7 -886,7 +899,7 @@@ out
        return dst;
  }
  
 -static int ip6_dst_lookup_tail(struct net *net, struct sock *sk,
 +static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                               struct dst_entry **dst, struct flowi6 *fl6)
  {
  #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@@ -1027,7 -1017,7 +1030,7 @@@ EXPORT_SYMBOL_GPL(ip6_dst_lookup)
   *    It returns a valid dst pointer on success, or a pointer encoded
   *    error code.
   */
 -struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
 +struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
                                      const struct in6_addr *final_dst)
  {
        struct dst_entry *dst = NULL;
        if (final_dst)
                fl6->daddr = *final_dst;
        if (!fl6->flowi6_oif)
 -              fl6->flowi6_oif = dst->dev->ifindex;
 +              fl6->flowi6_oif = l3mdev_fib_oif(dst->dev);
  
        return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
  }
@@@ -1693,7 -1683,7 +1696,7 @@@ int ip6_send_skb(struct sk_buff *skb
        struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
        int err;
  
 -      err = ip6_local_out(skb);
 +      err = ip6_local_out(net, skb->sk, skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
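
Taken together, the ip6_output.c hunks switch the IPv6 output path from re-deriving the namespace with dev_net()/sock_net() at every level to threading struct net through explicitly, with NF_HOOK()/NF_HOOK_COND() growing a matching net argument. A minimal sketch of an output callback conforming to the new signature; my_finish_output is an illustrative name, not something this patch adds:

    static int my_finish_output(struct net *net, struct sock *sk,
                                struct sk_buff *skb)
    {
            /* 'net' comes from the caller, not dev_net(skb_dst(skb)->dev) */
            return dst_output(net, sk, skb);  /* was dst_output_sk(sk, skb) */
    }

and the matching hook invocation under the new NF_HOOK() signature:

    return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
                   net, sk, skb, NULL, skb_dst(skb)->dev,
                   my_finish_output);
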
diff --combined net/ipv6/route.c
index 5fc1149fe91d85bb26c18ac5a74d1728146d8b78,968f31c01f89e8ccc6a4a13c056f8be3dcc0b7c6..d0619632723a298cf2374874b2f4fb03fa18640f
@@@ -61,7 -61,6 +61,7 @@@
  #include <net/nexthop.h>
  #include <net/lwtunnel.h>
  #include <net/ip_tunnels.h>
 +#include <net/l3mdev.h>
  
  #include <asm/uaccess.h>
  
@@@ -87,9 -86,9 +87,9 @@@ static void           ip6_dst_ifdown(struct dst_
  static int             ip6_dst_gc(struct dst_ops *ops);
  
  static int            ip6_pkt_discard(struct sk_buff *skb);
 -static int            ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
 +static int            ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
  static int            ip6_pkt_prohibit(struct sk_buff *skb);
 -static int            ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
 +static int            ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
  static void           ip6_link_failure(struct sk_buff *skb);
  static void           ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
                                           struct sk_buff *skb, u32 mtu);
@@@ -143,6 -142,9 +143,9 @@@ static void rt6_uncached_list_flush_dev
        struct net_device *loopback_dev = net->loopback_dev;
        int cpu;
  
+       if (dev == loopback_dev)
+               return;
+
        for_each_possible_cpu(cpu) {
                struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
                struct rt6_info *rt;
                        struct inet6_dev *rt_idev = rt->rt6i_idev;
                        struct net_device *rt_dev = rt->dst.dev;
  
-                       if (rt_idev && (rt_idev->dev == dev || !dev) &&
-                           rt_idev->dev != loopback_dev) {
+                       if (rt_idev->dev == dev) {
                                rt->rt6i_idev = in6_dev_get(loopback_dev);
                                in6_dev_put(rt_idev);
                        }
  
-                       if (rt_dev && (rt_dev == dev || !dev) &&
-                           rt_dev != loopback_dev) {
+                       if (rt_dev == dev) {
                                rt->dst.dev = loopback_dev;
                                dev_hold(rt->dst.dev);
                                dev_put(rt_dev);
@@@ -248,12 -248,6 +249,6 @@@ static void ip6_rt_blackhole_redirect(s
  {
  }
  
- static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
-                                        unsigned long old)
- {
-       return NULL;
- }
  static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
        .destroy                =       ip6_dst_destroy,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .redirect               =       ip6_rt_blackhole_redirect,
-       .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
+       .cow_metrics            =       dst_cow_metrics_generic,
        .neigh_lookup           =       ip6_neigh_lookup,
  };
  
@@@ -309,7 -303,7 +304,7 @@@ static const struct rt6_info ip6_blk_ho
                .obsolete       = DST_OBSOLETE_FORCE_CHK,
                .error          = -EINVAL,
                .input          = dst_discard,
 -              .output         = dst_discard_sk,
 +              .output         = dst_discard_out,
        },
        .rt6i_flags     = (RTF_REJECT | RTF_NONEXTHOP),
        .rt6i_protocol  = RTPROT_KERNEL,
  
  #endif
  
+ static void rt6_info_init(struct rt6_info *rt)
+ {
+       struct dst_entry *dst = &rt->dst;
+
+       memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+       INIT_LIST_HEAD(&rt->rt6i_siblings);
+       INIT_LIST_HEAD(&rt->rt6i_uncached);
+ }
+
  /* allocate dst with ip6_dst_ops */
  static struct rt6_info *__ip6_dst_alloc(struct net *net,
                                        struct net_device *dev,
        struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
                                        0, DST_OBSOLETE_FORCE_CHK, flags);
  
-       if (rt) {
-               struct dst_entry *dst = &rt->dst;
+       if (rt)
+               rt6_info_init(rt);
  
-               memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
-               INIT_LIST_HEAD(&rt->rt6i_siblings);
-               INIT_LIST_HEAD(&rt->rt6i_uncached);
-       }
        return rt;
  }
  
@@@ -422,7 -421,31 +422,7 @@@ static bool rt6_check_expired(const str
  static int rt6_info_hash_nhsfn(unsigned int candidate_count,
                               const struct flowi6 *fl6)
  {
 -      unsigned int val = fl6->flowi6_proto;
 -
 -      val ^= ipv6_addr_hash(&fl6->daddr);
 -      val ^= ipv6_addr_hash(&fl6->saddr);
 -
 -      /* Work only if this not encapsulated */
 -      switch (fl6->flowi6_proto) {
 -      case IPPROTO_UDP:
 -      case IPPROTO_TCP:
 -      case IPPROTO_SCTP:
 -              val ^= (__force u16)fl6->fl6_sport;
 -              val ^= (__force u16)fl6->fl6_dport;
 -              break;
 -
 -      case IPPROTO_ICMPV6:
 -              val ^= (__force u16)fl6->fl6_icmp_type;
 -              val ^= (__force u16)fl6->fl6_icmp_code;
 -              break;
 -      }
 -      /* RFC6438 recommands to use flowlabel */
 -      val ^= (__force u32)fl6->flowlabel;
 -
 -      /* Perhaps, we need to tune, this function? */
 -      val = val ^ (val >> 7) ^ (val >> 12);
 -      return val % candidate_count;
 +      return get_hash_from_flowi6(fl6) % candidate_count;
  }
  
  static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
@@@ -475,10 -498,10 +475,10 @@@ static inline struct rt6_info *rt6_devi
                        if (dev->flags & IFF_LOOPBACK) {
                                if (!sprt->rt6i_idev ||
                                    sprt->rt6i_idev->dev->ifindex != oif) {
 -                                      if (flags & RT6_LOOKUP_F_IFACE && oif)
 +                                      if (flags & RT6_LOOKUP_F_IFACE)
                                                continue;
 -                                      if (local && (!oif ||
 -                                                    local->rt6i_idev->dev->ifindex == oif))
 +                                      if (local &&
 +                                          local->rt6i_idev->dev->ifindex == oif)
                                                continue;
                                }
                                local = sprt;
@@@ -515,7 -538,7 +515,7 @@@ static void rt6_probe_deferred(struct w
                container_of(w, struct __rt6_probe_work, work);
  
        addrconf_addr_solict_mult(&work->target, &mcaddr);
 -      ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
 +      ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, NULL);
        dev_put(work->dev);
        kfree(work);
  }
@@@ -1045,9 -1068,6 +1045,9 @@@ static struct rt6_info *ip6_pol_route(s
        fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
        saved_fn = fn;
  
 +      if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
 +              oif = 0;
 +
  redo_rt6_select:
        rt = rt6_select(fn, oif, strict);
        if (rt->rt6i_nsiblings)
@@@ -1145,7 -1165,7 +1145,7 @@@ void ip6_route_input(struct sk_buff *sk
        int flags = RT6_LOOKUP_F_HAS_SADDR;
        struct ip_tunnel_info *tun_info;
        struct flowi6 fl6 = {
 -              .flowi6_iif = skb->dev->ifindex,
 +              .flowi6_iif = l3mdev_fib_oif(skb->dev),
                .daddr = iph->daddr,
                .saddr = iph->saddr,
                .flowlabel = ip6_flowinfo(iph),
@@@ -1169,13 -1189,8 +1169,13 @@@ static struct rt6_info *ip6_pol_route_o
  struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
                                    struct flowi6 *fl6)
  {
 +      struct dst_entry *dst;
        int flags = 0;
  
 +      dst = l3mdev_rt6_dst_by_oif(net, fl6);
 +      if (dst)
 +              return dst;
 +
        fl6->flowi6_iif = LOOPBACK_IFINDEX;
  
        if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
@@@ -1198,24 -1213,20 +1198,20 @@@ struct dst_entry *ip6_blackhole_route(s
  
        rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
        if (rt) {
-               new = &rt->dst;
-               memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+               rt6_info_init(rt);
  
+               new = &rt->dst;
                new->__use = 1;
                new->input = dst_discard;
 -              new->output = dst_discard_sk;
 +              new->output = dst_discard_out;
  
-               if (dst_metrics_read_only(&ort->dst))
-                       new->_metrics = ort->dst._metrics;
-               else
-                       dst_copy_metrics(new, &ort->dst);
+               dst_copy_metrics(new, &ort->dst);
                rt->rt6i_idev = ort->rt6i_idev;
                if (rt->rt6i_idev)
                        in6_dev_hold(rt->rt6i_idev);
  
                rt->rt6i_gateway = ort->rt6i_gateway;
-               rt->rt6i_flags = ort->rt6i_flags;
+               rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
                rt->rt6i_metric = 0;
  
                memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@@ -1733,21 -1744,21 +1729,21 @@@ static int ip6_convert_metrics(struct m
        return -EINVAL;
  }
  
 -int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
 +static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
  {
 -      int err;
        struct net *net = cfg->fc_nlinfo.nl_net;
        struct rt6_info *rt = NULL;
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;
        struct fib6_table *table;
        int addr_type;
 +      int err = -EINVAL;
  
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
 -              return -EINVAL;
 +              goto out;
  #ifndef CONFIG_IPV6_SUBTREES
        if (cfg->fc_src_len)
 -              return -EINVAL;
 +              goto out;
  #endif
        if (cfg->fc_ifindex) {
                err = -ENODEV;
                switch (cfg->fc_type) {
                case RTN_BLACKHOLE:
                        rt->dst.error = -EINVAL;
 -                      rt->dst.output = dst_discard_sk;
 +                      rt->dst.output = dst_discard_out;
                        rt->dst.input = dst_discard;
                        break;
                case RTN_PROHIBIT:
@@@ -1967,7 -1978,9 +1963,7 @@@ install_route
  
        cfg->fc_nlinfo.nl_net = dev_net(dev);
  
 -      *rt_ret = rt;
 -
 -      return 0;
 +      return rt;
  out:
        if (dev)
                dev_put(dev);
        if (rt)
                dst_free(&rt->dst);
  
 -      *rt_ret = NULL;
 -
 -      return err;
 +      return ERR_PTR(err);
  }
  
  int ip6_route_add(struct fib6_config *cfg)
  {
        struct mx6_config mxc = { .mx = NULL, };
 -      struct rt6_info *rt = NULL;
 +      struct rt6_info *rt;
        int err;
  
 -      err = ip6_route_info_create(cfg, &rt);
 -      if (err)
 +      rt = ip6_route_info_create(cfg);
 +      if (IS_ERR(rt)) {
 +              err = PTR_ERR(rt);
 +              rt = NULL;
                goto out;
 +      }
  
        err = ip6_convert_metrics(&mxc, cfg);
        if (err)
@@@ -2272,6 -2284,7 +2268,6 @@@ static struct rt6_info *rt6_add_route_i
                                           unsigned int pref)
  {
        struct fib6_config cfg = {
 -              .fc_table       = RT6_TABLE_INFO,
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = ifindex,
                .fc_dst_len     = prefixlen,
                .fc_nlinfo.nl_net = net,
        };
  
 +      cfg.fc_table = l3mdev_fib_table_by_index(net, ifindex) ? : RT6_TABLE_INFO;
        cfg.fc_dst = *prefix;
        cfg.fc_gateway = *gwaddr;
  
@@@ -2323,7 -2335,7 +2319,7 @@@ struct rt6_info *rt6_add_dflt_router(co
                                     unsigned int pref)
  {
        struct fib6_config cfg = {
 -              .fc_table       = RT6_TABLE_DFLT,
 +              .fc_table       = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
                .fc_metric      = IP6_RT_PRIO_USER,
                .fc_ifindex     = dev->ifindex,
                .fc_flags       = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@@ -2370,8 -2382,7 +2366,8 @@@ static void rtmsg_to_fib6_config(struc
  {
        memset(cfg, 0, sizeof(*cfg));
  
 -      cfg->fc_table = RT6_TABLE_MAIN;
 +      cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
 +                       : RT6_TABLE_MAIN;
        cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
        cfg->fc_metric = rtmsg->rtmsg_metric;
        cfg->fc_expires = rtmsg->rtmsg_info;
@@@ -2455,7 -2466,7 +2451,7 @@@ static int ip6_pkt_discard(struct sk_bu
        return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
  }
  
 -static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
 +static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        skb->dev = skb_dst(skb)->dev;
        return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
@@@ -2466,7 -2477,7 +2462,7 @@@ static int ip6_pkt_prohibit(struct sk_b
        return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
  }
  
 -static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
 +static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        skb->dev = skb_dst(skb)->dev;
        return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
@@@ -2480,7 -2491,6 +2476,7 @@@ struct rt6_info *addrconf_dst_alloc(str
                                    const struct in6_addr *addr,
                                    bool anycast)
  {
 +      u32 tb_id;
        struct net *net = dev_net(idev->dev);
        struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
                                            DST_NOCOUNT);
        rt->rt6i_gateway  = *addr;
        rt->rt6i_dst.addr = *addr;
        rt->rt6i_dst.plen = 128;
 -      rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
 +      tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
 +      rt->rt6i_table = fib6_get_table(net, tb_id);
        rt->dst.flags |= DST_NOCACHE;
  
        atomic_set(&rt->dst.__refcnt, 1);
@@@ -2609,7 -2618,8 +2605,8 @@@ void rt6_ifdown(struct net *net, struc
  
        fib6_clean_all(net, fib6_ifdown, &adn);
        icmp6_clean_all(fib6_ifdown, &adn);
-       rt6_uncached_list_flush_dev(net, dev);
+       if (dev)
+               rt6_uncached_list_flush_dev(net, dev);
  }
  
  struct rt6_mtu_change_arg {
@@@ -2882,12 -2892,9 +2879,12 @@@ static int ip6_route_multipath_add(stru
                                r_cfg.fc_encap_type = nla_get_u16(nla);
                }
  
 -              err = ip6_route_info_create(&r_cfg, &rt);
 -              if (err)
 +              rt = ip6_route_info_create(&r_cfg);
 +              if (IS_ERR(rt)) {
 +                      err = PTR_ERR(rt);
 +                      rt = NULL;
                        goto cleanup;
 +              }
  
                err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
                if (err) {
@@@ -3266,11 -3273,6 +3263,11 @@@ static int inet6_rtm_getroute(struct sk
        } else {
                fl6.flowi6_oif = oif;
  
 +              if (netif_index_is_l3_master(net, oif)) {
 +                      fl6.flowi6_flags = FLOWI_FLAG_L3MDEV_SRC |
 +                                         FLOWI_FLAG_SKIP_NH_OIF;
 +              }
 +
                rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
        }
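
Beyond the l3mdev plumbing, route.c converts ip6_route_info_create() from an int return plus *rt_ret out-parameter to the kernel's ERR_PTR() idiom; both call sites above (ip6_route_add() and ip6_route_multipath_add()) now follow the same pattern:

    struct rt6_info *rt;

    rt = ip6_route_info_create(cfg);
    if (IS_ERR(rt))
            return PTR_ERR(rt);  /* pointer and error share one return value */
    /* ... rt is valid from here on ... */
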
  
diff --combined net/mac80211/debugfs.c
index 3636b45440ab40ecc3c618a916b1305bacfe1bc7,1560c8482bcb9fd587d1e278a499f99fd9958803..4d2aaebd4f97d8692758263e3d9759d4300abdb6
@@@ -123,8 -123,6 +123,8 @@@ static const char *hw_flag_names[NUM_IE
        FLAG(SUPPORTS_CLONED_SKBS),
        FLAG(SINGLE_SCAN_ON_ALL_BANDS),
        FLAG(TDLS_WIDER_BW),
 +      FLAG(SUPPORTS_AMSDU_IN_AMPDU),
 +      FLAG(BEACON_TX_STATUS),
  
        /* keep last for the build bug below */
        (void *)0x1
@@@ -151,7 -149,7 +151,7 @@@ static ssize_t hwflags_read(struct fil
  
        for (i = 0; i < NUM_IEEE80211_HW_FLAGS; i++) {
                if (test_bit(i, local->hw.flags))
-                       pos += scnprintf(pos, end - pos, "%s",
+                       pos += scnprintf(pos, end - pos, "%s\n",
                                         hw_flag_names[i]);
        }
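
The hwflags_read() tweak prints one flag name per line. The accumulation it relies on is safe because scnprintf() returns the number of characters actually written, never more than the remaining space, so pos can never pass end. A self-contained sketch with illustrative names and values:

    static const char * const names[] = { "AMPDU_AGGREGATION", "SIGNAL_DBM" };
    unsigned long flags[1] = { 0x3 };   /* both bits set, illustrative */
    char buf[512], *pos = buf, *end = buf + sizeof(buf);
    int i;

    for (i = 0; i < 2; i++)
            if (test_bit(i, flags))
                    pos += scnprintf(pos, end - pos, "%s\n", names[i]);
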
  
diff --combined net/mac80211/status.c
index 98fd04c4b2a08f0126e62ca22a9e4eaf1376311a,3ed7ddfbf8e840d4c910b0e9452c06843031c91d..9169ccc36534bc308132166797c3e61e7271feba
@@@ -101,6 -101,7 +101,7 @@@ static void ieee80211_handle_filtered_f
         * when it wakes up for the next time.
         */
        set_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT);
+       ieee80211_clear_fast_xmit(sta);
  
        /*
         * This code races in the following way:
@@@ -668,70 -669,16 +669,70 @@@ void ieee80211_tx_status_noskb(struct i
  }
  EXPORT_SYMBOL(ieee80211_tx_status_noskb);
  
 -void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 +void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
 +                        struct ieee80211_supported_band *sband,
 +                        int retry_count, int shift, bool send_to_cooked)
  {
        struct sk_buff *skb2;
 +      struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 +      struct ieee80211_sub_if_data *sdata;
 +      struct net_device *prev_dev = NULL;
 +      int rtap_len;
 +
 +      /* send frame to monitor interfaces now */
 +      rtap_len = ieee80211_tx_radiotap_len(info);
 +      if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
 +              pr_err("ieee80211_tx_status: headroom too small\n");
 +              dev_kfree_skb(skb);
 +              return;
 +      }
 +      ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
 +                                       rtap_len, shift);
 +
 +      /* XXX: is this sufficient for BPF? */
 +      skb_set_mac_header(skb, 0);
 +      skb->ip_summed = CHECKSUM_UNNECESSARY;
 +      skb->pkt_type = PACKET_OTHERHOST;
 +      skb->protocol = htons(ETH_P_802_2);
 +      memset(skb->cb, 0, sizeof(skb->cb));
 +
 +      rcu_read_lock();
 +      list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 +              if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 +                      if (!ieee80211_sdata_running(sdata))
 +                              continue;
 +
 +                      if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
 +                          !send_to_cooked)
 +                              continue;
 +
 +                      if (prev_dev) {
 +                              skb2 = skb_clone(skb, GFP_ATOMIC);
 +                              if (skb2) {
 +                                      skb2->dev = prev_dev;
 +                                      netif_rx(skb2);
 +                              }
 +                      }
 +
 +                      prev_dev = sdata->dev;
 +              }
 +      }
 +      if (prev_dev) {
 +              skb->dev = prev_dev;
 +              netif_rx(skb);
 +              skb = NULL;
 +      }
 +      rcu_read_unlock();
 +      dev_kfree_skb(skb);
 +}
 +
 +void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 +{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_local *local = hw_to_local(hw);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        __le16 fc;
        struct ieee80211_supported_band *sband;
 -      struct ieee80211_sub_if_data *sdata;
 -      struct net_device *prev_dev = NULL;
        struct sta_info *sta;
        struct rhash_head *tmp;
        int retry_count;
        bool send_to_cooked;
        bool acked;
        struct ieee80211_bar *bar;
 -      int rtap_len;
        int shift = 0;
        int tid = IEEE80211_NUM_TIDS;
        const struct bucket_table *tbl;
                return;
        }
  
 -      /* send frame to monitor interfaces now */
 -      rtap_len = ieee80211_tx_radiotap_len(info);
 -      if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
 -              pr_err("ieee80211_tx_status: headroom too small\n");
 -              dev_kfree_skb(skb);
 -              return;
 -      }
 -      ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
 -                                       rtap_len, shift);
 -
 -      /* XXX: is this sufficient for BPF? */
 -      skb_set_mac_header(skb, 0);
 -      skb->ip_summed = CHECKSUM_UNNECESSARY;
 -      skb->pkt_type = PACKET_OTHERHOST;
 -      skb->protocol = htons(ETH_P_802_2);
 -      memset(skb->cb, 0, sizeof(skb->cb));
 -
 -      rcu_read_lock();
 -      list_for_each_entry_rcu(sdata, &local->interfaces, list) {
 -              if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
 -                      if (!ieee80211_sdata_running(sdata))
 -                              continue;
 -
 -                      if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
 -                          !send_to_cooked)
 -                              continue;
 -
 -                      if (prev_dev) {
 -                              skb2 = skb_clone(skb, GFP_ATOMIC);
 -                              if (skb2) {
 -                                      skb2->dev = prev_dev;
 -                                      netif_rx(skb2);
 -                              }
 -                      }
 -
 -                      prev_dev = sdata->dev;
 -              }
 -      }
 -      if (prev_dev) {
 -              skb->dev = prev_dev;
 -              netif_rx(skb);
 -              skb = NULL;
 -      }
 -      rcu_read_unlock();
 -      dev_kfree_skb(skb);
 +      /* send to monitor interfaces */
 +      ieee80211_tx_monitor(local, skb, sband, retry_count, shift, send_to_cooked);
  }
  EXPORT_SYMBOL(ieee80211_tx_status);
  
diff --combined net/mac80211/tx.c
index 464ba1a625bdc9aa82f4de53d60cdc341b0d0565,7892eb8ed4c8b1fa416ebcb297fab9f1cce91743..3478a83187e535287a3e63dbe399414e68496fa0
@@@ -1218,8 -1218,10 +1218,10 @@@ ieee80211_tx_prepare(struct ieee80211_s
  
        if (!tx->sta)
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
-       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT))
+       else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) {
                info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+               ieee80211_check_fast_xmit(tx->sta);
+       }
  
        info->flags |= IEEE80211_TX_CTL_FIRST_FRAGMENT;
  
@@@ -2451,7 -2453,8 +2453,8 @@@ void ieee80211_check_fast_xmit(struct s
  
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
            test_sta_flag(sta, WLAN_STA_PS_DRIVER) ||
-           test_sta_flag(sta, WLAN_STA_PS_DELIVER))
+           test_sta_flag(sta, WLAN_STA_PS_DELIVER) ||
+           test_sta_flag(sta, WLAN_STA_CLEAR_PS_FILT))
                goto out;
  
        if (sdata->noack_map)
@@@ -2767,8 -2770,7 +2770,8 @@@ static bool ieee80211_xmit_fast(struct 
  
        if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
                *ieee80211_get_qos_ctl(hdr) = tid;
 -              hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
 +              if (!sta->sta.txq[0])
 +                      hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
        } else {
                info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
                hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
@@@ -3513,12 -3515,6 +3516,12 @@@ struct sk_buff *ieee80211_beacon_get_ti
  {
        struct ieee80211_mutable_offsets offs = {};
        struct sk_buff *bcn = __ieee80211_beacon_get(hw, vif, &offs, false);
 +      struct sk_buff *copy;
 +      struct ieee80211_supported_band *sband;
 +      int shift;
 +
 +      if (!bcn)
 +              return bcn;
  
        if (tim_offset)
                *tim_offset = offs.tim_offset;
        if (tim_length)
                *tim_length = offs.tim_length;
  
 +      if (ieee80211_hw_check(hw, BEACON_TX_STATUS) ||
 +          !hw_to_local(hw)->monitors)
 +              return bcn;
 +
 +      /* send a copy to monitor interfaces */
 +      copy = skb_copy(bcn, GFP_ATOMIC);
 +      if (!copy)
 +              return bcn;
 +
 +      shift = ieee80211_vif_get_shift(vif);
 +      sband = hw->wiphy->bands[ieee80211_get_sdata_band(vif_to_sdata(vif))];
 +      ieee80211_tx_monitor(hw_to_local(hw), copy, sband, 1, shift, false);
 +
        return bcn;
  }
  EXPORT_SYMBOL(ieee80211_beacon_get_tim);
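
With ieee80211_tx_monitor() factored out, ieee80211_beacon_get_tim() can mirror each beacon to monitor interfaces in software; drivers whose hardware reports beacon TX status itself opt out via the new BEACON_TX_STATUS flag (also listed in the debugfs table above). A driver would advertise that capability during setup, sketched here on the assumption it uses the standard flag helper:

    /* driver setup, before ieee80211_register_hw() */
    ieee80211_hw_set(hw, BEACON_TX_STATUS);

The mirror uses skb_copy() rather than skb_clone(), presumably so the radiotap header that ieee80211_tx_monitor() pushes cannot land in buffer space shared with the beacon that is about to be handed to the driver.
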
index 1d21ab9d2b5c0fc2d9996859687fff87bce12a6b,c6a39bf2c3b954481a7217fa2b0f95778a2376b8..c6087233d7fca456ee6e8db52aabb015d6f67f94
@@@ -620,7 -620,7 +620,7 @@@ static int set_sctp(struct sk_buff *skb
        return 0;
  }
  
 -static int ovs_vport_output(struct sock *sock, struct sk_buff *skb)
 +static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
  {
        struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
        struct vport *vport = data->vport;
@@@ -679,12 -679,12 +679,12 @@@ static void prepare_frag(struct vport *
        skb_pull(skb, hlen);
  }
  
 -static void ovs_fragment(struct vport *vport, struct sk_buff *skb, u16 mru,
 -                       __be16 ethertype)
 +static void ovs_fragment(struct net *net, struct vport *vport,
 +                       struct sk_buff *skb, u16 mru, __be16 ethertype)
  {
        if (skb_network_offset(skb) > MAX_L2_LEN) {
                OVS_NLERR(1, "L2 header too long to fragment");
-               return;
+               goto err;
        }
  
        if (ethertype == htons(ETH_P_IP)) {
                skb_dst_set_noref(skb, &ovs_dst);
                IPCB(skb)->frag_max_size = mru;
  
 -              ip_do_fragment(skb->sk, skb, ovs_vport_output);
 +              ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else if (ethertype == htons(ETH_P_IPV6)) {
                const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
                struct rt6_info ovs_rt;
  
                if (!v6ops) {
-                       kfree_skb(skb);
-                       return;
+                       goto err;
                }
  
                prepare_frag(vport, skb);
                skb_dst_set_noref(skb, &ovs_rt.dst);
                IP6CB(skb)->frag_max_size = mru;
  
 -              v6ops->fragment(skb->sk, skb, ovs_vport_output);
 +              v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
                refdst_drop(orig_dst);
        } else {
                WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
                          ovs_vport_name(vport), ntohs(ethertype), mru,
                          vport->dev->mtu);
-               kfree_skb(skb);
+               goto err;
        }
+       return;
+ err:
+       kfree_skb(skb);
  }
  
  static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port,
                if (likely(!mru || (skb->len <= mru + ETH_HLEN))) {
                        ovs_vport_send(vport, skb);
                } else if (mru <= vport->dev->mtu) {
 +                      struct net *net = read_pnet(&dp->net);
                        __be16 ethertype = key->eth.type;
  
                        if (!is_flow_key_valid(key)) {
                                        ethertype = vlan_get_protocol(skb);
                        }
  
 -                      ovs_fragment(vport, skb, mru, ethertype);
 +                      ovs_fragment(net, vport, skb, mru, ethertype);
                } else {
                        kfree_skb(skb);
                }
@@@ -969,7 -971,7 +972,7 @@@ static int execute_masked_set_action(st
        case OVS_KEY_ATTR_CT_STATE:
        case OVS_KEY_ATTR_CT_ZONE:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
                err = -EINVAL;
                break;
        }
@@@ -1100,6 -1102,12 +1103,12 @@@ static int do_execute_actions(struct da
                        break;
  
                case OVS_ACTION_ATTR_CT:
+                       if (!is_flow_key_valid(key)) {
+                               err = ovs_flow_key_update(skb, key);
+                               if (err)
+                                       return err;
+                       }
                        err = ovs_ct_execute(ovs_dp_get_net(dp), skb, key,
                                             nla_data(a));
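
Two threads run through the actions.c hunks: ovs_fragment() gains an explicit struct net and funnels every failure through a single err: label so the skb is freed exactly once, and the conntrack action now revalidates the flow key first, the same guard used before other key-consuming actions. That guard as a standalone sketch:

    /* before any action that reads 'key', resync it with the
     * (possibly rewritten) packet */
    if (!is_flow_key_valid(key)) {
            err = ovs_flow_key_update(skb, key);
            if (err)
                    return err;
    }
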
  
index ad614267cc2a620249e18e4ef6bec35c7fa19f8f,80bf702715bb3e8875ded486dbff63d86d7797f8..9ed833e9bb7db5c149366a4cb351bcd4b45ee4f3
@@@ -37,9 -37,9 +37,9 @@@ struct md_mark 
  };
  
  /* Metadata label for masked write to conntrack label. */
- struct md_label {
-       struct ovs_key_ct_label value;
-       struct ovs_key_ct_label mask;
+ struct md_labels {
+       struct ovs_key_ct_labels value;
+       struct ovs_key_ct_labels mask;
  };
  
  /* Conntrack action context for execution. */
@@@ -47,10 -47,10 +47,10 @@@ struct ovs_conntrack_info 
        struct nf_conntrack_helper *helper;
        struct nf_conntrack_zone zone;
        struct nf_conn *ct;
-       u32 flags;
+       u8 commit : 1;
        u16 family;
        struct md_mark mark;
-       struct md_label label;
+       struct md_labels labels;
  };
  
  static u16 key_to_nfproto(const struct sw_flow_key *key)
@@@ -109,21 -109,21 +109,21 @@@ static u32 ovs_ct_get_mark(const struc
  #endif
  }
  
- static void ovs_ct_get_label(const struct nf_conn *ct,
-                            struct ovs_key_ct_label *label)
+ static void ovs_ct_get_labels(const struct nf_conn *ct,
+                             struct ovs_key_ct_labels *labels)
  {
        struct nf_conn_labels *cl = ct ? nf_ct_labels_find(ct) : NULL;
  
        if (cl) {
                size_t len = cl->words * sizeof(long);
  
-               if (len > OVS_CT_LABEL_LEN)
-                       len = OVS_CT_LABEL_LEN;
-               else if (len < OVS_CT_LABEL_LEN)
-                       memset(label, 0, OVS_CT_LABEL_LEN);
-               memcpy(label, cl->bits, len);
+               if (len > OVS_CT_LABELS_LEN)
+                       len = OVS_CT_LABELS_LEN;
+               else if (len < OVS_CT_LABELS_LEN)
+                       memset(labels, 0, OVS_CT_LABELS_LEN);
+               memcpy(labels, cl->bits, len);
        } else {
-               memset(label, 0, OVS_CT_LABEL_LEN);
+               memset(labels, 0, OVS_CT_LABELS_LEN);
        }
  }
  
@@@ -134,7 -134,7 +134,7 @@@ static void __ovs_ct_update_key(struct 
        key->ct.state = state;
        key->ct.zone = zone->id;
        key->ct.mark = ovs_ct_get_mark(ct);
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
  }
  
  /* Update 'key' based on skb->nfct. If 'post_ct' is true, then OVS has
@@@ -167,7 -167,7 +167,7 @@@ void ovs_ct_fill_key(const struct sk_bu
  
  int ovs_ct_put_key(const struct sw_flow_key *key, struct sk_buff *skb)
  {
-       if (nla_put_u8(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
+       if (nla_put_u32(skb, OVS_KEY_ATTR_CT_STATE, key->ct.state))
                return -EMSGSIZE;
  
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
                return -EMSGSIZE;
  
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_KEY_ATTR_CT_LABEL, sizeof(key->ct.label),
-                   &key->ct.label))
+           nla_put(skb, OVS_KEY_ATTR_CT_LABELS, sizeof(key->ct.labels),
+                   &key->ct.labels))
                return -EMSGSIZE;
  
        return 0;
@@@ -213,9 -213,9 +213,9 @@@ static int ovs_ct_set_mark(struct sk_bu
  #endif
  }
  
- static int ovs_ct_set_label(struct sk_buff *skb, struct sw_flow_key *key,
-                           const struct ovs_key_ct_label *label,
-                           const struct ovs_key_ct_label *mask)
+ static int ovs_ct_set_labels(struct sk_buff *skb, struct sw_flow_key *key,
+                            const struct ovs_key_ct_labels *labels,
+                            const struct ovs_key_ct_labels *mask)
  {
        enum ip_conntrack_info ctinfo;
        struct nf_conn_labels *cl;
                nf_ct_labels_ext_add(ct);
                cl = nf_ct_labels_find(ct);
        }
-       if (!cl || cl->words * sizeof(long) < OVS_CT_LABEL_LEN)
+       if (!cl || cl->words * sizeof(long) < OVS_CT_LABELS_LEN)
                return -ENOSPC;
  
-       err = nf_connlabels_replace(ct, (u32 *)label, (u32 *)mask,
-                                   OVS_CT_LABEL_LEN / sizeof(u32));
+       err = nf_connlabels_replace(ct, (u32 *)labels, (u32 *)mask,
+                                   OVS_CT_LABELS_LEN / sizeof(u32));
        if (err)
                return err;
  
-       ovs_ct_get_label(ct, &key->ct.label);
+       ovs_ct_get_labels(ct, &key->ct.labels);
        return 0;
  }
  
@@@ -304,7 -304,7 +304,7 @@@ static int handle_fragments(struct net 
                int err;
  
                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
 -              err = ip_defrag(skb, user);
 +              err = ip_defrag(net, skb, user);
                if (err)
                        return err;
  
                struct sk_buff *reasm;
  
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
 -              reasm = nf_ct_frag6_gather(skb, user);
 +              reasm = nf_ct_frag6_gather(net, skb, user);
                if (!reasm)
                        return -EINPROGRESS;
  
@@@ -347,7 -347,7 +347,7 @@@ ovs_ct_expect_find(struct net *net, con
  {
        struct nf_conntrack_tuple tuple;
  
 -      if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, &tuple))
 +      if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb), proto, net, &tuple))
                return NULL;
        return __nf_ct_expect_find(net, zone, &tuple);
  }
@@@ -465,12 -465,12 +465,12 @@@ static int ovs_ct_commit(struct net *ne
        return 0;
  }
  
- static bool label_nonzero(const struct ovs_key_ct_label *label)
+ static bool labels_nonzero(const struct ovs_key_ct_labels *labels)
  {
        size_t i;
  
-       for (i = 0; i < sizeof(*label); i++)
-               if (label->ct_label[i])
+       for (i = 0; i < sizeof(*labels); i++)
+               if (labels->ct_labels[i])
                        return true;
  
        return false;
@@@ -493,7 -493,7 +493,7 @@@ int ovs_ct_execute(struct net *net, str
                        return err;
        }
  
-       if (info->flags & OVS_CT_F_COMMIT)
+       if (info->commit)
                err = ovs_ct_commit(net, key, info, skb);
        else
                err = ovs_ct_lookup(net, key, info, skb);
                if (err)
                        goto err;
        }
-       if (label_nonzero(&info->label.mask))
-               err = ovs_ct_set_label(skb, key, &info->label.value,
-                                      &info->label.mask);
+       if (labels_nonzero(&info->labels.mask))
+               err = ovs_ct_set_labels(skb, key, &info->labels.value,
+                                       &info->labels.mask);
  err:
        skb_push(skb, nh_ofs);
        return err;
@@@ -539,14 -539,13 +539,13 @@@ static int ovs_ct_add_helper(struct ovs
  }
  
  static const struct ovs_ct_len_tbl ovs_ct_attr_lens[OVS_CT_ATTR_MAX + 1] = {
-       [OVS_CT_ATTR_FLAGS]     = { .minlen = sizeof(u32),
-                                   .maxlen = sizeof(u32) },
+       [OVS_CT_ATTR_COMMIT]    = { .minlen = 0, .maxlen = 0 },
        [OVS_CT_ATTR_ZONE]      = { .minlen = sizeof(u16),
                                    .maxlen = sizeof(u16) },
        [OVS_CT_ATTR_MARK]      = { .minlen = sizeof(struct md_mark),
                                    .maxlen = sizeof(struct md_mark) },
-       [OVS_CT_ATTR_LABEL]     = { .minlen = sizeof(struct md_label),
-                                   .maxlen = sizeof(struct md_label) },
+       [OVS_CT_ATTR_LABELS]    = { .minlen = sizeof(struct md_labels),
+                                   .maxlen = sizeof(struct md_labels) },
        [OVS_CT_ATTR_HELPER]    = { .minlen = 1,
                                    .maxlen = NF_CT_HELPER_NAME_LEN }
  };
@@@ -576,8 -575,8 +575,8 @@@ static int parse_ct(const struct nlatt
                }
  
                switch (type) {
-               case OVS_CT_ATTR_FLAGS:
-                       info->flags = nla_get_u32(a);
+               case OVS_CT_ATTR_COMMIT:
+                       info->commit = true;
                        break;
  #ifdef CONFIG_NF_CONNTRACK_ZONES
                case OVS_CT_ATTR_ZONE:
                }
  #endif
  #ifdef CONFIG_NF_CONNTRACK_LABELS
-               case OVS_CT_ATTR_LABEL: {
-                       struct md_label *label = nla_data(a);
+               case OVS_CT_ATTR_LABELS: {
+                       struct md_labels *labels = nla_data(a);
  
-                       info->label = *label;
+                       info->labels = *labels;
                        break;
                }
  #endif
@@@ -633,7 -632,7 +632,7 @@@ bool ovs_ct_verify(struct net *net, enu
            attr == OVS_KEY_ATTR_CT_MARK)
                return true;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           attr == OVS_KEY_ATTR_CT_LABEL) {
+           attr == OVS_KEY_ATTR_CT_LABELS) {
                struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
  
                return ovs_net->xt_label;
@@@ -701,7 -700,7 +700,7 @@@ int ovs_ct_action_to_attr(const struct 
        if (!start)
                return -EMSGSIZE;
  
-       if (nla_put_u32(skb, OVS_CT_ATTR_FLAGS, ct_info->flags))
+       if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
            nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
                    &ct_info->mark))
                return -EMSGSIZE;
        if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
-           nla_put(skb, OVS_CT_ATTR_LABEL, sizeof(ct_info->label),
-                   &ct_info->label))
+           nla_put(skb, OVS_CT_ATTR_LABELS, sizeof(ct_info->labels),
+                   &ct_info->labels))
                return -EMSGSIZE;
        if (ct_info->helper) {
                if (nla_put_string(skb, OVS_CT_ATTR_HELPER,
@@@ -737,7 -736,7 +736,7 @@@ void ovs_ct_free_action(const struct nl
  
  void ovs_ct_init(struct net *net)
  {
-       unsigned int n_bits = sizeof(struct ovs_key_ct_label) * BITS_PER_BYTE;
+       unsigned int n_bits = sizeof(struct ovs_key_ct_labels) * BITS_PER_BYTE;
        struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
  
        if (nf_connlabels_get(net, n_bits)) {
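
The conntrack uAPI rework above does three things in one go: every 'label' identifier becomes plural (the field is a 128-bit set of label bits), ct_state widens from u8 to u32 on the wire, and the OVS_CT_ATTR_FLAGS u32 becomes a zero-length OVS_CT_ATTR_COMMIT flag attribute whose presence alone means commit. Reading and writing such a flag attribute reduces to:

    /* writer: emit the attribute only when commit is requested */
    if (ct_info->commit && nla_put_flag(skb, OVS_CT_ATTR_COMMIT))
            return -EMSGSIZE;

    /* reader: no payload to fetch, presence is the value */
    switch (nla_type(a)) {
    case OVS_CT_ATTR_COMMIT:
            info->commit = true;
            break;
    }

The { .minlen = 0, .maxlen = 0 } entry in ovs_ct_attr_lens enforces that the attribute really is empty.
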
diff --combined net/openvswitch/flow.h
index 5688e33e2de6192c414f7a1c0686a63941f5076b,8cfa15a08668804a2a74c0047662694f23acbb80..1d055c559eafb118043ed21c66916d7313c41d8d
@@@ -63,7 -63,6 +63,7 @@@ struct sw_flow_key 
                u32     skb_mark;       /* SKB mark. */
                u16     in_port;        /* Input switch port (or DP_MAX_PORTS). */
        } __packed phy; /* Safe when right after 'tun_key'. */
 +      u8 tun_proto;                   /* Protocol of encapsulating tunnel. */
        u32 ovs_flow_hash;              /* Datapath computed hash value.  */
        u32 recirc_id;                  /* Recirculation ID.  */
        struct {
                u16 zone;
                u32 mark;
                u8 state;
-               struct ovs_key_ct_label label;
+               struct ovs_key_ct_labels labels;
        } ct;
  
  } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
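
The __aligned(BITS_PER_LONG/8) on sw_flow_key is what lets the flow table compare keys a machine word at a time, which is why the new tun_proto byte and the renamed ct.labels field can simply be added inside the struct. A sketch of the word-wise comparison this alignment enables (the in-tree helper lives in flow_table.c; this version is illustrative):

    static bool flow_key_equal(const struct sw_flow_key *a,
                               const struct sw_flow_key *b, size_t len)
    {
            /* len is a multiple of sizeof(long) thanks to the
             * alignment/padding above */
            const long *pa = (const long *)a;
            const long *pb = (const long *)b;
            long diffs = 0;
            size_t i;

            for (i = 0; i < len / sizeof(long); i++)
                    diffs |= pa[i] ^ pb[i];
            return diffs == 0;
    }
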
index 77850f177a47c68a9c17447e773d0c29c6d54b4e,171a691f1c3218ed1a63ac9151c95475ef7fcade..6799c8d470c603d45cfbefc0760f5552ea85c04b
@@@ -262,8 -262,8 +262,8 @@@ size_t ovs_tun_key_attr_size(void
         * updating this function.
         */
        return    nla_total_size(8)    /* OVS_TUNNEL_KEY_ATTR_ID */
 -              + nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
 -              + nla_total_size(4)    /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
 +              + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_SRC */
 +              + nla_total_size(16)   /* OVS_TUNNEL_KEY_ATTR_IPV[46]_DST */
                + nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TOS */
                + nla_total_size(1)    /* OVS_TUNNEL_KEY_ATTR_TTL */
                + nla_total_size(0)    /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
@@@ -291,10 -291,10 +291,10 @@@ size_t ovs_key_attr_size(void
                + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
                + nla_total_size(4)   /* OVS_KEY_ATTR_DP_HASH */
                + nla_total_size(4)   /* OVS_KEY_ATTR_RECIRC_ID */
-               + nla_total_size(1)   /* OVS_KEY_ATTR_CT_STATE */
+               + nla_total_size(4)   /* OVS_KEY_ATTR_CT_STATE */
                + nla_total_size(2)   /* OVS_KEY_ATTR_CT_ZONE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_CT_MARK */
-               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABEL */
+               + nla_total_size(16)  /* OVS_KEY_ATTR_CT_LABELS */
                + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
                + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
                + nla_total_size(4)   /* OVS_KEY_ATTR_VLAN */
@@@ -323,8 -323,6 +323,8 @@@ static const struct ovs_len_tbl ovs_tun
        [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS]   = { .len = OVS_ATTR_VARIABLE },
        [OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS]    = { .len = OVS_ATTR_NESTED,
                                                .next = ovs_vxlan_ext_key_lens },
 +      [OVS_TUNNEL_KEY_ATTR_IPV6_SRC]      = { .len = sizeof(struct in6_addr) },
 +      [OVS_TUNNEL_KEY_ATTR_IPV6_DST]      = { .len = sizeof(struct in6_addr) },
  };
  
  /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */
@@@ -351,10 -349,10 +351,10 @@@ static const struct ovs_len_tbl ovs_key
        [OVS_KEY_ATTR_TUNNEL]    = { .len = OVS_ATTR_NESTED,
                                     .next = ovs_tunnel_key_lens, },
        [OVS_KEY_ATTR_MPLS]      = { .len = sizeof(struct ovs_key_mpls) },
-       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u8) },
+       [OVS_KEY_ATTR_CT_STATE]  = { .len = sizeof(u32) },
        [OVS_KEY_ATTR_CT_ZONE]   = { .len = sizeof(u16) },
        [OVS_KEY_ATTR_CT_MARK]   = { .len = sizeof(u32) },
-       [OVS_KEY_ATTR_CT_LABEL]  = { .len = sizeof(struct ovs_key_ct_label) },
+       [OVS_KEY_ATTR_CT_LABELS] = { .len = sizeof(struct ovs_key_ct_labels) },
  };
  
  static bool check_attr_len(unsigned int attr_len, unsigned int expected_len)
@@@ -544,14 -542,14 +544,14 @@@ static int vxlan_tun_opt_from_nlattr(co
        return 0;
  }
  
 -static int ipv4_tun_from_nlattr(const struct nlattr *attr,
 -                              struct sw_flow_match *match, bool is_mask,
 -                              bool log)
 +static int ip_tun_from_nlattr(const struct nlattr *attr,
 +                            struct sw_flow_match *match, bool is_mask,
 +                            bool log)
  {
        struct nlattr *a;
        int rem;
        bool ttl = false;
 -      __be16 tun_flags = 0;
 +      __be16 tun_flags = 0;
 +      bool ipv4 = false, ipv6 = false;
        int opts_type = 0;
  
        nla_for_each_nested(a, attr, rem) {
                case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
                        SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.src,
                                        nla_get_in_addr(a), is_mask);
 +                      ipv4 = true;
                        break;
                case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
                        SW_FLOW_KEY_PUT(match, tun_key.u.ipv4.dst,
                                        nla_get_in_addr(a), is_mask);
 +                      ipv4 = true;
 +                      break;
 +              case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
 +                      SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
 +                                      nla_get_in6_addr(a), is_mask);
 +                      ipv6 = true;
 +                      break;
 +              case OVS_TUNNEL_KEY_ATTR_IPV6_DST:
 +                      SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
 +                                      nla_get_in6_addr(a), is_mask);
 +                      ipv6 = true;
                        break;
                case OVS_TUNNEL_KEY_ATTR_TOS:
                        SW_FLOW_KEY_PUT(match, tun_key.tos,
                        opts_type = type;
                        break;
                default:
 -                      OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d",
 +                      OVS_NLERR(log, "Unknown IP tunnel attribute %d",
                                  type);
                        return -EINVAL;
                }
        }
  
        SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
 +      if (is_mask)
 +              SW_FLOW_KEY_MEMSET_FIELD(match, tun_proto, 0xff, true);
 +      else
 +              SW_FLOW_KEY_PUT(match, tun_proto, ipv6 ? AF_INET6 : AF_INET,
 +                              false);
  
        if (rem > 0) {
 -              OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.",
 +              OVS_NLERR(log, "IP tunnel attribute has %d unknown bytes.",
                          rem);
                return -EINVAL;
        }
  
 +      if (ipv4 && ipv6) {
 +              OVS_NLERR(log, "Mixed IPv4 and IPv6 tunnel attributes");
 +              return -EINVAL;
 +      }
 +
        if (!is_mask) {
 -              if (!match->key->tun_key.u.ipv4.dst) {
 +              if (!ipv4 && !ipv6) {
 +                      OVS_NLERR(log, "IP tunnel dst address not specified");
 +                      return -EINVAL;
 +              }
 +              if (ipv4 && !match->key->tun_key.u.ipv4.dst) {
                        OVS_NLERR(log, "IPv4 tunnel dst address is zero");
                        return -EINVAL;
                }
 +              if (ipv6 && ipv6_addr_any(&match->key->tun_key.u.ipv6.dst)) {
 +                      OVS_NLERR(log, "IPv6 tunnel dst address is zero");
 +                      return -EINVAL;
 +              }
  
                if (!ttl) {
 -                      OVS_NLERR(log, "IPv4 tunnel TTL not specified.");
 +                      OVS_NLERR(log, "IP tunnel TTL not specified.");
                        return -EINVAL;
                }
        }
@@@ -714,36 -682,21 +714,36 @@@ static int vxlan_opt_to_nlattr(struct s
        return 0;
  }
  
 -static int __ipv4_tun_to_nlattr(struct sk_buff *skb,
 -                              const struct ip_tunnel_key *output,
 -                              const void *tun_opts, int swkey_tun_opts_len)
 +static int __ip_tun_to_nlattr(struct sk_buff *skb,
 +                            const struct ip_tunnel_key *output,
 +                            const void *tun_opts, int swkey_tun_opts_len,
 +                            unsigned short tun_proto)
  {
        if (output->tun_flags & TUNNEL_KEY &&
            nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
                return -EMSGSIZE;
 -      if (output->u.ipv4.src &&
 -          nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
 -                          output->u.ipv4.src))
 -              return -EMSGSIZE;
 -      if (output->u.ipv4.dst &&
 -          nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
 -                          output->u.ipv4.dst))
 -              return -EMSGSIZE;
 +      switch (tun_proto) {
 +      case AF_INET:
 +              if (output->u.ipv4.src &&
 +                  nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC,
 +                                  output->u.ipv4.src))
 +                      return -EMSGSIZE;
 +              if (output->u.ipv4.dst &&
 +                  nla_put_in_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST,
 +                                  output->u.ipv4.dst))
 +                      return -EMSGSIZE;
 +              break;
 +      case AF_INET6:
 +              if (!ipv6_addr_any(&output->u.ipv6.src) &&
 +                  nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_SRC,
 +                                   &output->u.ipv6.src))
 +                      return -EMSGSIZE;
 +              if (!ipv6_addr_any(&output->u.ipv6.dst) &&
 +                  nla_put_in6_addr(skb, OVS_TUNNEL_KEY_ATTR_IPV6_DST,
 +                                   &output->u.ipv6.dst))
 +                      return -EMSGSIZE;
 +              break;
 +      }
        if (output->tos &&
            nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->tos))
                return -EMSGSIZE;
        return 0;
  }
  
 -static int ipv4_tun_to_nlattr(struct sk_buff *skb,
 -                            const struct ip_tunnel_key *output,
 -                            const void *tun_opts, int swkey_tun_opts_len)
 +static int ip_tun_to_nlattr(struct sk_buff *skb,
 +                          const struct ip_tunnel_key *output,
 +                          const void *tun_opts, int swkey_tun_opts_len,
 +                          unsigned short tun_proto)
  {
        struct nlattr *nla;
        int err;
        if (!nla)
                return -EMSGSIZE;
  
 -      err = __ipv4_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len);
 +      err = __ip_tun_to_nlattr(skb, output, tun_opts, swkey_tun_opts_len,
 +                               tun_proto);
        if (err)
                return err;
  
@@@ -802,10 -753,9 +802,10 @@@ int ovs_nla_put_egress_tunnel_key(struc
                                  const struct ip_tunnel_info *egress_tun_info,
                                  const void *egress_tun_opts)
  {
 -      return __ipv4_tun_to_nlattr(skb, &egress_tun_info->key,
 -                                  egress_tun_opts,
 -                                  egress_tun_info->options_len);
 +      return __ip_tun_to_nlattr(skb, &egress_tun_info->key,
 +                                egress_tun_opts,
 +                                egress_tun_info->options_len,
 +                                ip_tunnel_info_af(egress_tun_info));
  }
  
  static int metadata_from_nlattrs(struct net *net, struct sw_flow_match *match,
                *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
        }
        if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
 -              if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
 -                                       is_mask, log) < 0)
 +              if (ip_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
 +                                     is_mask, log) < 0)
                        return -EINVAL;
                *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
        }
  
        if (*attrs & (1 << OVS_KEY_ATTR_CT_STATE) &&
            ovs_ct_verify(net, OVS_KEY_ATTR_CT_STATE)) {
-               u8 ct_state = nla_get_u8(a[OVS_KEY_ATTR_CT_STATE]);
+               u32 ct_state = nla_get_u32(a[OVS_KEY_ATTR_CT_STATE]);
+
+               if (!is_mask && !ovs_ct_state_supported(ct_state)) {
+                       OVS_NLERR(log, "ct_state flags %08x unsupported",
+                                 ct_state);
+                       return -EINVAL;
+               }
  
                SW_FLOW_KEY_PUT(match, ct.state, ct_state, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_STATE);
                SW_FLOW_KEY_PUT(match, ct.mark, mark, is_mask);
                *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_MARK);
        }
-       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABEL) &&
-           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABEL)) {
-               const struct ovs_key_ct_label *cl;
+       if (*attrs & (1 << OVS_KEY_ATTR_CT_LABELS) &&
+           ovs_ct_verify(net, OVS_KEY_ATTR_CT_LABELS)) {
+               const struct ovs_key_ct_labels *cl;
  
-               cl = nla_data(a[OVS_KEY_ATTR_CT_LABEL]);
-               SW_FLOW_KEY_MEMCPY(match, ct.label, cl->ct_label,
+               cl = nla_data(a[OVS_KEY_ATTR_CT_LABELS]);
+               SW_FLOW_KEY_MEMCPY(match, ct.labels, cl->ct_labels,
                                   sizeof(*cl), is_mask);
-               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABEL);
+               *attrs &= ~(1ULL << OVS_KEY_ATTR_CT_LABELS);
        }
        return 0;
  }
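
With ct_state widened from u8 to u32, masks carrying bits the kernel does not implement are now rejected at parse time instead of being silently accepted. ovs_ct_state_supported() lives with the OVS conntrack code; conceptually it is a whitelist test along these lines (a sketch of the idea, not the verbatim helper; the OVS_CS_F_* flags are the UAPI ct_state bits):

#include <linux/openvswitch.h>

/* Conceptual sketch of the supported-bits check; the real helper is
 * defined alongside the OVS conntrack code.
 */
static bool example_ct_state_supported(u32 ct_state)
{
	const u32 supported = OVS_CS_F_NEW | OVS_CS_F_ESTABLISHED |
			      OVS_CS_F_RELATED | OVS_CS_F_REPLY_DIR |
			      OVS_CS_F_INVALID | OVS_CS_F_TRACKED;

	return !(ct_state & ~supported);
}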
@@@ -1244,7 -1200,7 +1250,7 @@@ int ovs_nla_get_match(struct net *net, 
                        /* Userspace does not send tunnel attributes that
                         * are 0, but we should not wildcard them nonetheless.
                         */
 -                      if (match->key->tun_key.u.ipv4.dst)
 +                      if (match->key->tun_proto)
                                SW_FLOW_KEY_MEMSET_FIELD(match, tun_key,
                                                         0xff, true);
  
@@@ -1417,14 -1373,14 +1423,14 @@@ static int __ovs_nla_put_key(const stru
        if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
                goto nla_put_failure;
  
 -      if ((swkey->tun_key.u.ipv4.dst || is_mask)) {
 +      if ((swkey->tun_proto || is_mask)) {
                const void *opts = NULL;
  
                if (output->tun_key.tun_flags & TUNNEL_OPTIONS_PRESENT)
                        opts = TUN_METADATA_OPTS(output, swkey->tun_opts_len);
  
 -              if (ipv4_tun_to_nlattr(skb, &output->tun_key, opts,
 -                                     swkey->tun_opts_len))
 +              if (ip_tun_to_nlattr(skb, &output->tun_key, opts,
 +                                   swkey->tun_opts_len, swkey->tun_proto))
                        goto nla_put_failure;
        }
  
@@@ -1927,7 -1883,7 +1933,7 @@@ static int validate_and_copy_set_tun(co
        int err = 0, start, opts_type;
  
        ovs_match_init(&match, &key, NULL);
 -      opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
 +      opts_type = ip_tun_from_nlattr(nla_data(attr), &match, false, log);
        if (opts_type < 0)
                return opts_type;
  
  
        tun_info = &tun_dst->u.tun_info;
        tun_info->mode = IP_TUNNEL_INFO_TX;
 +      if (key.tun_proto == AF_INET6)
 +              tun_info->mode |= IP_TUNNEL_INFO_IPV6;
        tun_info->key = key.tun_key;
  
        /* We need to store the options in the action itself since
@@@ -2025,7 -1979,7 +2031,7 @@@ static int validate_set(const struct nl
        case OVS_KEY_ATTR_PRIORITY:
        case OVS_KEY_ATTR_SKB_MARK:
        case OVS_KEY_ATTR_CT_MARK:
-       case OVS_KEY_ATTR_CT_LABEL:
+       case OVS_KEY_ATTR_CT_LABELS:
        case OVS_KEY_ATTR_ETHERNET:
                break;
  
@@@ -2426,11 -2380,10 +2432,11 @@@ static int set_action_to_attr(const str
                if (!start)
                        return -EMSGSIZE;
  
 -              err = ipv4_tun_to_nlattr(skb, &tun_info->key,
 -                                       tun_info->options_len ?
 +              err = ip_tun_to_nlattr(skb, &tun_info->key,
 +                                     tun_info->options_len ?
                                             ip_tunnel_info_opts(tun_info) : NULL,
 -                                       tun_info->options_len);
 +                                     tun_info->options_len,
 +                                     ip_tunnel_info_af(tun_info));
                if (err)
                        return err;
                nla_nest_end(skb, start);
index 95dbcedf0bd4422f927956b042ca469eeba56ea7,c7f74aab34b9ef7d2befcd571d44290b76fbfbc1..d073fff82fdb8c6c8d39b57690d37eedeb12423a
@@@ -93,7 -93,8 +93,8 @@@ struct sw_flow *ovs_flow_alloc(void
  
        /* Initialize the default stat node. */
        stats = kmem_cache_alloc_node(flow_stats_cache,
-                                     GFP_KERNEL | __GFP_ZERO, 0);
+                                     GFP_KERNEL | __GFP_ZERO,
+                                     node_online(0) ? 0 : NUMA_NO_NODE);
        if (!stats)
                goto err;
  
@@@ -427,7 -428,7 +428,7 @@@ static u32 flow_hash(const struct sw_fl
  
  static int flow_key_start(const struct sw_flow_key *key)
  {
 -      if (key->tun_key.u.ipv4.dst)
 +      if (key->tun_proto)
                return 0;
        else
                return rounddown(offsetof(struct sw_flow_key, phy),
index 73e3895175cf7e1951a5a168ba4eff5ef69349d2,77f5d17e261230a6db4f261445311eafb37c7b06..1eb76956b4390fef09825f38bda1f1b29a1ab352
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * net/switchdev/switchdev.c - Switch device API
 - * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
 + * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
   * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
   *
   * This program is free software; you can redistribute it and/or modify
  #include <linux/mutex.h>
  #include <linux/notifier.h>
  #include <linux/netdevice.h>
 +#include <linux/etherdevice.h>
  #include <linux/if_bridge.h>
 +#include <linux/list.h>
 +#include <linux/workqueue.h>
+ #include <linux/if_vlan.h>
  #include <net/ip_fib.h>
  #include <net/switchdev.h>
  
 +/**
 + *    switchdev_trans_item_enqueue - Enqueue data item to transaction queue
 + *
 + *    @trans: transaction
 + *    @data: pointer to data being queued
 + *    @destructor: data destructor
 + *    @tritem: transaction item being queued
 + *
 + *    Enqueue a data item on the transaction queue. tritem is typically
 + *    placed in the container pointed at by the data pointer. The
 + *    destructor is called on transaction abort, and after a successful
 + *    commit phase if the caller did not dequeue the item beforehand.
 + */
 +void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
 +                                void *data, void (*destructor)(void const *),
 +                                struct switchdev_trans_item *tritem)
 +{
 +      tritem->data = data;
 +      tritem->destructor = destructor;
 +      list_add_tail(&tritem->list, &trans->item_list);
 +}
 +EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
 +
 +static struct switchdev_trans_item *
 +__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
 +{
 +      struct switchdev_trans_item *tritem;
 +
 +      if (list_empty(&trans->item_list))
 +              return NULL;
 +      tritem = list_first_entry(&trans->item_list,
 +                                struct switchdev_trans_item, list);
 +      list_del(&tritem->list);
 +      return tritem;
 +}
 +
 +/**
 + *    switchdev_trans_item_dequeue - Dequeue data item from transaction queue
 + *
 + *    @trans: transaction
 + */
 +void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
 +{
 +      struct switchdev_trans_item *tritem;
 +
 +      tritem = __switchdev_trans_item_dequeue(trans);
 +      BUG_ON(!tritem);
 +      return tritem->data;
 +}
 +EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
 +
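
These two helpers form a pair: memory reserved during the prepare phase is queued on the transaction and claimed back during commit, so the commit phase cannot fail on allocation. A hedged sketch of the pattern inside a driver callback; struct example_entry and the hardware step are hypothetical:

#include <linux/slab.h>
#include <net/switchdev.h>

struct example_entry {
	struct switchdev_trans_item tritem;
	u16 vid;
};

static int example_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct switchdev_trans *trans)
{
	struct example_entry *entry;

	if (trans->ph_prepare) {
		/* Phase I: reserve now so commit cannot fail; kfree is
		 * the destructor run if the transaction is aborted.
		 */
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;
		switchdev_trans_item_enqueue(trans, entry, kfree,
					     &entry->tritem);
		return 0;
	}

	/* Phase II: claim the reserved entry and program the device. */
	entry = switchdev_trans_item_dequeue(trans);
	/* ... write entry->vid to hardware, then keep or kfree(entry) ... */
	return 0;
}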
 +static void switchdev_trans_init(struct switchdev_trans *trans)
 +{
 +      INIT_LIST_HEAD(&trans->item_list);
 +}
 +
 +static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
 +{
 +      struct switchdev_trans_item *tritem;
 +
 +      while ((tritem = __switchdev_trans_item_dequeue(trans)))
 +              tritem->destructor(tritem->data);
 +}
 +
 +static void switchdev_trans_items_warn_destroy(struct net_device *dev,
 +                                             struct switchdev_trans *trans)
 +{
 +      WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
 +           dev->name);
 +      switchdev_trans_items_destroy(trans);
 +}
 +
 +static LIST_HEAD(deferred);
 +static DEFINE_SPINLOCK(deferred_lock);
 +
 +typedef void switchdev_deferred_func_t(struct net_device *dev,
 +                                     const void *data);
 +
 +struct switchdev_deferred_item {
 +      struct list_head list;
 +      struct net_device *dev;
 +      switchdev_deferred_func_t *func;
 +      unsigned long data[0];
 +};
 +
 +static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
 +{
 +      struct switchdev_deferred_item *dfitem;
 +
 +      spin_lock_bh(&deferred_lock);
 +      if (list_empty(&deferred)) {
 +              dfitem = NULL;
 +              goto unlock;
 +      }
 +      dfitem = list_first_entry(&deferred,
 +                                struct switchdev_deferred_item, list);
 +      list_del(&dfitem->list);
 +unlock:
 +      spin_unlock_bh(&deferred_lock);
 +      return dfitem;
 +}
 +
 +/**
 + *    switchdev_deferred_process - Process ops in deferred queue
 + *
 + *    Called to flush the ops currently queued in the deferred ops queue.
 + *    rtnl_lock must be held.
 + */
 +void switchdev_deferred_process(void)
 +{
 +      struct switchdev_deferred_item *dfitem;
 +
 +      ASSERT_RTNL();
 +
 +      while ((dfitem = switchdev_deferred_dequeue())) {
 +              dfitem->func(dfitem->dev, dfitem->data);
 +              dev_put(dfitem->dev);
 +              kfree(dfitem);
 +      }
 +}
 +EXPORT_SYMBOL_GPL(switchdev_deferred_process);
 +
 +static void switchdev_deferred_process_work(struct work_struct *work)
 +{
 +      rtnl_lock();
 +      switchdev_deferred_process();
 +      rtnl_unlock();
 +}
 +
 +static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
 +
 +static int switchdev_deferred_enqueue(struct net_device *dev,
 +                                    const void *data, size_t data_len,
 +                                    switchdev_deferred_func_t *func)
 +{
 +      struct switchdev_deferred_item *dfitem;
 +
 +      dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
 +      if (!dfitem)
 +              return -ENOMEM;
 +      dfitem->dev = dev;
 +      dfitem->func = func;
 +      memcpy(dfitem->data, data, data_len);
 +      dev_hold(dev);
 +      spin_lock_bh(&deferred_lock);
 +      list_add_tail(&dfitem->list, &deferred);
 +      spin_unlock_bh(&deferred_lock);
 +      schedule_work(&deferred_process_work);
 +      return 0;
 +}
 +
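
The deferred machinery copies the op into a heap item with GFP_ATOMIC, holds a reference on the device, and replays the op from a work item under rtnl_lock, so a switchdev op can be issued from contexts that cannot sleep or take rtnl_lock. A caller-side sketch, with an illustrative attribute value (the union field follows struct switchdev_attr in include/net/switchdev.h):

#include <linux/if_bridge.h>
#include <net/switchdev.h>

/* Sketch: request a bridge-port flags update from a context that
 * cannot take rtnl_lock; the chosen value is illustrative only.
 */
static int example_defer_flags(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
		.flags = SWITCHDEV_F_DEFER,	/* enqueue, do not run now */
		.u.brport_flags = BR_LEARNING,
	};

	/* The attr is copied by switchdev_deferred_enqueue() and later
	 * applied by deferred_process_work under rtnl_lock.
	 */
	return switchdev_port_attr_set(dev, &attr);
}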
  /**
   *    switchdev_port_attr_get - Get port attribute
   *
@@@ -185,7 -32,7 +186,7 @@@ int switchdev_port_attr_get(struct net_
        struct net_device *lower_dev;
        struct list_head *iter;
        struct switchdev_attr first = {
 -              .id = SWITCHDEV_ATTR_UNDEFINED
 +              .id = SWITCHDEV_ATTR_ID_UNDEFINED
        };
        int err = -EOPNOTSUPP;
  
                err = switchdev_port_attr_get(lower_dev, attr);
                if (err)
                        break;
 -              if (first.id == SWITCHDEV_ATTR_UNDEFINED)
 +              if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
                        first = *attr;
                else if (memcmp(&first, attr, sizeof(*attr)))
                        return -ENODATA;
  EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
  
  static int __switchdev_port_attr_set(struct net_device *dev,
 -                                   struct switchdev_attr *attr)
 +                                   const struct switchdev_attr *attr,
 +                                   struct switchdev_trans *trans)
  {
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        int err = -EOPNOTSUPP;
  
        if (ops && ops->switchdev_port_attr_set)
 -              return ops->switchdev_port_attr_set(dev, attr);
 +              return ops->switchdev_port_attr_set(dev, attr, trans);
  
        if (attr->flags & SWITCHDEV_F_NO_RECURSE)
 -              return err;
 +              goto done;
  
        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to set attr on
         */
  
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              err = __switchdev_port_attr_set(lower_dev, attr);
 +              err = __switchdev_port_attr_set(lower_dev, attr, trans);
 +              if (err == -EOPNOTSUPP &&
 +                  attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
 +                      continue;
                if (err)
                        break;
        }
  
 -      return err;
 -}
 -
 -struct switchdev_attr_set_work {
 -      struct work_struct work;
 -      struct net_device *dev;
 -      struct switchdev_attr attr;
 -};
 -
 -static void switchdev_port_attr_set_work(struct work_struct *work)
 -{
 -      struct switchdev_attr_set_work *asw =
 -              container_of(work, struct switchdev_attr_set_work, work);
 -      int err;
 -
 -      rtnl_lock();
 -      err = switchdev_port_attr_set(asw->dev, &asw->attr);
 -      if (err && err != -EOPNOTSUPP)
 -              netdev_err(asw->dev, "failed (err=%d) to set attribute (id=%d)\n",
 -                         err, asw->attr.id);
 -      rtnl_unlock();
 +done:
 +      if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
 +              err = 0;
  
 -      dev_put(asw->dev);
 -      kfree(work);
 -}
 -
 -static int switchdev_port_attr_set_defer(struct net_device *dev,
 -                                       struct switchdev_attr *attr)
 -{
 -      struct switchdev_attr_set_work *asw;
 -
 -      asw = kmalloc(sizeof(*asw), GFP_ATOMIC);
 -      if (!asw)
 -              return -ENOMEM;
 -
 -      INIT_WORK(&asw->work, switchdev_port_attr_set_work);
 -
 -      dev_hold(dev);
 -      asw->dev = dev;
 -      memcpy(&asw->attr, attr, sizeof(asw->attr));
 -
 -      schedule_work(&asw->work);
 -
 -      return 0;
 +      return err;
  }
  
 -/**
 - *    switchdev_port_attr_set - Set port attribute
 - *
 - *    @dev: port device
 - *    @attr: attribute to set
 - *
 - *    Use a 2-phase prepare-commit transaction model to ensure
 - *    system is not left in a partially updated state due to
 - *    failure from driver/device.
 - */
 -int switchdev_port_attr_set(struct net_device *dev, struct switchdev_attr *attr)
 +static int switchdev_port_attr_set_now(struct net_device *dev,
 +                                     const struct switchdev_attr *attr)
  {
 +      struct switchdev_trans trans;
        int err;
  
 -      if (!rtnl_is_locked()) {
 -              /* Running prepare-commit transaction across stacked
 -               * devices requires nothing moves, so if rtnl_lock is
 -               * not held, schedule a worker thread to hold rtnl_lock
 -               * while setting attr.
 -               */
 -
 -              return switchdev_port_attr_set_defer(dev, attr);
 -      }
 +      switchdev_trans_init(&trans);
  
        /* Phase I: prepare for attr set. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * but should not commit the attr.
         */
  
 -      attr->trans = SWITCHDEV_TRANS_PREPARE;
 -      err = __switchdev_port_attr_set(dev, attr);
 +      trans.ph_prepare = true;
 +      err = __switchdev_port_attr_set(dev, attr, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */
  
 -              if (err != -EOPNOTSUPP) {
 -                      attr->trans = SWITCHDEV_TRANS_ABORT;
 -                      __switchdev_port_attr_set(dev, attr);
 -              }
 +              if (err != -EOPNOTSUPP)
 +                      switchdev_trans_items_destroy(&trans);
  
                return err;
        }
        * because the driver said everything was OK in phase I.
         */
  
 -      attr->trans = SWITCHDEV_TRANS_COMMIT;
 -      err = __switchdev_port_attr_set(dev, attr);
 +      trans.ph_prepare = false;
 +      err = __switchdev_port_attr_set(dev, attr, &trans);
        WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
             dev->name, attr->id);
 +      switchdev_trans_items_warn_destroy(dev, &trans);
  
        return err;
  }
 +
 +static void switchdev_port_attr_set_deferred(struct net_device *dev,
 +                                           const void *data)
 +{
 +      const struct switchdev_attr *attr = data;
 +      int err;
 +
 +      err = switchdev_port_attr_set_now(dev, attr);
 +      if (err && err != -EOPNOTSUPP)
 +              netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 +                         err, attr->id);
 +}
 +
 +static int switchdev_port_attr_set_defer(struct net_device *dev,
 +                                       const struct switchdev_attr *attr)
 +{
 +      return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
 +                                        switchdev_port_attr_set_deferred);
 +}
 +
 +/**
 + *    switchdev_port_attr_set - Set port attribute
 + *
 + *    @dev: port device
 + *    @attr: attribute to set
 + *
 + *    Use a 2-phase prepare-commit transaction model to ensure
 + *    system is not left in a partially updated state due to
 + *    failure from driver/device.
 + *
 + *    rtnl_lock must be held and the caller must not be in an atomic
 + *    section when the SWITCHDEV_F_DEFER flag is not set.
 + */
 +int switchdev_port_attr_set(struct net_device *dev,
 +                          const struct switchdev_attr *attr)
 +{
 +      if (attr->flags & SWITCHDEV_F_DEFER)
 +              return switchdev_port_attr_set_defer(dev, attr);
 +      ASSERT_RTNL();
 +      return switchdev_port_attr_set_now(dev, attr);
 +}
  EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
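
On the driver side, the phase now arrives via the trans argument instead of attr->trans. A sketch of what a .switchdev_port_attr_set implementation looks like under the reworked signature; the example_* names are hypothetical:

#include <linux/if_bridge.h>
#include <net/switchdev.h>

/* Hypothetical device-programming hook. */
static void example_hw_set_stp(struct net_device *dev, u8 state)
{
	/* ... write the STP state to the device ... */
}

static int example_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct switchdev_trans *trans)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (trans->ph_prepare)
			/* Phase I: validate only; commit nothing yet. */
			return attr->u.stp_state <= BR_STATE_BLOCKING ?
			       0 : -EINVAL;
		/* Phase II: apply; must not fail after a good prepare. */
		example_hw_set_stp(dev, attr->u.stp_state);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}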
  
  static int __switchdev_port_obj_add(struct net_device *dev,
 -                                  struct switchdev_obj *obj)
 +                                  const struct switchdev_obj *obj,
 +                                  struct switchdev_trans *trans)
  {
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        int err = -EOPNOTSUPP;
  
        if (ops && ops->switchdev_port_obj_add)
 -              return ops->switchdev_port_obj_add(dev, obj);
 +              return ops->switchdev_port_obj_add(dev, obj, trans);
  
        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to add object on
         */
  
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              err = __switchdev_port_obj_add(lower_dev, obj);
 +              err = __switchdev_port_obj_add(lower_dev, obj, trans);
                if (err)
                        break;
        }
        return err;
  }
  
 -/**
 - *    switchdev_port_obj_add - Add port object
 - *
 - *    @dev: port device
 - *    @obj: object to add
 - *
 - *    Use a 2-phase prepare-commit transaction model to ensure
 - *    system is not left in a partially updated state due to
 - *    failure from driver/device.
 - *
 - *    rtnl_lock must be held.
 - */
 -int switchdev_port_obj_add(struct net_device *dev, struct switchdev_obj *obj)
 +static int switchdev_port_obj_add_now(struct net_device *dev,
 +                                    const struct switchdev_obj *obj)
  {
 +      struct switchdev_trans trans;
        int err;
  
        ASSERT_RTNL();
  
 +      switchdev_trans_init(&trans);
 +
        /* Phase I: prepare for obj add. Driver/device should fail
         * here if there are going to be issues in the commit phase,
         * such as lack of resources or support.  The driver/device
         * but should not commit the obj.
         */
  
 -      obj->trans = SWITCHDEV_TRANS_PREPARE;
 -      err = __switchdev_port_obj_add(dev, obj);
 +      trans.ph_prepare = true;
 +      err = __switchdev_port_obj_add(dev, obj, &trans);
        if (err) {
                /* Prepare phase failed: abort the transaction.  Any
                 * resources reserved in the prepare phase are
                 * released.
                 */
  
 -              if (err != -EOPNOTSUPP) {
 -                      obj->trans = SWITCHDEV_TRANS_ABORT;
 -                      __switchdev_port_obj_add(dev, obj);
 -              }
 +              if (err != -EOPNOTSUPP)
 +                      switchdev_trans_items_destroy(&trans);
  
                return err;
        }
        * because the driver said everything was OK in phase I.
         */
  
 -      obj->trans = SWITCHDEV_TRANS_COMMIT;
 -      err = __switchdev_port_obj_add(dev, obj);
 +      trans.ph_prepare = false;
 +      err = __switchdev_port_obj_add(dev, obj, &trans);
        WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
 +      switchdev_trans_items_warn_destroy(dev, &trans);
  
        return err;
  }
 -EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
 +
 +static void switchdev_port_obj_add_deferred(struct net_device *dev,
 +                                          const void *data)
 +{
 +      const struct switchdev_obj *obj = data;
 +      int err;
 +
 +      err = switchdev_port_obj_add_now(dev, obj);
 +      if (err && err != -EOPNOTSUPP)
 +              netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 +                         err, obj->id);
 +}
 +
 +static int switchdev_port_obj_add_defer(struct net_device *dev,
 +                                      const struct switchdev_obj *obj)
 +{
 +      return switchdev_deferred_enqueue(dev, obj, sizeof(*obj),
 +                                        switchdev_port_obj_add_deferred);
 +}
  
  /**
 - *    switchdev_port_obj_del - Delete port object
 + *    switchdev_port_obj_add - Add port object
   *
   *    @dev: port device
 - *    @obj: object to delete
 + *    @obj: object to add
 + *
 + *    Use a 2-phase prepare-commit transaction model to ensure
 + *    system is not left in a partially updated state due to
 + *    failure from driver/device.
 + *
 + *    rtnl_lock must be held and the caller must not be in an atomic
 + *    section when the SWITCHDEV_F_DEFER flag is not set.
   */
 -int switchdev_port_obj_del(struct net_device *dev, struct switchdev_obj *obj)
 +int switchdev_port_obj_add(struct net_device *dev,
 +                         const struct switchdev_obj *obj)
 +{
 +      if (obj->flags & SWITCHDEV_F_DEFER)
 +              return switchdev_port_obj_add_defer(dev, obj);
 +      ASSERT_RTNL();
 +      return switchdev_port_obj_add_now(dev, obj);
 +}
 +EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
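
Callers now embed the generic switchdev_obj in a type-specific wrapper and pass the embedded obj, exactly as the bridge afspec code further down does. A minimal sketch:

#include <net/switchdev.h>

/* Sketch: add a single VLAN to a port via the wrapped object. */
static int example_add_vid(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid_begin = vid,
		.vid_end = vid,
	};

	/* Runs the prepare/commit transaction now; setting
	 * SWITCHDEV_F_DEFER in vlan.obj.flags would queue it instead.
	 */
	return switchdev_port_obj_add(dev, &vlan.obj);
}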
 +
 +static int switchdev_port_obj_del_now(struct net_device *dev,
 +                                    const struct switchdev_obj *obj)
  {
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
         */
  
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              err = switchdev_port_obj_del(lower_dev, obj);
 +              err = switchdev_port_obj_del_now(lower_dev, obj);
                if (err)
                        break;
        }
  
        return err;
  }
 +
 +static void switchdev_port_obj_del_deferred(struct net_device *dev,
 +                                          const void *data)
 +{
 +      const struct switchdev_obj *obj = data;
 +      int err;
 +
 +      err = switchdev_port_obj_del_now(dev, obj);
 +      if (err && err != -EOPNOTSUPP)
 +              netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
 +                         err, obj->id);
 +}
 +
 +static int switchdev_port_obj_del_defer(struct net_device *dev,
 +                                      const struct switchdev_obj *obj)
 +{
 +      return switchdev_deferred_enqueue(dev, obj, sizeof(*obj),
 +                                        switchdev_port_obj_del_deferred);
 +}
 +
 +/**
 + *    switchdev_port_obj_del - Delete port object
 + *
 + *    @dev: port device
 + *    @obj: object to delete
 + *
 + *    rtnl_lock must be held and the caller must not be in an atomic
 + *    section when the SWITCHDEV_F_DEFER flag is not set.
 + */
 +int switchdev_port_obj_del(struct net_device *dev,
 +                         const struct switchdev_obj *obj)
 +{
 +      if (obj->flags & SWITCHDEV_F_DEFER)
 +              return switchdev_port_obj_del_defer(dev, obj);
 +      ASSERT_RTNL();
 +      return switchdev_port_obj_del_now(dev, obj);
 +}
  EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
  
  /**
   *    switchdev_port_obj_dump - Dump port objects
   *
   *    @dev: port device
   *    @obj: object to dump
 + *    @cb: function to call with a filled object
 + *
 + *    rtnl_lock must be held.
   */
 -int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj)
 +int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 +                          switchdev_obj_dump_cb_t *cb)
  {
        const struct switchdev_ops *ops = dev->switchdev_ops;
        struct net_device *lower_dev;
        struct list_head *iter;
        int err = -EOPNOTSUPP;
  
 +      ASSERT_RTNL();
 +
        if (ops && ops->switchdev_port_obj_dump)
 -              return ops->switchdev_port_obj_dump(dev, obj);
 +              return ops->switchdev_port_obj_dump(dev, obj, cb);
  
        /* Switch device port(s) may be stacked under
         * bond/team/vlan dev, so recurse down to dump objects on
         */
  
        netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              err = switchdev_port_obj_dump(lower_dev, obj);
 +              err = switchdev_port_obj_dump(lower_dev, obj, cb);
                break;
        }
  
@@@ -614,7 -398,7 +615,7 @@@ int call_switchdev_notifiers(unsigned l
  EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
  
  struct switchdev_vlan_dump {
 -      struct switchdev_obj obj;
 +      struct switchdev_obj_port_vlan vlan;
        struct sk_buff *skb;
        u32 filter_mask;
        u16 flags;
        u16 end;
  };
  
 -static int switchdev_port_vlan_dump_put(struct net_device *dev,
 -                                      struct switchdev_vlan_dump *dump)
 +static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
  {
        struct bridge_vlan_info vinfo;
  
        return 0;
  }
  
 -static int switchdev_port_vlan_dump_cb(struct net_device *dev,
 -                                     struct switchdev_obj *obj)
 +static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
  {
 +      struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
        struct switchdev_vlan_dump *dump =
 -              container_of(obj, struct switchdev_vlan_dump, obj);
 -      struct switchdev_obj_vlan *vlan = &dump->obj.u.vlan;
 +              container_of(vlan, struct switchdev_vlan_dump, vlan);
        int err = 0;
  
        if (vlan->vid_begin > vlan->vid_end)
                for (dump->begin = dump->end = vlan->vid_begin;
                     dump->begin <= vlan->vid_end;
                     dump->begin++, dump->end++) {
 -                      err = switchdev_port_vlan_dump_put(dev, dump);
 +                      err = switchdev_port_vlan_dump_put(dump);
                        if (err)
                                return err;
                }
                                /* prepend */
                                dump->begin = vlan->vid_begin;
                        } else {
 -                              err = switchdev_port_vlan_dump_put(dev, dump);
 +                              err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
                                /* append */
                                dump->end = vlan->vid_end;
                        } else {
 -                              err = switchdev_port_vlan_dump_put(dev, dump);
 +                              err = switchdev_port_vlan_dump_put(dump);
                                dump->flags = vlan->flags;
                                dump->begin = vlan->vid_begin;
                                dump->end = vlan->vid_end;
@@@ -708,7 -494,10 +709,7 @@@ static int switchdev_port_vlan_fill(str
                                    u32 filter_mask)
  {
        struct switchdev_vlan_dump dump = {
 -              .obj = {
 -                      .id = SWITCHDEV_OBJ_PORT_VLAN,
 -                      .cb = switchdev_port_vlan_dump_cb,
 -              },
 +              .vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
                .skb = skb,
                .filter_mask = filter_mask,
        };
  
        if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
            (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
 -              err = switchdev_port_obj_dump(dev, &dump.obj);
 +              err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
 +                                            switchdev_port_vlan_dump_cb);
                if (err)
                        goto err_out;
                if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
                        /* last one */
 -                      err = switchdev_port_vlan_dump_put(dev, &dump);
 +                      err = switchdev_port_vlan_dump_put(&dump);
        }
  
  err_out:
@@@ -742,7 -530,7 +743,7 @@@ int switchdev_port_bridge_getlink(struc
                                  int nlflags)
  {
        struct switchdev_attr attr = {
 -              .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
 +              .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u16 mode = BRIDGE_MODE_UNDEF;
        u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
@@@ -763,7 -551,7 +764,7 @@@ static int switchdev_port_br_setflag(st
                                     unsigned long brport_flag)
  {
        struct switchdev_attr attr = {
 -              .id = SWITCHDEV_ATTR_PORT_BRIDGE_FLAGS,
 +              .id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
        };
        u8 flag = nla_get_u8(nlattr);
        int err;
@@@ -830,13 -618,14 +831,13 @@@ static int switchdev_port_br_setlink_pr
  static int switchdev_port_br_afspec(struct net_device *dev,
                                    struct nlattr *afspec,
                                    int (*f)(struct net_device *dev,
 -                                           struct switchdev_obj *obj))
 +                                           const struct switchdev_obj *obj))
  {
        struct nlattr *attr;
        struct bridge_vlan_info *vinfo;
 -      struct switchdev_obj obj = {
 -              .id = SWITCHDEV_OBJ_PORT_VLAN,
 +      struct switchdev_obj_port_vlan vlan = {
 +              .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
        };
 -      struct switchdev_obj_vlan *vlan = &obj.u.vlan;
        int rem;
        int err;
  
                if (nla_len(attr) != sizeof(struct bridge_vlan_info))
                        return -EINVAL;
                vinfo = nla_data(attr);
 -              vlan->flags = vinfo->flags;
+               if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
+                       return -EINVAL;
 +              vlan.flags = vinfo->flags;
                if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
 -                      if (vlan->vid_begin)
 +                      if (vlan.vid_begin)
 +                              return -EINVAL;
 +                      vlan.vid_begin = vinfo->vid;
 +                      /* don't allow range of pvids */
 +                      if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
                                return -EINVAL;
 -                      vlan->vid_begin = vinfo->vid;
                } else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
 -                      if (!vlan->vid_begin)
 +                      if (!vlan.vid_begin)
                                return -EINVAL;
 -                      vlan->vid_end = vinfo->vid;
 -                      if (vlan->vid_end <= vlan->vid_begin)
 +                      vlan.vid_end = vinfo->vid;
 +                      if (vlan.vid_end <= vlan.vid_begin)
                                return -EINVAL;
 -                      err = f(dev, &obj);
 +                      err = f(dev, &vlan.obj);
                        if (err)
                                return err;
 -                      memset(vlan, 0, sizeof(*vlan));
 +                      memset(&vlan, 0, sizeof(vlan));
                } else {
 -                      if (vlan->vid_begin)
 +                      if (vlan.vid_begin)
                                return -EINVAL;
 -                      vlan->vid_begin = vinfo->vid;
 -                      vlan->vid_end = vinfo->vid;
 -                      err = f(dev, &obj);
 +                      vlan.vid_begin = vinfo->vid;
 +                      vlan.vid_end = vinfo->vid;
 +                      err = f(dev, &vlan.obj);
                        if (err)
                                return err;
 -                      memset(vlan, 0, sizeof(*vlan));
 +                      memset(&vlan, 0, sizeof(vlan));
                }
        }
  
@@@ -954,13 -742,15 +957,13 @@@ int switchdev_port_fdb_add(struct ndms
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid, u16 nlm_flags)
  {
 -      struct switchdev_obj obj = {
 -              .id = SWITCHDEV_OBJ_PORT_FDB,
 -              .u.fdb = {
 -                      .addr = addr,
 -                      .vid = vid,
 -              },
 +      struct switchdev_obj_port_fdb fdb = {
 +              .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
 +              .vid = vid,
        };
  
 -      return switchdev_port_obj_add(dev, &obj);
 +      ether_addr_copy(fdb.addr, addr);
 +      return switchdev_port_obj_add(dev, &fdb.obj);
  }
  EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
  
@@@ -979,29 -769,30 +982,29 @@@ int switchdev_port_fdb_del(struct ndms
                           struct net_device *dev, const unsigned char *addr,
                           u16 vid)
  {
 -      struct switchdev_obj obj = {
 -              .id = SWITCHDEV_OBJ_PORT_FDB,
 -              .u.fdb = {
 -                      .addr = addr,
 -                      .vid = vid,
 -              },
 +      struct switchdev_obj_port_fdb fdb = {
 +              .obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
 +              .vid = vid,
        };
  
 -      return switchdev_port_obj_del(dev, &obj);
 +      ether_addr_copy(fdb.addr, addr);
 +      return switchdev_port_obj_del(dev, &fdb.obj);
  }
  EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
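
Note that switchdev_obj_port_fdb now embeds the MAC address as an array, so callers copy it in with ether_addr_copy() rather than pointing at caller-owned memory; this is also what lets the deferred path safely memcpy() the whole object into its queue item.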
  
  struct switchdev_fdb_dump {
 -      struct switchdev_obj obj;
 +      struct switchdev_obj_port_fdb fdb;
 +      struct net_device *dev;
        struct sk_buff *skb;
        struct netlink_callback *cb;
        int idx;
  };
  
 -static int switchdev_port_fdb_dump_cb(struct net_device *dev,
 -                                    struct switchdev_obj *obj)
 +static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
  {
 +      struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
        struct switchdev_fdb_dump *dump =
 -              container_of(obj, struct switchdev_fdb_dump, obj);
 +              container_of(fdb, struct switchdev_fdb_dump, fdb);
        u32 portid = NETLINK_CB(dump->cb->skb).portid;
        u32 seq = dump->cb->nlh->nlmsg_seq;
        struct nlmsghdr *nlh;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = NTF_SELF;
        ndm->ndm_type    = 0;
 -      ndm->ndm_ifindex = dev->ifindex;
 -      ndm->ndm_state   = obj->u.fdb.ndm_state;
 +      ndm->ndm_ifindex = dump->dev->ifindex;
 +      ndm->ndm_state   = fdb->ndm_state;
  
 -      if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, obj->u.fdb.addr))
 +      if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
                goto nla_put_failure;
  
 -      if (obj->u.fdb.vid && nla_put_u16(dump->skb, NDA_VLAN, obj->u.fdb.vid))
 +      if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
                goto nla_put_failure;
  
        nlmsg_end(dump->skb, nlh);
@@@ -1057,14 -848,16 +1060,14 @@@ int switchdev_port_fdb_dump(struct sk_b
                            struct net_device *filter_dev, int idx)
  {
        struct switchdev_fdb_dump dump = {
 -              .obj = {
 -                      .id = SWITCHDEV_OBJ_PORT_FDB,
 -                      .cb = switchdev_port_fdb_dump_cb,
 -              },
 +              .fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
 +              .dev = dev,
                .skb = skb,
                .cb = cb,
                .idx = idx,
        };
  
 -      switchdev_port_obj_dump(dev, &dump.obj);
 +      switchdev_port_obj_dump(dev, &dump.fdb.obj, switchdev_port_fdb_dump_cb);
        return dump.idx;
  }
  EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
@@@ -1095,14 -888,12 +1098,14 @@@ static struct net_device *switchdev_get
  static struct net_device *switchdev_get_dev_by_nhs(struct fib_info *fi)
  {
        struct switchdev_attr attr = {
 -              .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 +              .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
        };
        struct switchdev_attr prev_attr;
        struct net_device *dev = NULL;
        int nhsel;
  
 +      ASSERT_RTNL();
 +
        /* For this route, all nexthop devs must be on the same switch. */
  
        for (nhsel = 0; nhsel < fi->fib_nhs; nhsel++) {
  int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
                           u8 tos, u8 type, u32 nlflags, u32 tb_id)
  {
 -      struct switchdev_obj fib_obj = {
 -              .id = SWITCHDEV_OBJ_IPV4_FIB,
 -              .u.ipv4_fib = {
 -                      .dst = dst,
 -                      .dst_len = dst_len,
 -                      .fi = fi,
 -                      .tos = tos,
 -                      .type = type,
 -                      .nlflags = nlflags,
 -                      .tb_id = tb_id,
 -              },
 +      struct switchdev_obj_ipv4_fib ipv4_fib = {
 +              .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
 +              .dst = dst,
 +              .dst_len = dst_len,
 +              .tos = tos,
 +              .type = type,
 +              .nlflags = nlflags,
 +              .tb_id = tb_id,
        };
        struct net_device *dev;
        int err = 0;
  
 +      memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
 +
        /* Don't offload route if using custom ip rules or if
         * IPv4 FIB offloading has been disabled completely.
         */
        if (!dev)
                return 0;
  
 -      err = switchdev_port_obj_add(dev, &fib_obj);
 +      err = switchdev_port_obj_add(dev, &ipv4_fib.obj);
        if (!err)
                fi->fib_flags |= RTNH_F_OFFLOAD;
  
@@@ -1197,20 -989,21 +1200,20 @@@ EXPORT_SYMBOL_GPL(switchdev_fib_ipv4_ad
  int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
                           u8 tos, u8 type, u32 tb_id)
  {
 -      struct switchdev_obj fib_obj = {
 -              .id = SWITCHDEV_OBJ_IPV4_FIB,
 -              .u.ipv4_fib = {
 -                      .dst = dst,
 -                      .dst_len = dst_len,
 -                      .fi = fi,
 -                      .tos = tos,
 -                      .type = type,
 -                      .nlflags = 0,
 -                      .tb_id = tb_id,
 -              },
 +      struct switchdev_obj_ipv4_fib ipv4_fib = {
 +              .obj.id = SWITCHDEV_OBJ_ID_IPV4_FIB,
 +              .dst = dst,
 +              .dst_len = dst_len,
 +              .tos = tos,
 +              .type = type,
 +              .nlflags = 0,
 +              .tb_id = tb_id,
        };
        struct net_device *dev;
        int err = 0;
  
 +      memcpy(&ipv4_fib.fi, fi, sizeof(ipv4_fib.fi));
 +
        if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;
  
        if (!dev)
                return 0;
  
 -      err = switchdev_port_obj_del(dev, &fib_obj);
 +      err = switchdev_port_obj_del(dev, &ipv4_fib.obj);
        if (!err)
                fi->fib_flags &= ~RTNH_F_OFFLOAD;
  
@@@ -1250,11 -1043,11 +1253,11 @@@ static bool switchdev_port_same_parent_
                                          struct net_device *b)
  {
        struct switchdev_attr a_attr = {
 -              .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 +              .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                .flags = SWITCHDEV_F_NO_RECURSE,
        };
        struct switchdev_attr b_attr = {
 -              .id = SWITCHDEV_ATTR_PORT_PARENT_ID,
 +              .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
                .flags = SWITCHDEV_F_NO_RECURSE,
        };
  
@@@ -1333,11 -1126,10 +1336,11 @@@ void switchdev_port_fwd_mark_set(struc
        u32 mark = dev->ifindex;
        u32 reset_mark = 0;
  
 -      if (group_dev && joining) {
 -              mark = switchdev_port_fwd_mark_get(dev, group_dev);
 -      } else if (group_dev && !joining) {
 -              if (dev->offload_fwd_mark == mark)
 +      if (group_dev) {
 +              ASSERT_RTNL();
 +              if (joining)
 +                      mark = switchdev_port_fwd_mark_get(dev, group_dev);
 +              else if (dev->offload_fwd_mark == mark)
                        /* Ohoh, this port was the mark reference port,
                         * but it's leaving the group, so reset the
                         * mark for the remaining ports in the group.
diff --combined net/tipc/msg.h
index c784ba05f2aac4d23c916ed0b3c7211fe664d522,5351a3f97e8ecf17e545459344a8b315f522045a..9f0ef54be6129a4825ee73212dd5ba985ee76872
@@@ -357,7 -357,7 +357,7 @@@ static inline u32 msg_importance(struc
        if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
                return usr;
        if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
-               return msg_bits(m, 5, 13, 0x7);
+               return msg_bits(m, 9, 0, 0x7);
        return TIPC_SYSTEM_IMPORTANCE;
  }
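
Both this hunk and the msg_set_importance() hunk below move the importance field from bits 13..15 of header word 5 to bits 0..2 of word 9. For orientation, the msg_bits()/msg_set_bits() accessors defined earlier in msg.h follow the usual word/offset/mask pattern; roughly (a sketch of the getter only):

/* Rough shape of the field getter (the real one is defined earlier in
 * net/tipc/msg.h): w picks a 32-bit header word, pos the bit offset
 * within it, mask the field width.
 */
static inline u32 example_msg_bits(struct tipc_msg *m, u32 w,
				   u32 pos, u32 mask)
{
	return (msg_word(m, w) >> pos) & mask;
}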
  
@@@ -366,7 -366,7 +366,7 @@@ static inline void msg_set_importance(s
        int usr = msg_user(m);
  
        if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
-               msg_set_bits(m, 5, 13, 0x7, i);
+               msg_set_bits(m, 9, 0, 0x7, i);
        else if (i < TIPC_SYSTEM_IMPORTANCE)
                msg_set_user(m, i);
        else
@@@ -790,8 -790,6 +790,8 @@@ int tipc_msg_build(struct tipc_msg *mhd
                   int offset, int dsz, int mtu, struct sk_buff_head *list);
  bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
  struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 +void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
 +                           struct sk_buff *skb);
  
  static inline u16 buf_seqno(struct sk_buff *skb)
  {
@@@ -864,6 -862,38 +864,6 @@@ static inline struct sk_buff *tipc_skb_
        return skb;
  }
  
 -/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
 - * @list: list to be appended to
 - * @skb: buffer to add
 - * Returns true if queue should treated further, otherwise false
 - */
 -static inline bool __tipc_skb_queue_sorted(struct sk_buff_head *list,
 -                                         struct sk_buff *skb)
 -{
 -      struct sk_buff *_skb, *tmp;
 -      struct tipc_msg *hdr = buf_msg(skb);
 -      u16 seqno = msg_seqno(hdr);
 -
 -      if (skb_queue_empty(list) || (msg_user(hdr) == LINK_PROTOCOL)) {
 -              __skb_queue_head(list, skb);
 -              return true;
 -      }
 -      if (likely(less(seqno, buf_seqno(skb_peek(list))))) {
 -              __skb_queue_head(list, skb);
 -              return true;
 -      }
 -      if (!more(seqno, buf_seqno(skb_peek_tail(list)))) {
 -              skb_queue_walk_safe(list, _skb, tmp) {
 -                      if (likely(less(seqno, buf_seqno(_skb)))) {
 -                              __skb_queue_before(list, _skb, skb);
 -                              return true;
 -                      }
 -              }
 -      }
 -      __skb_queue_tail(list, skb);
 -      return false;
 -}
 -
  /* tipc_skb_queue_splice_tail - append an skb list to lock protected list
   * @list: the new list to append. Not lock protected
   * @head: target list. Lock protected.
diff --combined net/tipc/node.c
index d1f340116c84c510fb80d22e50f1242d60ea4652,2c32a83037a3614ef09fb4b29c8f3337dcdf0f45..2670751d0e2e935b0b9b36efc3455f49f6680fd9
@@@ -317,11 -317,7 +317,11 @@@ static void __tipc_node_link_up(struct 
        struct tipc_link *ol = node_active_link(n, 0);
        struct tipc_link *nl = n->links[bearer_id].link;
  
 -      if (!nl || !tipc_link_is_up(nl))
 +      if (!nl)
 +              return;
 +
 +      tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
 +      if (!tipc_link_is_up(nl))
                return;
  
        n->working_links++;
@@@ -420,13 -416,7 +420,13 @@@ static void __tipc_node_link_down(struc
        }
  
        if (!tipc_node_is_up(n)) {
 +              if (tipc_link_peer_is_down(l))
 +                      tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
 +              tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
 +              tipc_link_fsm_evt(l, LINK_RESET_EVT);
                tipc_link_reset(l);
 +              tipc_link_build_reset_msg(l, xmitq);
 +              *maddr = &n->links[*bearer_id].maddr;
                node_lost_contact(n, &le->inputq);
                return;
        }
        n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1);
        tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
        tipc_link_reset(l);
 +      tipc_link_fsm_evt(l, LINK_RESET_EVT);
        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
        tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
        *maddr = &n->links[tnl->bearer_id].maddr;
  static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
  {
        struct tipc_link_entry *le = &n->links[bearer_id];
 +      struct tipc_link *l = le->link;
        struct tipc_media_addr *maddr;
        struct sk_buff_head xmitq;
  
 +      if (!l)
 +              return;
 +
        __skb_queue_head_init(&xmitq);
  
        tipc_node_lock(n);
 -      __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
 -      if (delete && le->link) {
 -              kfree(le->link);
 -              le->link = NULL;
 -              n->link_cnt--;
 +      if (!tipc_link_is_establishing(l)) {
 +              __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
 +              if (delete) {
 +                      kfree(l);
 +                      le->link = NULL;
 +                      n->link_cnt--;
 +              }
 +      } else {
 +              /* Defuse pending tipc_node_link_up() */
 +              tipc_link_fsm_evt(l, LINK_RESET_EVT);
        }
        tipc_node_unlock(n);
 -
        tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
  }
@@@ -586,7 -567,6 +586,7 @@@ void tipc_node_check_dest(struct net *n
                        goto exit;
                }
                tipc_link_reset(l);
 +              tipc_link_fsm_evt(l, LINK_RESET_EVT);
                if (n->state == NODE_FAILINGOVER)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
                le->link = l;
        memcpy(&le->maddr, maddr, sizeof(*maddr));
  exit:
        tipc_node_unlock(n);
 -      if (reset)
 +      if (reset && !tipc_link_is_reset(l))
                tipc_node_link_down(n, b->identity, false);
        tipc_node_put(n);
  }
@@@ -706,10 -686,10 +706,10 @@@ static void tipc_node_fsm_evt(struct ti
                        break;
                case SELF_ESTABL_CONTACT_EVT:
                case PEER_LOST_CONTACT_EVT:
 -                      break;
                case NODE_SYNCH_END_EVT:
 -              case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_BEGIN_EVT:
 +                      break;
 +              case NODE_SYNCH_BEGIN_EVT:
                case NODE_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
@@@ -869,6 -849,9 +869,6 @@@ static void node_lost_contact(struct ti
                        tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
        }
  
 -      /* Prevent re-contact with node until cleanup is done */
 -      tipc_node_fsm_evt(n_ptr, SELF_LOST_CONTACT_EVT);
 -
        /* Notify publications from this node */
        n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
  
@@@ -1133,7 -1116,7 +1133,7 @@@ static bool tipc_node_check_state(struc
        }
  
        /* Ignore duplicate packets */
-       if (less(oseqno, rcv_nxt))
+       if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
                return true;
  
        /* Initiate or update failover mode if applicable */
        if (!pl || !tipc_link_is_up(pl))
                return true;
  
-       /* Initiate or update synch mode if applicable */
-       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG)) {
+       /* Initiate synch mode if applicable */
+       if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
                syncpt = iseqno + exp_pkts - 1;
                if (!tipc_link_is_up(l)) {
                        tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT);