Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 1 Sep 2017 19:49:03 +0000 (12:49 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 1 Sep 2017 19:49:03 +0000 (12:49 -0700)
Pull networking fixes from David Miller:

 1) Fix handling of pinned BPF map nodes in hash of maps, from Daniel
    Borkmann.

 2) IPSEC ESP error paths leak memory, from Steffen Klassert.

 3) We need an RCU grace period before freeing fib6_node objects, from
    Wei Wang.

 4) Must check skb_put_padto() return value in HSR driver, from Florian
    Fainelli.

 5) Fix oops on PHY probe failure in ftgmac100 driver, from Andrew
    Jeffery.

 6) Fix infinite loop in UDP queue when using SO_PEEK_OFF, from Eric
    Dumazet.

 7) Use after free when tcf_chain_destroy() is called multiple times,
    from Jiri Pirko.

 8) Fix KSZ DSA tag layer multiple free of SKBS, from Florian Fainelli.

 9) Fix leak of uninitialized memory in sctp_get_sctp_info(),
    inet_diag_msg_sctpladdrs_fill() and inet_diag_msg_sctpaddrs_fill().
    From Stefano Brivio.

10) L2TP tunnel refcount fixes from Guillaume Nault.

11) Don't leak UDP secpath in udp_set_dev_scratch(), from Yossi
    Kuperman.

12) Revert a PHY layer change wrt. handling of PHY_HALTED state in
    phy_stop_machine(), it causes regressions for multiple people. From
    Florian Fainelli.

13) When packets are sent out of br0 we have to clear the
    offload_fwd_mark value.

14) Several NULL pointer deref fixes in packet schedulers when their
    ->init() routine fails. From Nikolay Aleksandrov.

15) Aquantia devices cannot checksum offload correctly when the packet
    is <= 60 bytes. From Pavel Belous.

16) Fix vnet header access past end of buffer in AF_PACKET, from
    Benjamin Poirier.

17) Double free in probe error paths of nfp driver, from Dan Carpenter.

18) QoS capability not checked properly in DCB init paths of mlx5
    driver, from Huy Nguyen.

19) Fix conflicts between firmware load failure and health_care timer in
    mlx5, also from Huy Nguyen.

20) Fix dangling page pointer when DMA mapping errors occur in mlx5,
    from Eran Ben Elisha.

21) ->ndo_setup_tc() in bnxt_en driver doesn't count rings properly,
    from Michael Chan.

22) Missing MSIX vector free in bnxt_en, also from Michael Chan.

23) Refcount leak in xfrm layer when using sk_policy, from Lorenzo
    Colitti.

24) Fix copy of uninitialized data in qlge driver, from Arnd Bergmann.

25) bpf_setsockopt() erroneously always returns -EINVAL even on
    success. Fix from Yuchung Cheng. (A minimal sketch of this bug
    class appears after this list.)

26) tipc_rcv() needs to linearize the SKB before parsing the inner
    headers, from Parthasarathy Bhuvaragan.

27) Fix deadlock between link status updates and link removal in netvsc
    driver, from Stephen Hemminger.

28) Missed locking of page fragment handling in ESP output, from Steffen
    Klassert.

29) Fix refcnt leak in ebpf congestion control code, from Sabrina
    Dubroca.

30) sxgbe_probe_config_dt() doesn't check devm_kzalloc()'s return value,
    from Christophe Jaillet.

31) Fix missing ipv6 rx_dst_cookie update when rx_dst is updated during
    early demux, from Paolo Abeni.

32) Several info leaks in xfrm_user layer, from Mathias Krause.

33) Fix out of bounds read in cxgb4 driver, from Stefano Brivio.

34) Properly propagate obsolete state of route upwards in ipv6 so that
    upper holders like xfrm can see it. From Xin Long.
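
As a minimal sketch of the bug class behind fix 25 (plain userspace C,
not the kernel code; the names here are illustrative): an error status
initialized to -EINVAL that the success path never clears, so callers
see failure even when the operation worked.

    #include <errno.h>
    #include <stdio.h>

    /* Buggy shape: ret starts at -EINVAL and the success path never clears it. */
    static int set_option_buggy(int optname)
    {
            int ret = -EINVAL;

            if (optname == 1) {
                    /* option applied successfully, but ret is left untouched */
            }
            return ret;             /* reports failure even on success */
    }

    /* Fixed shape: every path sets its own return value explicitly. */
    static int set_option_fixed(int optname)
    {
            if (optname != 1)
                    return -EINVAL;
            /* apply option */
            return 0;
    }

    int main(void)
    {
            printf("buggy=%d fixed=%d\n", set_option_buggy(1), set_option_fixed(1));
            return 0;
    }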

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (118 commits)
  udp: fix secpath leak
  bridge: switchdev: Clear forward mark when transmitting packet
  mlxsw: spectrum: Forbid linking to devices that have uppers
  wl1251: add a missing spin_lock_init()
  Revert "net: phy: Correctly process PHY_HALTED in phy_stop_machine()"
  net: dsa: bcm_sf2: Fix number of CFP entries for BCM7278
  kcm: do not attach PF_KCM sockets to avoid deadlock
  sch_tbf: fix two null pointer dereferences on init failure
  sch_sfq: fix null pointer dereference on init failure
  sch_netem: avoid null pointer deref on init failure
  sch_fq_codel: avoid double free on init failure
  sch_cbq: fix null pointer dereferences on init failure
  sch_hfsc: fix null pointer deref and double free on init failure
  sch_hhf: fix null pointer dereference on init failure
  sch_multiq: fix double free on init failure
  sch_htb: fix crash on init failure
  net/mlx5e: Fix CQ moderation mode not set properly
  net/mlx5e: Fix inline header size for small packets
  net/mlx5: E-Switch, Unload the representors in the correct order
  net/mlx5e: Properly resolve TC offloaded ipv6 vxlan tunnel source address
  ...

119 files changed:
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/bcm_sf2_cfp.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/aq_utils.h
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/fman/mac.c
drivers/net/ethernet/marvell/mvpp2.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/srq.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfp_main.c
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/netronome/nfp/nfp_net_main.c
drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/ti/cpsw-common.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macsec.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/usb/cdc_ncm.c
drivers/net/virtio_net.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/ti/wl1251/main.c
include/linux/mlx5/driver.h
include/linux/netdevice.h
include/linux/skbuff.h
include/net/ip6_fib.h
include/net/sch_generic.h
include/net/tcp.h
include/net/udp.h
kernel/bpf/hashtab.c
net/bridge/br_device.c
net/bridge/br_switchdev.c
net/core/datagram.c
net/core/dev.c
net/core/filter.c
net/core/skbuff.c
net/dsa/dsa2.c
net/dsa/tag_ksz.c
net/dsa/tag_trailer.c
net/hsr/hsr_device.c
net/ipv4/esp4.c
net/ipv4/esp4_offload.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/esp6.c
net/ipv6/esp6_offload.c
net/ipv6/ip6_fib.c
net/ipv6/ipv6_sockglue.c
net/ipv6/output_core.c
net/ipv6/route.c
net/ipv6/udp.c
net/kcm/kcmsock.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_core.h
net/l2tp/l2tp_netlink.c
net/netfilter/nf_nat_core.c
net/netfilter/nft_compat.c
net/netfilter/nft_limit.c
net/packet/af_packet.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_fq_codel.c
net/sched/sch_generic.c
net/sched/sch_hfsc.c
net/sched/sch_hhf.c
net/sched/sch_htb.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c
net/sctp/sctp_diag.c
net/sctp/socket.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/msg.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/subscr.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c

index 648f91b58d1e260a71de7db12dc39627bc40a35c..9b6ce7c3f6c3228c88fced286a31c3be0c19900f 100644 (file)
@@ -1048,6 +1048,7 @@ struct bcm_sf2_of_data {
        u32 type;
        const u16 *reg_offsets;
        unsigned int core_reg_align;
+       unsigned int num_cfp_rules;
 };
 
 /* Register offsets for the SWITCH_REG_* block */
@@ -1071,6 +1072,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
        .type           = BCM7445_DEVICE_ID,
        .core_reg_align = 0,
        .reg_offsets    = bcm_sf2_7445_reg_offsets,
+       .num_cfp_rules  = 256,
 };
 
 static const u16 bcm_sf2_7278_reg_offsets[] = {
@@ -1093,6 +1095,7 @@ static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
        .type           = BCM7278_DEVICE_ID,
        .core_reg_align = 1,
        .reg_offsets    = bcm_sf2_7278_reg_offsets,
+       .num_cfp_rules  = 128,
 };
 
 static const struct of_device_id bcm_sf2_of_match[] = {
@@ -1149,6 +1152,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        priv->type = data->type;
        priv->reg_offsets = data->reg_offsets;
        priv->core_reg_align = data->core_reg_align;
+       priv->num_cfp_rules = data->num_cfp_rules;
 
        /* Auto-detection using standard registers will not work, so
         * provide an indication of what kind of device we are for
index 7d3030e04f1127bcfd8e98d32d0b0837093f52da..7f9125eef3df42038fdce368d4a5fe2690d3276c 100644 (file)
@@ -72,6 +72,7 @@ struct bcm_sf2_priv {
        u32                             type;
        const u16                       *reg_offsets;
        unsigned int                    core_reg_align;
+       unsigned int                    num_cfp_rules;
 
        /* spinlock protecting access to the indirect registers */
        spinlock_t                      indir_lock;
index 2fb32d67065f8aa164b3ea03d5cb914120c4e0c6..8a1da7e677070d33571748c2b511d4e38169324f 100644 (file)
@@ -98,7 +98,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
 {
        u32 reg;
 
-       WARN_ON(addr >= CFP_NUM_RULES);
+       WARN_ON(addr >= priv->num_cfp_rules);
 
        reg = core_readl(priv, CORE_CFP_ACC);
        reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
@@ -109,7 +109,7 @@ static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
 static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
 {
        /* Entry #0 is reserved */
-       return CFP_NUM_RULES - 1;
+       return priv->num_cfp_rules - 1;
 }
 
 static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
@@ -523,7 +523,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
                if (!(reg & OP_STR_DONE))
                        break;
 
-       } while (index < CFP_NUM_RULES);
+       } while (index < priv->num_cfp_rules);
 
        /* Put the TCAM size here */
        nfc->data = bcm_sf2_cfp_rule_size(priv);
@@ -544,7 +544,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
        case ETHTOOL_GRXCLSRLCNT:
                /* Subtract the default, unusable rule */
                nfc->rule_cnt = bitmap_weight(priv->cfp.used,
-                                             CFP_NUM_RULES) - 1;
+                                             priv->num_cfp_rules) - 1;
                /* We support specifying rule locations */
                nfc->data |= RX_CLS_LOC_SPECIAL;
                break;
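
The three bcm_sf2 hunks above replace the compile-time CFP_NUM_RULES
constant with a per-chip value carried in the OF match data. A
self-contained C sketch of that pattern (compatible strings and names
are illustrative, not the driver's):

    #include <stdio.h>

    /* Per-chip limits selected at probe time instead of one global constant. */
    struct chip_data {
            const char   *compatible;      /* hypothetical DT compatible string */
            unsigned int  num_cfp_rules;
    };

    static const struct chip_data chips[] = {
            { "vendor,chip-a", 256 },
            { "vendor,chip-b", 128 },
    };

    /* Entry #0 is reserved, as in bcm_sf2_cfp_rule_size() above. */
    static unsigned int usable_rules(const struct chip_data *d)
    {
            return d->num_cfp_rules - 1;
    }

    int main(void)
    {
            for (unsigned int i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
                    printf("%s: %u usable rules\n",
                           chips[i].compatible, usable_rules(&chips[i]));
            return 0;
    }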
index 1d307f2def2d910eff7c983d9866fa88b331d869..6e253d913fe29df271ebae8726bbd5062d41ccb3 100644 (file)
@@ -1661,21 +1661,21 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
        return 0;
 }
 
-static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
 {
        int ret;
 
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
-               return 0;
+               return;
 
        if (!IS_ENABLED(CONFIG_MDIO_XGENE))
-               return 0;
+               return;
 
        ret = xgene_enet_phy_connect(pdata->ndev);
        if (!ret)
                pdata->mdio_driver = true;
 
-       return 0;
+       return;
 }
 
 static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
@@ -1779,10 +1779,6 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
        if (ret)
                return ret;
 
-       ret = xgene_enet_check_phy_handle(pdata);
-       if (ret)
-               return ret;
-
        xgene_enet_gpiod_get(pdata);
 
        pdata->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2097,9 +2093,11 @@ static int xgene_enet_probe(struct platform_device *pdev)
                goto err;
        }
 
+       xgene_enet_check_phy_handle(pdata);
+
        ret = xgene_enet_init_hw(pdata);
        if (ret)
-               goto err;
+               goto err2;
 
        link_state = pdata->mac_ops->link_state;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -2117,29 +2115,30 @@ static int xgene_enet_probe(struct platform_device *pdev)
        spin_lock_init(&pdata->stats_lock);
        ret = xgene_extd_stats_init(pdata);
        if (ret)
-               goto err2;
+               goto err1;
 
        xgene_enet_napi_add(pdata);
        ret = register_netdev(ndev);
        if (ret) {
                netdev_err(ndev, "Failed to register netdev\n");
-               goto err2;
+               goto err1;
        }
 
        return 0;
 
-err2:
+err1:
        /*
         * If necessary, free_netdev() will call netif_napi_del() and undo
         * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
         */
 
+       xgene_enet_delete_desc_rings(pdata);
+
+err2:
        if (pdata->mdio_driver)
                xgene_enet_phy_disconnect(pdata);
        else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
                xgene_enet_mdio_remove(pdata);
-err1:
-       xgene_enet_delete_desc_rings(pdata);
 err:
        free_netdev(ndev);
        return ret;
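
The xgene rework above reorders the error labels so that teardown runs
in the reverse order of setup. A generic, compilable C sketch of the
goto-ladder idiom (the resources here are stand-ins):

    #include <stdlib.h>

    static int setup_all(void)
    {
            void *rings, *mdio;

            rings = malloc(64);             /* acquired first ... */
            if (!rings)
                    goto err;
            mdio = malloc(64);              /* ... acquired second */
            if (!mdio)
                    goto err_free_rings;

            /* further fallible steps would each add a label of their own */
            free(mdio);
            free(rings);
            return 0;

    err_free_rings:                         /* released in reverse order */
            free(rings);
    err:
            return -1;
    }

    int main(void)
    {
            return setup_all() ? 1 : 0;
    }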
index fce0fd3f23ff251ae60392575f1c51bee4d88681..bf9b3f020e106cb07fd7630073f146d7f54ccfa9 100644 (file)
@@ -105,8 +105,7 @@ struct aq_hw_ops {
 
        int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
 
-       int (*hw_get_link_status)(struct aq_hw_s *self,
-                                 struct aq_hw_link_status_s *link_status);
+       int (*hw_get_link_status)(struct aq_hw_s *self);
 
        int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
 
index 9ee1c501678409719f460912c6316b803c2dc7fc..6ac9e2602d6d8ea1fefd0de613ee633b905cbd8b 100644 (file)
@@ -103,6 +103,8 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
        else
                cfg->vecs = 1U;
 
+       cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);
+
        cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
 
        if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
@@ -123,33 +125,30 @@ static void aq_nic_service_timer_cb(unsigned long param)
        struct net_device *ndev = aq_nic_get_ndev(self);
        int err = 0;
        unsigned int i = 0U;
-       struct aq_hw_link_status_s link_status;
        struct aq_ring_stats_rx_s stats_rx;
        struct aq_ring_stats_tx_s stats_tx;
 
        if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
                goto err_exit;
 
-       err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+       err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
        if (err < 0)
                goto err_exit;
 
-       self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
-                           self->aq_nic_cfg.is_interrupt_moderation);
-
-       if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
-               if (link_status.mbps) {
-                       aq_utils_obj_set(&self->header.flags,
-                                        AQ_NIC_FLAG_STARTED);
-                       aq_utils_obj_clear(&self->header.flags,
-                                          AQ_NIC_LINK_DOWN);
-                       netif_carrier_on(self->ndev);
-               } else {
-                       netif_carrier_off(self->ndev);
-                       aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
-               }
+       self->link_status = self->aq_hw->aq_link_status;
 
-               self->link_status = link_status;
+       self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+                   self->aq_nic_cfg.is_interrupt_moderation);
+
+       if (self->link_status.mbps) {
+               aq_utils_obj_set(&self->header.flags,
+                                AQ_NIC_FLAG_STARTED);
+               aq_utils_obj_clear(&self->header.flags,
+                                  AQ_NIC_LINK_DOWN);
+               netif_carrier_on(self->ndev);
+       } else {
+               netif_carrier_off(self->ndev);
+               aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
        }
 
        memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
@@ -597,14 +596,11 @@ exit:
 }
 
 int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
-__releases(&ring->lock)
-__acquires(&ring->lock)
 {
        struct aq_ring_s *ring = NULL;
        unsigned int frags = 0U;
        unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
        unsigned int tc = 0U;
-       unsigned int trys = AQ_CFG_LOCK_TRYS;
        int err = NETDEV_TX_OK;
        bool is_nic_in_bad_state;
 
@@ -628,36 +624,21 @@ __acquires(&ring->lock)
                goto err_exit;
        }
 
-       do {
-               if (spin_trylock(&ring->header.lock)) {
-                       frags = aq_nic_map_skb(self, skb, ring);
-
-                       if (likely(frags)) {
-                               err = self->aq_hw_ops.hw_ring_tx_xmit(
-                                                               self->aq_hw,
-                                                               ring, frags);
-                               if (err >= 0) {
-                                       if (aq_ring_avail_dx(ring) <
-                                           AQ_CFG_SKB_FRAGS_MAX + 1)
-                                               aq_nic_ndev_queue_stop(
-                                                               self,
-                                                               ring->idx);
-
-                                       ++ring->stats.tx.packets;
-                                       ring->stats.tx.bytes += skb->len;
-                               }
-                       } else {
-                               err = NETDEV_TX_BUSY;
-                       }
+       frags = aq_nic_map_skb(self, skb, ring);
 
-                       spin_unlock(&ring->header.lock);
-                       break;
-               }
-       } while (--trys);
+       if (likely(frags)) {
+               err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+                                                     ring,
+                                                     frags);
+               if (err >= 0) {
+                       if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
+                               aq_nic_ndev_queue_stop(self, ring->idx);
 
-       if (!trys) {
+                       ++ring->stats.tx.packets;
+                       ring->stats.tx.bytes += skb->len;
+               }
+       } else {
                err = NETDEV_TX_BUSY;
-               goto err_exit;
        }
 
 err_exit:
@@ -688,11 +669,26 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
        netdev_for_each_mc_addr(ha, ndev) {
                ether_addr_copy(self->mc_list.ar[i++], ha->addr);
                ++self->mc_list.count;
+
+               if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
+                       break;
        }
 
-       return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+       if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
+               /* Number of filters is too big: atlantic does not support this.
+                * Force all multi filter to support this.
+                * With this we disable all UC filters and setup "all pass"
+                * multicast mask
+                */
+               self->packet_filter |= IFF_ALLMULTI;
+               self->aq_hw->aq_nic_cfg->mc_list_count = 0;
+               return self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+                                                       self->packet_filter);
+       } else {
+               return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
                                                    self->mc_list.ar,
                                                    self->mc_list.count);
+       }
 }
 
 int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
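
The aq_nic_set_multicast_list() change above caps the copied list at
AQ_CFG_MULTICAST_ADDRESS_MAX and falls back to an all-multicast filter
when the list overflows. A standalone sketch of that clamp-or-fallback
shape (constants and field names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define MC_MAX        8
    #define FLAG_ALLMULTI 0x1U

    struct dev {
            unsigned char mc_list[MC_MAX][6];
            unsigned int  mc_count;
            unsigned int  flags;
    };

    /* Copy at most MC_MAX addresses; if more were requested, stop filtering
     * per address and have the hardware accept all multicast instead. */
    static void set_mc_list(struct dev *d, unsigned char (*addrs)[6],
                            unsigned int n)
    {
            unsigned int i;

            for (i = 0; i < n && i < MC_MAX; i++)
                    memcpy(d->mc_list[i], addrs[i], 6);
            d->mc_count = i;

            if (n > MC_MAX) {
                    d->flags |= FLAG_ALLMULTI;  /* "all pass" multicast mask */
                    d->mc_count = 0;
            }
    }

    int main(void)
    {
            unsigned char addrs[10][6] = {{0}};
            struct dev d = {0};

            set_mc_list(&d, addrs, 10);
            printf("count=%u allmulti=%u\n", d.mc_count, d.flags & FLAG_ALLMULTI);
            return 0;
    }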
index 9a0817938eca38526e4c746680efe2845e9b1e51..ec5579fb8268b29c6040f524fe20495b43fa617d 100644 (file)
@@ -101,7 +101,6 @@ int aq_ring_init(struct aq_ring_s *self)
        self->hw_head = 0;
        self->sw_head = 0;
        self->sw_tail = 0;
-       spin_lock_init(&self->header.lock);
        return 0;
 }
 
index f6012b34abe690471e3e317ccf3731e3f45506fc..e12bcdfb874a4409f8ccec886f626b19b48b2b52 100644 (file)
@@ -17,7 +17,6 @@
 #define AQ_DIMOF(_ARY_)  ARRAY_SIZE(_ARY_)
 
 struct aq_obj_s {
-       spinlock_t lock; /* spinlock for nic/rings processing */
        atomic_t flags;
 };
 
index ad5b4d4dac7f6c7a5626ce5b86b9ca6d1a37f012..fee446af748ff1a64a984cf4b5ec12dce46471d1 100644 (file)
@@ -34,8 +34,6 @@ struct aq_vec_s {
 #define AQ_VEC_RX_ID 1
 
 static int aq_vec_poll(struct napi_struct *napi, int budget)
-__releases(&self->lock)
-__acquires(&self->lock)
 {
        struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
        struct aq_ring_s *ring = NULL;
@@ -47,7 +45,7 @@ __acquires(&self->lock)
 
        if (!self) {
                err = -EINVAL;
-       } else if (spin_trylock(&self->header.lock)) {
+       } else {
                for (i = 0U, ring = self->ring[0];
                        self->tx_rings > i; ++i, ring = self->ring[i]) {
                        if (self->aq_hw_ops->hw_ring_tx_head_update) {
@@ -105,11 +103,8 @@ __acquires(&self->lock)
                        self->aq_hw_ops->hw_irq_enable(self->aq_hw,
                                        1U << self->aq_ring_param.vec_idx);
                }
-
-err_exit:
-               spin_unlock(&self->header.lock);
        }
-
+err_exit:
        return work_done;
 }
 
@@ -185,8 +180,6 @@ int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
        self->aq_hw_ops = aq_hw_ops;
        self->aq_hw = aq_hw;
 
-       spin_lock_init(&self->header.lock);
-
        for (i = 0U, ring = self->ring[0];
                self->tx_rings > i; ++i, ring = self->ring[i]) {
                err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
index faeb4935ef3e3af5c998ef701e1e7cd2f5d739c8..c5a02df7a48b719a65b169938746d777f3f0b5a0 100644 (file)
@@ -629,6 +629,12 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
                                buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+
+                       /* Checksum offload workaround for small packets */
+                       if (rxd_wb->pkt_len <= 60) {
+                               buff->is_ip_cso = 0U;
+                               buff->is_cso_err = 0U;
+                       }
                }
 
                is_err &= ~0x18U;
index 1bceb7358e5ca3a4455badf55bc091d3705dc89d..21784cc39dabdb9005a0c4bff26c64b0ac7a5286 100644 (file)
@@ -645,6 +645,12 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                                buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+
+                       /* Checksum offload workaround for small packets */
+                       if (rxd_wb->pkt_len <= 60) {
+                               buff->is_ip_cso = 0U;
+                               buff->is_cso_err = 0U;
+                       }
                }
 
                is_err &= ~0x18U;
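
Both hw_atl hunks above void the hardware checksum verdict for frames of
60 bytes or less (fix 15 in the list), leaving the stack to verify them
in software. A minimal sketch of the pattern (field names are
illustrative):

    #include <stdio.h>

    struct rx_desc {
            unsigned short pkt_len;
            unsigned char  csum_ok;         /* hardware's checksum verdict */
            unsigned char  csum_err;
    };

    /* The hardware cannot be trusted on padded minimum-size frames, so drop
     * its verdict entirely and let the stack recompute the checksum. */
    static void fixup_small_frame(struct rx_desc *d)
    {
            if (d->pkt_len <= 60) {
                    d->csum_ok  = 0;
                    d->csum_err = 0;
            }
    }

    int main(void)
    {
            struct rx_desc d = { 60, 1, 1 };

            fixup_small_frame(&d);
            printf("ok=%u err=%u\n", d.csum_ok, d.csum_err);
            return 0;
    }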
index 8d6d8f5804daf2f431f0c93fcc8e4a2a7a1983ee..4f5ec9a0fbfb82b7bcf25fb234c5d6c3180c544a 100644 (file)
@@ -141,6 +141,12 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
 
        err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
                                     aq_hw_read_reg(self, 0x18U));
+
+       if (err < 0)
+               pr_err("%s: Bad FW version detected: expected=%x, actual=%x\n",
+                      AQ_CFG_DRV_NAME,
+                      aq_hw_caps->fw_ver_expected,
+                      aq_hw_read_reg(self, 0x18U));
        return err;
 }
 
@@ -313,11 +319,11 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 err_exit:;
 }
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-                                    struct aq_hw_link_status_s *link_status)
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
 {
        u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
        u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+       struct aq_hw_link_status_s *link_status = &self->aq_link_status;
 
        if (!link_speed_mask) {
                link_status->mbps = 0U;
index a66aee51ab5b049c4e9f6367aebdd57cbbf5fc5a..e0360a6b2202ef5b4ac683a44edcde9bf20ebedc 100644 (file)
@@ -180,8 +180,7 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self,
 int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
                               enum hal_atl_utils_fw_state_e state);
 
-int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
-                                    struct aq_hw_link_status_s *link_status);
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self);
 
 int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
                                   struct aq_hw_caps_s *aq_hw_caps,
index dc3052751bc13ed2248c218de01849d865dbe952..c28fa5a8734cbc769adc16dfc5e36a8cd13b35cb 100644 (file)
@@ -597,7 +597,7 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
 
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
-       dev_kfree_skb_any(cb->skb);
+       dev_consume_skb_any(cb->skb);
        cb->skb = NULL;
        dma_unmap_addr_set(cb, dma_addr, 0);
 }
@@ -1346,6 +1346,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 
        ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
        if (!ring->cbs) {
+               dma_free_coherent(kdev, sizeof(struct dma_desc),
+                                 ring->desc_cpu, ring->desc_dma);
                netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
                return -ENOMEM;
        }
index e7c8539cbddf6704720a4a11f5e09aebec066066..f20b3d2a4c2330543f64eee1334ae4e543317f9c 100644 (file)
@@ -4647,7 +4647,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                pf->port_id = le16_to_cpu(resp->port_id);
                bp->dev->dev_port = pf->port_id;
                memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
-               memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
                pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
                pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
                pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
@@ -4687,16 +4686,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
                vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
 
                memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
-               mutex_unlock(&bp->hwrm_cmd_lock);
-
-               if (is_valid_ether_addr(vf->mac_addr)) {
-                       /* overwrite netdev dev_adr with admin VF MAC */
-                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
-               } else {
-                       eth_hw_addr_random(bp->dev);
-                       rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
-               }
-               return rc;
 #endif
        }
 
@@ -7152,6 +7141,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
                netdev_reset_tc(dev);
        }
+       bp->tx_nr_rings += bp->tx_nr_rings_xdp;
        bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
                               bp->tx_nr_rings + bp->rx_nr_rings;
        bp->num_stat_ctxs = bp->cp_nr_rings;
@@ -7661,6 +7651,28 @@ void bnxt_restore_pf_fw_resources(struct bnxt *bp)
        bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
 }
 
+static int bnxt_init_mac_addr(struct bnxt *bp)
+{
+       int rc = 0;
+
+       if (BNXT_PF(bp)) {
+               memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+       } else {
+#ifdef CONFIG_BNXT_SRIOV
+               struct bnxt_vf_info *vf = &bp->vf;
+
+               if (is_valid_ether_addr(vf->mac_addr)) {
+                       /* overwrite netdev dev_adr with admin VF MAC */
+                       memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+               } else {
+                       eth_hw_addr_random(bp->dev);
+                       rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+               }
+#endif
+       }
+       return rc;
+}
+
 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 {
        enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -7789,7 +7801,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                rc = -1;
                goto init_err_pci_clean;
        }
-
+       rc = bnxt_init_mac_addr(bp);
+       if (rc) {
+               dev_err(&pdev->dev, "Unable to initialize mac address.\n");
+               rc = -EADDRNOTAVAIL;
+               goto init_err_pci_clean;
+       }
        rc = bnxt_hwrm_queue_qportcfg(bp);
        if (rc) {
                netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
index 77da75a55c0200edf5229d1dd88939473f85194c..997e10e8b863cc74387016d9de9fbe9bbba5b9b3 100644 (file)
@@ -84,6 +84,8 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
 
                max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
                bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
+               if (ulp->msix_requested)
+                       edev->en_ops->bnxt_free_msix(edev, ulp_id);
        }
        if (ulp->max_async_event_id)
                bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
index a981c4ee9d72deab705231088043e8690853d563..fea3f9a5fb2d37221cbf9abed77cdcdcf906a00c 100644 (file)
@@ -1360,7 +1360,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
                if (skb) {
                        pkts_compl++;
                        bytes_compl += GENET_CB(skb)->bytes_sent;
-                       dev_kfree_skb_any(skb);
+                       dev_consume_skb_any(skb);
                }
 
                txbds_processed++;
@@ -1875,7 +1875,7 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
                cb = ring->cbs + i;
                skb = bcmgenet_rx_refill(priv, cb);
                if (skb)
-                       dev_kfree_skb_any(skb);
+                       dev_consume_skb_any(skb);
                if (!cb->skb)
                        return -ENOMEM;
        }
@@ -1894,7 +1894,7 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 
                skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
                if (skb)
-                       dev_kfree_skb_any(skb);
+                       dev_consume_skb_any(skb);
        }
 }
 
index 82bf7aac6cdbda6431d1bfdd715ce1a56b2763af..0293b41171a5d90070c2ff9a954e1dd0e42aea4d 100644 (file)
@@ -369,12 +369,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
                list_del(&entry.list);
                spin_unlock(&adap->mbox_lock);
                ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
-               t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+               t4_record_mbox(adap, cmd, size, access, ret);
                return ret;
        }
 
        /* Copy in the new mailbox command and send it on its way ... */
-       t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
+       t4_record_mbox(adap, cmd, size, access, 0);
        for (i = 0; i < size; i += 8)
                t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
 
@@ -426,7 +426,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
        }
 
        ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
-       t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+       t4_record_mbox(adap, cmd, size, access, ret);
        dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
                *(const u8 *)cmd, mbox);
        t4_report_fw_error(adap);
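
The t4_wr_mbox_meat_timeout() hunks above log the caller's actual
command size instead of the fixed MBOX_LEN, so tracing a short command
no longer reads past its buffer (fix 33 in the list). A standalone
sketch of the bug and the fix (the trace helpers are invented for
illustration):

    #include <stdio.h>
    #include <string.h>

    #define MBOX_LEN 64

    static unsigned char trace_buf[MBOX_LEN];

    /* Buggy shape: always copies MBOX_LEN bytes, overreading short commands. */
    static void record_fixed_len(const void *cmd)
    {
            memcpy(trace_buf, cmd, MBOX_LEN);   /* may read past 'cmd' */
    }

    /* Fixed shape: copy only what the caller actually passed in. */
    static void record_sized(const void *cmd, size_t size)
    {
            memset(trace_buf, 0, sizeof(trace_buf));
            memcpy(trace_buf, cmd, size < MBOX_LEN ? size : MBOX_LEN);
    }

    int main(void)
    {
            unsigned char cmd[16] = {0};

            record_sized(cmd, sizeof(cmd));     /* stays inside 'cmd' */
            (void)record_fixed_len;             /* shown only as the bug shape */
            printf("traced %zu bytes\n", sizeof(cmd));
            return 0;
    }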
index 34dae51effd45a19c9a2b8b607dafeaec3aa0456..59da7ac3c1087c03f52b514c3fc9b81074284a5e 100644 (file)
@@ -1863,7 +1863,6 @@ err_setup_mdio:
 err_ioremap:
        release_resource(priv->res);
 err_req_mem:
-       netif_napi_del(&priv->napi);
        free_netdev(netdev);
 err_alloc_etherdev:
        return err;
index 6e67d22fd0d54f69e5ee3358717e7ed539fad952..1c7da16ad0ffe5de0bbed64f027ed765daec7464 100644 (file)
@@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
                goto no_mem;
        }
 
+       pdev->dev.of_node = node;
+       pdev->dev.parent = priv->dev;
        set_dma_ops(&pdev->dev, get_dma_ops(priv->dev));
 
        ret = platform_device_add_data(pdev, &data, sizeof(data));
index 48d21c1e09f27b74c67179a82ec3ab5af9b41d6b..4d598ca8503a50952576354ae35f1b3b6a574b6e 100644 (file)
@@ -6504,7 +6504,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
        struct resource *res;
        const char *dt_mac_addr;
        const char *mac_from;
-       char hw_mac_addr[ETH_ALEN];
+       char hw_mac_addr[ETH_ALEN] = {0};
        u32 id;
        int features;
        int phy_mode;
index 0039b4725405fcf0a961fa53eab250adc2d51f3c..2f26fb34d7416b88ee8bf7a3464e40837ab90c3c 100644 (file)
@@ -263,6 +263,7 @@ struct mlx5e_dcbx {
 
        /* The only setting that cannot be read from FW */
        u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
+       u8                         cap;
 };
 #endif
 
index 2eb54d36e16eac77e3a9d0387758d66b51584bd3..c1d384fca4dc1195a1d0b76677176b7dfcbad538 100644 (file)
@@ -288,13 +288,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       struct mlx5e_dcbx *dcbx = &priv->dcbx;
-       u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
-       if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
-               mode |= DCB_CAP_DCBX_HOST;
 
-       return mode;
+       return priv->dcbx.cap;
 }
 
 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -312,6 +307,7 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
                /* set dcbx to fw controlled */
                if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
                        dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+                       dcbx->cap &= ~DCB_CAP_DCBX_HOST;
                        return 0;
                }
 
@@ -324,6 +320,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
        if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
                return 1;
 
+       dcbx->cap = mode;
+
        return 0;
 }
 
@@ -628,9 +626,9 @@ static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
                *cap = false;
                break;
        case DCB_CAP_ATTR_DCBX:
-               *cap = (DCB_CAP_DCBX_LLD_MANAGED |
-                       DCB_CAP_DCBX_VER_CEE |
-                       DCB_CAP_DCBX_STATIC);
+               *cap = priv->dcbx.cap |
+                      DCB_CAP_DCBX_VER_CEE |
+                      DCB_CAP_DCBX_VER_IEEE;
                break;
        default:
                *cap = 0;
@@ -754,8 +752,16 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
 {
        struct mlx5e_dcbx *dcbx = &priv->dcbx;
 
+       if (!MLX5_CAP_GEN(priv->mdev, qos))
+               return;
+
        if (MLX5_CAP_GEN(priv->mdev, dcbx))
                mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
 
+       priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
+                        DCB_CAP_DCBX_VER_IEEE;
+       if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+               priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
        mlx5e_ets_init(priv);
 }
index 917fade5f5d55aa1a89c5abaadf73d9e5f37d612..f5594014715bbbd1c281f95ecd871408b4949e06 100644 (file)
@@ -641,8 +641,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
 
        new_channels.params = priv->channels.params;
        new_channels.params.num_channels = count;
-       mlx5e_build_default_indir_rqt(priv->mdev, new_channels.params.indirection_rqt,
-                                     MLX5E_INDIR_RQT_SIZE, count);
+       if (!netif_is_rxfh_configured(priv->netdev))
+               mlx5e_build_default_indir_rqt(priv->mdev,
+                                             new_channels.params.indirection_rqt,
+                                             MLX5E_INDIR_RQT_SIZE, count);
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->channels.params = new_channels.params;
index 57f31fa478ceee5b83a3b4041cfef12457acbd03..6ad7f07e7861d9c8d6922b0c115fc950e0126dfc 100644 (file)
@@ -1969,6 +1969,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
        }
 
        mlx5e_build_common_cq_param(priv, param);
+       param->cq_period_mode = params->rx_cq_period_mode;
 }
 
 static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
index 325b2c8c1c6d18c8d8544ee15d0fcf0a347f6ee8..7344433259fca32fe288ba4c63dff6c64ab2d126 100644 (file)
@@ -222,13 +222,13 @@ static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
        if (unlikely(!page))
                return -ENOMEM;
 
-       dma_info->page = page;
        dma_info->addr = dma_map_page(rq->pdev, page, 0,
                                      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
        if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
                put_page(page);
                return -ENOMEM;
        }
+       dma_info->page = page;
 
        return 0;
 }
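
The mlx5e_page_alloc_mapped() hunk above stores dma_info->page only
after the DMA mapping succeeds, so an error no longer leaves a dangling
pointer behind (fix 20 in the list). The general shape, as a
self-contained sketch with stand-in names:

    #include <stdlib.h>

    struct dma_info {
            void *page;                     /* must never hold a freed pointer */
    };

    static int fake_map(void *page)         /* stand-in for the mapping step */
    {
            return page ? 0 : -1;
    }

    static int alloc_mapped(struct dma_info *di)
    {
            void *page = malloc(4096);

            if (!page)
                    return -1;
            if (fake_map(page) < 0) {
                    free(page);
                    return -1;              /* di->page was never written */
            }
            di->page = page;                /* publish only after success */
            return 0;
    }

    int main(void)
    {
            struct dma_info di = { 0 };

            if (alloc_mapped(&di) == 0)
                    free(di.page);
            return 0;
    }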
index 3c536f560dd2b529607ddf30de70d979efd0c59b..7f282e8f4e7fee460e1140dacc8f86ece6b1ea9e 100644 (file)
@@ -1443,12 +1443,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int ret;
 
-       dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
-       ret = dst->error;
-       if (ret) {
-               dst_release(dst);
+       ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
+                                        fl6);
+       if (ret < 0)
                return ret;
-       }
 
        *out_ttl = ip6_dst_hoplimit(dst);
 
index aaa0f4ebba9aee5229cfd7cd22088bfc3ac3027c..31353e5c3c783c2b6c5789fa46e66f4955535be7 100644 (file)
@@ -128,10 +128,10 @@ static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
                return mlx5e_skb_l2_header_offset(skb);
 }
 
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
-                                                struct sk_buff *skb)
+static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+                                       struct sk_buff *skb)
 {
-       int hlen;
+       u16 hlen;
 
        switch (mode) {
        case MLX5_INLINE_MODE_NONE:
@@ -140,19 +140,22 @@ static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
                hlen = eth_get_headlen(skb->data, skb_headlen(skb));
                if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
                        hlen += VLAN_HLEN;
-               return hlen;
+               break;
        case MLX5_INLINE_MODE_IP:
                /* When transport header is set to zero, it means no transport
                 * header. When transport header is set to 0xff's, it means
                 * transport header wasn't set.
                 */
-               if (skb_transport_offset(skb))
-                       return mlx5e_skb_l3_header_offset(skb);
+               if (skb_transport_offset(skb)) {
+                       hlen = mlx5e_skb_l3_header_offset(skb);
+                       break;
+               }
                /* fall through */
        case MLX5_INLINE_MODE_L2:
        default:
-               return mlx5e_skb_l2_header_offset(skb);
+               hlen = mlx5e_skb_l2_header_offset(skb);
        }
+       return min_t(u16, hlen, skb->len);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
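
The mlx5e_calc_min_inline() change above funnels every case through a
single clamp, min_t(u16, hlen, skb->len), so a parsed header length can
never exceed the packet and the inline copy stays in bounds. A small
standalone sketch of the clamp:

    #include <stdio.h>

    /* A parser may report a header length larger than the (malformed)
     * packet it parsed; clamping before the copy keeps the copy in bounds. */
    static unsigned short inline_len(unsigned short parsed_hlen,
                                     unsigned short pkt_len)
    {
            return parsed_hlen < pkt_len ? parsed_hlen : pkt_len;
    }

    int main(void)
    {
            printf("%u\n", inline_len(128, 60));    /* prints 60, not 128 */
            return 0;
    }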
index 95b64025ce36f74681a69cc8812dc8fa6da79261..5bc0593bd76e706e2b5c38a0a767af0324e67685 100644 (file)
@@ -815,7 +815,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
        struct mlx5_eswitch_rep *rep;
        int vport;
 
-       for (vport = 0; vport < nvports; vport++) {
+       for (vport = nvports - 1; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->valid)
                        continue;
index c065132b956d6ba772f812bff21a190d5759bf13..16885827367bfd9153f96faaf7c2afedc6c6a5b0 100644 (file)
@@ -1186,7 +1186,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                }
        }
 
-       clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
        set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
 out:
        mutex_unlock(&dev->intf_state_mutex);
@@ -1261,7 +1260,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
                mlx5_drain_health_recovery(dev);
 
        mutex_lock(&dev->intf_state_mutex);
-       if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+       if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
                         __func__);
                if (cleanup)
@@ -1270,7 +1269,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
        }
 
        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-       set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
 
        if (mlx5_device_registered(dev))
                mlx5_detach_device(dev);
@@ -1565,8 +1563,6 @@ static void shutdown(struct pci_dev *pdev)
        int err;
 
        dev_info(&pdev->dev, "Shutdown was called\n");
-       /* Notify mlx5 clients that the kernel is being shut down */
-       set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
        err = mlx5_try_fast_unload(dev);
        if (err)
                mlx5_unload_one(dev, priv, false);
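
The mlx5 main.c hunks above drop the separate DOWN bit and key every
check off the single UP bit, removing the window in which both bits (or
neither) could be observed set. A tiny sketch of why one bit is
consistent by construction (the bit name is illustrative):

    #include <stdio.h>

    #define STATE_UP 0x1U

    /* With one bit there are exactly two states; with separate UP and DOWN
     * bits there are four, two of which (both set, both clear) are nonsense
     * that could be exposed transiently between the two updates. */
    static int is_down(unsigned int state)
    {
            return !(state & STATE_UP);
    }

    int main(void)
    {
            unsigned int state = 0;

            state |= STATE_UP;                      /* load */
            printf("down=%d\n", is_down(state));
            state &= ~STATE_UP;                     /* unload */
            printf("down=%d\n", is_down(state));
            return 0;
    }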
index f774de6f5fcb4b88bbe0e491ca7a2e2c2fb432ea..520f6382dfdeceac61ffa383afd12a6de99daf54 100644 (file)
@@ -201,13 +201,13 @@ static int destroy_srq_cmd(struct mlx5_core_dev *dev,
 static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
 {
-       /* arm_srq structs missing using identical xrc ones */
-       u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
-       u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+       u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
+       u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
 
-       MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
-       MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
-       MLX5_SET(arm_xrc_srq_in, srq_in, lwm,      lwm);
+       MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
+       MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
+       MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
+       MLX5_SET(arm_rq_in, srq_in, lwm,      lwm);
 
        return  mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                              srq_out, sizeof(srq_out));
index 60bf8f27cc00ecc6bbed3685a14dd3aa8a96fd78..c6a3e61b53bdbf0c32212a6415f4a1d2da769bf8 100644 (file)
@@ -4139,6 +4139,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
                        return -EINVAL;
                if (!info->linking)
                        break;
+               if (netdev_has_any_upper_dev(upper_dev))
+                       return -EINVAL;
                if (netif_is_lag_master(upper_dev) &&
                    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
                                               info->upper_info))
@@ -4258,6 +4260,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
                upper_dev = info->upper_dev;
                if (!netif_is_bridge_master(upper_dev))
                        return -EINVAL;
+               if (!info->linking)
+                       break;
+               if (netdev_has_any_upper_dev(upper_dev))
+                       return -EINVAL;
                break;
        case NETDEV_CHANGEUPPER:
                upper_dev = info->upper_dev;
index 5eb1606765c58064a5e2fd6677a791165c18c071..d39ffbfcc436fdb4343b391f2ac6bbdd48dd14af 100644 (file)
@@ -705,6 +705,7 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                            bool is_port_mc_router)
 {
        struct mlxsw_sp_bridge_port *bridge_port;
+       int err;
 
        if (switchdev_trans_ph_prepare(trans))
                return 0;
@@ -715,11 +716,17 @@ static int mlxsw_sp_port_attr_mc_router_set(struct mlxsw_sp_port *mlxsw_sp_port,
                return 0;
 
        if (!bridge_port->bridge_device->multicast_enabled)
-               return 0;
+               goto out;
 
-       return mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
-                                                   MLXSW_SP_FLOOD_TYPE_MC,
-                                                   is_port_mc_router);
+       err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
+                                                  MLXSW_SP_FLOOD_TYPE_MC,
+                                                  is_port_mc_router);
+       if (err)
+               return err;
+
+out:
+       bridge_port->mrouter = is_port_mc_router;
+       return 0;
 }
 
 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
index 0e08404480efb7863e2be9a67109b0068b7aaf1a..d25b5038c3a269c316611512d3880597f534550f 100644 (file)
@@ -42,33 +42,29 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_two *frame,
                            struct tc_cls_flower_offload *flow, u8 key_type,
                            bool mask_version)
 {
+       struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
        struct flow_dissector_key_vlan *flow_vlan;
        u16 tmp_tci;
 
+       memset(frame, 0, sizeof(struct nfp_flower_meta_two));
        /* Populate the metadata frame. */
        frame->nfp_flow_key_layer = key_type;
        frame->mask_id = ~0;
 
-       if (mask_version) {
-               frame->tci = cpu_to_be16(~0);
-               return;
-       }
-
-       flow_vlan = skb_flow_dissector_target(flow->dissector,
-                                             FLOW_DISSECTOR_KEY_VLAN,
-                                             flow->key);
-
-       /* Populate the tci field. */
-       if (!flow_vlan->vlan_id) {
-               tmp_tci = 0;
-       } else {
-               tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-                                    flow_vlan->vlan_priority) |
-                         FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-                                    flow_vlan->vlan_id) |
-                         NFP_FLOWER_MASK_VLAN_CFI;
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+               flow_vlan = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_VLAN,
+                                                     target);
+               /* Populate the tci field. */
+               if (flow_vlan->vlan_id) {
+                       tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+                                            flow_vlan->vlan_priority) |
+                                 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+                                            flow_vlan->vlan_id) |
+                                 NFP_FLOWER_MASK_VLAN_CFI;
+                       frame->tci = cpu_to_be16(tmp_tci);
+               }
        }
-       frame->tci = cpu_to_be16(tmp_tci);
 }
 
 static void
@@ -99,17 +95,18 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                       bool mask_version)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-       struct flow_dissector_key_eth_addrs *flow_mac;
-
-       flow_mac = skb_flow_dissector_target(flow->dissector,
-                                            FLOW_DISSECTOR_KEY_ETH_ADDRS,
-                                            target);
+       struct flow_dissector_key_eth_addrs *addr;
 
        memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
 
-       /* Populate mac frame. */
-       ether_addr_copy(frame->mac_dst, &flow_mac->dst[0]);
-       ether_addr_copy(frame->mac_src, &flow_mac->src[0]);
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               addr = skb_flow_dissector_target(flow->dissector,
+                                                FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                target);
+               /* Populate mac frame. */
+               ether_addr_copy(frame->mac_dst, &addr->dst[0]);
+               ether_addr_copy(frame->mac_src, &addr->src[0]);
+       }
 
        if (mask_version)
                frame->mpls_lse = cpu_to_be32(~0);
@@ -121,14 +118,17 @@ nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
                         bool mask_version)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-       struct flow_dissector_key_ports *flow_tp;
+       struct flow_dissector_key_ports *tp;
 
-       flow_tp = skb_flow_dissector_target(flow->dissector,
-                                           FLOW_DISSECTOR_KEY_PORTS,
-                                           target);
+       memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
 
-       frame->port_src = flow_tp->src;
-       frame->port_dst = flow_tp->dst;
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+               tp = skb_flow_dissector_target(flow->dissector,
+                                              FLOW_DISSECTOR_KEY_PORTS,
+                                              target);
+               frame->port_src = tp->src;
+               frame->port_dst = tp->dst;
+       }
 }
 
 static void
@@ -137,25 +137,27 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
                        bool mask_version)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-       struct flow_dissector_key_ipv4_addrs *flow_ipv4;
-       struct flow_dissector_key_basic *flow_basic;
-
-       flow_ipv4 = skb_flow_dissector_target(flow->dissector,
-                                             FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-                                             target);
-
-       flow_basic = skb_flow_dissector_target(flow->dissector,
-                                              FLOW_DISSECTOR_KEY_BASIC,
-                                              target);
+       struct flow_dissector_key_ipv4_addrs *addr;
+       struct flow_dissector_key_basic *basic;
 
-       /* Populate IPv4 frame. */
-       frame->reserved = 0;
-       frame->ipv4_src = flow_ipv4->src;
-       frame->ipv4_dst = flow_ipv4->dst;
-       frame->proto = flow_basic->ip_proto;
        /* Wildcard TOS/TTL for now. */
-       frame->tos = 0;
-       frame->ttl = 0;
+       memset(frame, 0, sizeof(struct nfp_flower_ipv4));
+
+       if (dissector_uses_key(flow->dissector,
+                              FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+               addr = skb_flow_dissector_target(flow->dissector,
+                                                FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+                                                target);
+               frame->ipv4_src = addr->src;
+               frame->ipv4_dst = addr->dst;
+       }
+
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               basic = skb_flow_dissector_target(flow->dissector,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 target);
+               frame->proto = basic->ip_proto;
+       }
 }
 
 static void
@@ -164,26 +166,27 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
                        bool mask_version)
 {
        struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-       struct flow_dissector_key_ipv6_addrs *flow_ipv6;
-       struct flow_dissector_key_basic *flow_basic;
-
-       flow_ipv6 = skb_flow_dissector_target(flow->dissector,
-                                             FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-                                             target);
+       struct flow_dissector_key_ipv6_addrs *addr;
+       struct flow_dissector_key_basic *basic;
 
-       flow_basic = skb_flow_dissector_target(flow->dissector,
-                                              FLOW_DISSECTOR_KEY_BASIC,
-                                              target);
-
-       /* Populate IPv6 frame. */
-       frame->reserved = 0;
-       frame->ipv6_src = flow_ipv6->src;
-       frame->ipv6_dst = flow_ipv6->dst;
-       frame->proto = flow_basic->ip_proto;
        /* Wildcard LABEL/TOS/TTL for now. */
-       frame->ipv6_flow_label_exthdr = 0;
-       frame->tos = 0;
-       frame->ttl = 0;
+       memset(frame, 0, sizeof(struct nfp_flower_ipv6));
+
+       if (dissector_uses_key(flow->dissector,
+                              FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+               addr = skb_flow_dissector_target(flow->dissector,
+                                                FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+                                                target);
+               frame->ipv6_src = addr->src;
+               frame->ipv6_dst = addr->dst;
+       }
+
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               basic = skb_flow_dissector_target(flow->dissector,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 target);
+               frame->proto = basic->ip_proto;
+       }
 }
 
 int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
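
Every nfp_flower_compile_*() hunk above follows the same shape: zero the
output frame first, then fill a field only when dissector_uses_key()
reports the key as present, rather than unconditionally dereferencing a
target that may not exist. A standalone sketch of that guard pattern
(the dissector here is a toy, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    #define KEY_VLAN  0x1U
    #define KEY_PORTS 0x2U

    struct dissector { unsigned int used_keys; };
    struct match     { unsigned short vlan_tci, port_src, port_dst; };

    static int uses_key(const struct dissector *d, unsigned int key)
    {
            return (d->used_keys & key) != 0;
    }

    static void compile_match(const struct dissector *d, struct match *m)
    {
            memset(m, 0, sizeof(*m));       /* absent keys stay wildcarded */

            if (uses_key(d, KEY_VLAN))
                    m->vlan_tci = 0x1234;   /* would come from the real key */
            if (uses_key(d, KEY_PORTS)) {
                    m->port_src = 80;
                    m->port_dst = 443;
            }
    }

    int main(void)
    {
            struct dissector d = { KEY_PORTS };
            struct match m;

            compile_match(&d, &m);
            printf("tci=%u src=%u dst=%u\n", m.vlan_tci, m.port_src, m.port_dst);
            return 0;
    }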
index 4ad10bd5e139402dc452d90be81f1384bb5a6785..74a96d6bb05ce1c4b3a64d03d18016327b697a84 100644 (file)
@@ -105,43 +105,62 @@ static int
 nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
                                struct tc_cls_flower_offload *flow)
 {
-       struct flow_dissector_key_control *mask_enc_ctl;
-       struct flow_dissector_key_basic *mask_basic;
-       struct flow_dissector_key_basic *key_basic;
+       struct flow_dissector_key_basic *mask_basic = NULL;
+       struct flow_dissector_key_basic *key_basic = NULL;
+       struct flow_dissector_key_ip *mask_ip = NULL;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
 
-       mask_enc_ctl = skb_flow_dissector_target(flow->dissector,
-                                                FLOW_DISSECTOR_KEY_ENC_CONTROL,
-                                                flow->mask);
+       if (dissector_uses_key(flow->dissector,
+                              FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+               struct flow_dissector_key_control *mask_enc_ctl =
+                       skb_flow_dissector_target(flow->dissector,
+                                                 FLOW_DISSECTOR_KEY_ENC_CONTROL,
+                                                 flow->mask);
+               /* We are expecting a tunnel. For now we ignore offloading. */
+               if (mask_enc_ctl->addr_type)
+                       return -EOPNOTSUPP;
+       }
 
-       mask_basic = skb_flow_dissector_target(flow->dissector,
-                                              FLOW_DISSECTOR_KEY_BASIC,
-                                              flow->mask);
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               mask_basic = skb_flow_dissector_target(flow->dissector,
+                                                      FLOW_DISSECTOR_KEY_BASIC,
+                                                      flow->mask);
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+       }
+
+       if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
+               mask_ip = skb_flow_dissector_target(flow->dissector,
+                                                   FLOW_DISSECTOR_KEY_IP,
+                                                   flow->mask);
 
-       key_basic = skb_flow_dissector_target(flow->dissector,
-                                             FLOW_DISSECTOR_KEY_BASIC,
-                                             flow->key);
        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT | NFP_FLOWER_LAYER_MAC;
        key_size = sizeof(struct nfp_flower_meta_one) +
                   sizeof(struct nfp_flower_in_port) +
                   sizeof(struct nfp_flower_mac_mpls);
 
-       /* We are expecting a tunnel. For now we ignore offloading. */
-       if (mask_enc_ctl->addr_type)
-               return -EOPNOTSUPP;
-
-       if (mask_basic->n_proto) {
+       if (mask_basic && mask_basic->n_proto) {
                /* Ethernet type is present in the key. */
                switch (key_basic->n_proto) {
                case cpu_to_be16(ETH_P_IP):
+                       if (mask_ip && mask_ip->tos)
+                               return -EOPNOTSUPP;
+                       if (mask_ip && mask_ip->ttl)
+                               return -EOPNOTSUPP;
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;
 
                case cpu_to_be16(ETH_P_IPV6):
+                       if (mask_ip && mask_ip->tos)
+                               return -EOPNOTSUPP;
+                       if (mask_ip && mask_ip->ttl)
+                               return -EOPNOTSUPP;
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;
@@ -152,6 +171,11 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               /* Currently we do not offload MPLS. */
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       return -EOPNOTSUPP;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -166,7 +190,7 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
                }
        }
 
-       if (mask_basic->ip_proto) {
+       if (mask_basic && mask_basic->ip_proto) {
                /* Transport layer protocol is present in the key. */
                switch (key_basic->ip_proto) {
                case IPPROTO_TCP:
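
Two conventions are at work in this function after the change: the mask pointers start out NULL and are only populated when the corresponding key exists, so every later use must be guarded (mask_basic && ...); and any field the hardware cannot match on (IP TOS/TTL, the MPLS ethertypes) makes the function return -EOPNOTSUPP, which keeps the filter in software instead of silently offloading a partial match. A condensed sketch of the second rule:

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *mask_ip;

		mask_ip = skb_flow_dissector_target(flow->dissector,
						    FLOW_DISSECTOR_KEY_IP,
						    flow->mask);
		/* Refuse, rather than ignore, what we cannot offload. */
		if (mask_ip->tos || mask_ip->ttl)
			return -EOPNOTSUPP;
	}
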
index d67969d3e484682c102a965c4abf5908dc1fc6fc..3f199db2002e5ce1c4a0dabc1475647426f6c876 100644 (file)
@@ -98,21 +98,20 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
        struct nfp_pf *pf = pci_get_drvdata(pdev);
        int err;
 
-       mutex_lock(&pf->lock);
-
        if (num_vfs > pf->limit_vfs) {
                nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
                         pf->limit_vfs);
-               err = -EINVAL;
-               goto err_unlock;
+               return -EINVAL;
        }
 
        err = pci_enable_sriov(pdev, num_vfs);
        if (err) {
                dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err);
-               goto err_unlock;
+               return err;
        }
 
+       mutex_lock(&pf->lock);
+
        err = nfp_app_sriov_enable(pf->app, num_vfs);
        if (err) {
                dev_warn(&pdev->dev,
@@ -129,9 +128,8 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
        return num_vfs;
 
 err_sriov_disable:
-       pci_disable_sriov(pdev);
-err_unlock:
        mutex_unlock(&pf->lock);
+       pci_disable_sriov(pdev);
        return err;
 #endif
        return 0;
@@ -158,10 +156,10 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
 
        pf->num_vfs = 0;
 
+       mutex_unlock(&pf->lock);
+
        pci_disable_sriov(pdev);
        dev_dbg(&pdev->dev, "Removed VFs.\n");
-
-       mutex_unlock(&pf->lock);
 #endif
        return 0;
 }
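
Both SR-IOV hunks are the same lock-ordering fix: pci_enable_sriov() and pci_disable_sriov() are now always called with pf->lock dropped, since pci_disable_sriov() can block on VF teardown, and that teardown path may itself need pf->lock. A minimal sketch of the resulting enable shape, with error handling condensed and assuming the lock discipline the patch establishes:

	static int sriov_enable_sketch(struct pci_dev *pdev, int num_vfs)
	{
		struct nfp_pf *pf = pci_get_drvdata(pdev);
		int err;

		err = pci_enable_sriov(pdev, num_vfs);	/* pf->lock not held */
		if (err)
			return err;

		mutex_lock(&pf->lock);
		err = nfp_app_sriov_enable(pf->app, num_vfs);
		mutex_unlock(&pf->lock);

		if (err)
			pci_disable_sriov(pdev);	/* again outside pf->lock */
		return err ? err : num_vfs;
	}
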
index 9f77ce038a4a339260e0118bb6a9f61e60547870..66a09e490cf5a5f2ce19193653910e0fcbe5632e 100644 (file)
@@ -895,6 +895,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
 
        netdev_tx_sent_queue(nd_q, txbuf->real_len);
 
+       skb_tx_timestamp(skb);
+
        tx_ring->wr_p += nr_frags + 1;
        if (nfp_net_tx_ring_should_stop(tx_ring))
                nfp_net_tx_ring_stop(nd_q, tx_ring);
@@ -903,8 +905,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
        if (!skb->xmit_more || netif_xmit_stopped(nd_q))
                nfp_net_tx_xmit_more_flush(tx_ring);
 
-       skb_tx_timestamp(skb);
-
        return NETDEV_TX_OK;
 
 err_unmap:
@@ -1751,6 +1751,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        continue;
                }
 
+               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
+
+               nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
+
                if (likely(!meta.portid)) {
                        netdev = dp->netdev;
                } else {
@@ -1759,16 +1763,12 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                        nn = netdev_priv(dp->netdev);
                        netdev = nfp_app_repr_get(nn->app, meta.portid);
                        if (unlikely(!netdev)) {
-                               nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
+                               nfp_net_rx_drop(dp, r_vec, rx_ring, NULL, skb);
                                continue;
                        }
                        nfp_repr_inc_rx_stats(netdev, pkt_len);
                }
 
-               nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
-
-               nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
-
                skb_reserve(skb, pkt_off);
                skb_put(skb, pkt_len);
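
Both hunks in this file are ordering fixes against use-after-free. On TX, skb_tx_timestamp() must run before the write pointer advances and the ring is flushed, because from that point the device can complete the descriptor and free the skb. On RX, the buffer is unmapped and handed back to the ring before any bail-out path (such as a failed representor lookup, which now drops with a NULL rxbuf) can recycle it a second time. The TX half, condensed:

	netdev_tx_sent_queue(nd_q, txbuf->real_len);

	/* Touch the skb while we still own it... */
	skb_tx_timestamp(skb);

	/* ...because after the doorbell the device may free it. */
	tx_ring->wr_p += nr_frags + 1;
	nfp_net_tx_xmit_more_flush(tx_ring);
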
 
index 5797dbf2b50779583e0a27e1fee02ce85b8feebb..34b985384d26129435686dca6c86b3e6e092cde2 100644 (file)
@@ -456,13 +456,9 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
 {
        int err;
 
-       err = nfp_net_pf_app_start_ctrl(pf);
-       if (err)
-               return err;
-
        err = nfp_app_start(pf->app, pf->ctrl_vnic);
        if (err)
-               goto err_ctrl_stop;
+               return err;
 
        if (pf->num_vfs) {
                err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
@@ -474,8 +470,6 @@ static int nfp_net_pf_app_start(struct nfp_pf *pf)
 
 err_app_stop:
        nfp_app_stop(pf->app);
-err_ctrl_stop:
-       nfp_net_pf_app_stop_ctrl(pf);
        return err;
 }
 
@@ -484,7 +478,6 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
        if (pf->num_vfs)
                nfp_app_sriov_disable(pf->app);
        nfp_app_stop(pf->app);
-       nfp_net_pf_app_stop_ctrl(pf);
 }
 
 static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
@@ -559,7 +552,7 @@ err_unmap_ctrl:
 
 static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
 {
-       nfp_net_pf_app_stop(pf);
+       nfp_net_pf_app_stop_ctrl(pf);
        /* stop app first, to avoid double free of ctrl vNIC's ddir */
        nfp_net_debugfs_dir_clean(&pf->ddir);
 
@@ -690,6 +683,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 {
        struct nfp_net_fw_version fw_ver;
        u8 __iomem *ctrl_bar, *qc_bar;
+       struct nfp_net *nn;
        int stride;
        int err;
 
@@ -766,7 +760,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        if (err)
                goto err_free_vnics;
 
-       err = nfp_net_pf_app_start(pf);
+       err = nfp_net_pf_app_start_ctrl(pf);
        if (err)
                goto err_free_irqs;
 
@@ -774,12 +768,20 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
        if (err)
                goto err_stop_app;
 
+       err = nfp_net_pf_app_start(pf);
+       if (err)
+               goto err_clean_vnics;
+
        mutex_unlock(&pf->lock);
 
        return 0;
 
+err_clean_vnics:
+       list_for_each_entry(nn, &pf->vnics, vnic_list)
+               if (nfp_net_is_data_vnic(nn))
+                       nfp_net_pf_clean_vnic(pf, nn);
 err_stop_app:
-       nfp_net_pf_app_stop(pf);
+       nfp_net_pf_app_stop_ctrl(pf);
 err_free_irqs:
        nfp_net_pf_free_irqs(pf);
 err_free_vnics:
@@ -803,6 +805,8 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
        if (list_empty(&pf->vnics))
                goto out;
 
+       nfp_net_pf_app_stop(pf);
+
        list_for_each_entry(nn, &pf->vnics, vnic_list)
                if (nfp_net_is_data_vnic(nn))
                        nfp_net_pf_clean_vnic(pf, nn);
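
The probe/remove rework above is the kernel's standard goto-unwind idiom: each setup step gains a label that undoes it, labels run in reverse setup order, and remove mirrors probe (app stopped before the data vNICs are cleaned, control vNIC torn down last). A generic sketch of the idiom, with hypothetical step names:

	int probe_sketch(void)
	{
		int err;

		err = setup_ctrl();
		if (err)
			return err;

		err = setup_vnics();
		if (err)
			goto err_stop_ctrl;

		err = start_app();
		if (err)
			goto err_clean_vnics;

		return 0;

	err_clean_vnics:
		clean_vnics();		/* undo in reverse order */
	err_stop_ctrl:
		stop_ctrl();
		return err;
	}
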
index 28ea0af89aefeb2a733801af03a21b20d269cb37..e3223f2fe2ffc9d4b186a42e0cac87fc37021afd 100644 (file)
@@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
        seg_hdr->cookie = MPI_COREDUMP_COOKIE;
        seg_hdr->segNum = seg_number;
        seg_hdr->segSize = seg_size;
-       memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
+       strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
 }
 
 /*
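
The qlge one-liner fixes an over-read rather than a truncation bug: memcpy() unconditionally copies sizeof(seg_hdr->description) - 1 bytes, so a short string literal passed as desc was read past its end; strncpy() stops at the terminating NUL and zero-pads the remainder. A standalone illustration with hypothetical buffers:

	char src[] = "CORE";		/* a 5-byte object */
	char dst[16] = "";

	/* memcpy(dst, src, sizeof(dst) - 1) would read 15 bytes from the
	 * 5-byte source; strncpy stops at the NUL and pads with zeros.
	 */
	strncpy(dst, src, sizeof(dst) - 1);
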
index bd07a15d3b7c09886de55af99a45975ac3401219..e03fcf914690c9a9e8fae548c4702f402d698f47 100644 (file)
@@ -6863,8 +6863,7 @@ static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
                        rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
                                             tp->TxDescArray + entry);
                        if (skb) {
-                               tp->dev->stats.tx_dropped++;
-                               dev_kfree_skb_any(skb);
+                               dev_consume_skb_any(skb);
                                tx_skb->skb = NULL;
                        }
                }
@@ -7319,7 +7318,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
                        tp->tx_stats.packets++;
                        tp->tx_stats.bytes += tx_skb->skb->len;
                        u64_stats_update_end(&tp->tx_stats.syncp);
-                       dev_kfree_skb_any(tx_skb->skb);
+                       dev_consume_skb_any(tx_skb->skb);
                        tx_skb->skb = NULL;
                }
                dirty_tx++;
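
The dev_kfree_skb_any() to dev_consume_skb_any() swaps are not cosmetic: both free the skb, but kfree fires the kfree_skb tracepoint and counts as a packet drop for drop monitors, while consume marks normal end-of-life. Completing a transmitted packet, or tearing down a ring of packets that were handled, should therefore consume, so that tools watching skb:kfree_skb only see real drops. In sketch form, with a hypothetical 'transmitted' flag:

	if (transmitted)
		dev_consume_skb_any(skb);	/* normal end-of-life */
	else
		dev_kfree_skb_any(skb);		/* a real drop, traced as such */
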
index 73427e29df2afb808a0b525ebb1978df4d76a996..fbd00cb0cb7d7204e97b9b11d8ae058c95148834 100644 (file)
@@ -47,6 +47,8 @@ static int sxgbe_probe_config_dt(struct platform_device *pdev,
        plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
                                           sizeof(*plat->mdio_bus_data),
                                           GFP_KERNEL);
+       if (!plat->mdio_bus_data)
+               return -ENOMEM;
 
        dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
        if (!dma_cfg)
index 17d4bbaeb65cd7e8711c6cb541bc36713695235a..6e359572b9f0ea53ed46b553fb1cb51273415f57 100644 (file)
@@ -269,7 +269,10 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
        ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
        ctrl |= val << reg_shift;
 
-       if (dwmac->f2h_ptp_ref_clk) {
+       if (dwmac->f2h_ptp_ref_clk ||
+           phymode == PHY_INTERFACE_MODE_MII ||
+           phymode == PHY_INTERFACE_MODE_GMII ||
+           phymode == PHY_INTERFACE_MODE_SGMII) {
                ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
                regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
                            &module);
index fffd6d5fc907b01d2277370f80b23af5a9288e8d..39c2122a4f26947ff564b773df454ce8fa05ec75 100644 (file)
@@ -979,14 +979,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id sun8i_dwmac_match[] = {
-       { .compatible = "allwinner,sun8i-h3-emac",
-               .data = &emac_variant_h3 },
-       { .compatible = "allwinner,sun8i-v3s-emac",
-               .data = &emac_variant_v3s },
-       { .compatible = "allwinner,sun8i-a83t-emac",
-               .data = &emac_variant_a83t },
-       { .compatible = "allwinner,sun50i-a64-emac",
-               .data = &emac_variant_a64 },
        { }
 };
 MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
index 56ba411421f0a77bae5b4568fb273464472741f5..38d1cc557c116b1047e27daa7923a654a438d52e 100644 (file)
@@ -96,7 +96,7 @@ int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
        if (of_machine_is_compatible("ti,dra7"))
                return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
 
-       dev_err(dev, "incompatible machine/device type for reading mac address\n");
+       dev_info(dev, "incompatible machine/device type for reading mac address\n");
        return -ENOENT;
 }
 EXPORT_SYMBOL_GPL(ti_cm_get_macid);
index 0d78727f1a14dd9c4ae301f053769437cbe4eb3b..d91cbc6c3ca4eee43090bccc70c76b9a9d1fbb85 100644 (file)
@@ -1269,7 +1269,12 @@ static void netvsc_link_change(struct work_struct *w)
        bool notify = false, reschedule = false;
        unsigned long flags, next_reconfig, delay;
 
-       rtnl_lock();
+       /* if changes are happening, come back later */
+       if (!rtnl_trylock()) {
+               schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+               return;
+       }
+
        net_device = rtnl_dereference(ndev_ctx->nvdev);
        if (!net_device)
                goto out_unlock;
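
rtnl_trylock() here breaks an ABBA deadlock: the link-change work needs RTNL, while an RTNL-holding path (device removal) can be blocked waiting for this very work to finish. Instead of blocking, the work backs off and reschedules itself. The pattern in isolation, with a hypothetical context struct holding the delayed work:

	static void link_work_sketch(struct work_struct *w)
	{
		struct my_ctx *ctx = container_of(w, struct my_ctx, dwork.work);

		if (!rtnl_trylock()) {
			/* RTNL busy -- possibly with our own teardown,
			 * which may be waiting on this work. Retry later.
			 */
			schedule_delayed_work(&ctx->dwork, HZ / 10);
			return;
		}

		/* RTNL-protected state update goes here. */

		rtnl_unlock();
	}
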
index 5e1ab11608560799bb6d956b213d06f58b8fc45e..98e4deaa3a6a1c2f89d55e8f2db54b6fc93380be 100644 (file)
@@ -3521,6 +3521,7 @@ module_init(macsec_init);
 module_exit(macsec_exit);
 
 MODULE_ALIAS_RTNL_LINK("macsec");
+MODULE_ALIAS_GENL_FAMILY("macsec");
 
 MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
 MODULE_LICENSE("GPL v2");
index 5068c582d502c6944a01a2ee87062d7ffb409034..d0626bf5c540911b0d15bdbab1b960145b6d124c 100644 (file)
@@ -749,9 +749,6 @@ void phy_stop_machine(struct phy_device *phydev)
        if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
                phydev->state = PHY_UP;
        mutex_unlock(&phydev->lock);
-
-       /* Now we can run the state machine synchronously */
-       phy_state_machine(&phydev->state_queue.work);
 }
 
 /**
index 1790f7fec12573fe21d088f9865126784efc0c08..2f742ae5b92ee7d7be080ec60fca7958f722e576 100644 (file)
@@ -864,15 +864,17 @@ EXPORT_SYMBOL(phy_attached_info);
 #define ATTACHED_FMT "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)"
 void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
 {
+       const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
+
        if (!fmt) {
                dev_info(&phydev->mdio.dev, ATTACHED_FMT "\n",
-                        phydev->drv->name, phydev_name(phydev),
+                        drv_name, phydev_name(phydev),
                         phydev->irq);
        } else {
                va_list ap;
 
                dev_info(&phydev->mdio.dev, ATTACHED_FMT,
-                        phydev->drv->name, phydev_name(phydev),
+                        drv_name, phydev_name(phydev),
                         phydev->irq);
 
                va_start(ap, fmt);
index 8f572b9f362555b55dc2e3cccfc5761140664616..9c80e80c5493b4f5a5d56c06f0d188debe53eca1 100644 (file)
@@ -1758,6 +1758,13 @@ static const struct usb_device_id cdc_devs[] = {
          .driver_info = (unsigned long)&wwan_noarp_info,
        },
 
+       /* u-blox TOBY-L4 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x1546, 0x1010,
+               USB_CLASS_COMM,
+               USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+         .driver_info = (unsigned long)&wwan_info,
+       },
+
        /* Generic CDC-NCM devices */
        { USB_INTERFACE_INFO(USB_CLASS_COMM,
                USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
index 98f17b05c68b745276ecbe67d2934918eaf340bd..b06169ea60dc9d519f9a7673e9aea809539dbd5c 100644 (file)
@@ -1058,7 +1058,7 @@ static void free_old_xmit_skbs(struct send_queue *sq)
                bytes += skb->len;
                packets++;
 
-               dev_kfree_skb_any(skb);
+               dev_consume_skb_any(skb);
        }
 
        /* Avoid overhead when no packets have been processed
index fa315d84e98eacdf4f58bf47863f9f0aa3cce712..a1ea9ef97ed97adc31e7c399b66123cbab8058e1 100644 (file)
@@ -787,6 +787,8 @@ int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
 
 void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
 
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
 /* common functions that are used by gen2 transport */
 void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
index 351c4423125a219cc88b4dc0846d5b573526afbb..942736d3fa75521018580aed3518dd2fea251fc6 100644 (file)
@@ -597,7 +597,7 @@ static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
        rxq->free_count += RX_CLAIM_REQ_ALLOC;
 }
 
-static void iwl_pcie_rx_allocator_work(struct work_struct *data)
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
 {
        struct iwl_rb_allocator *rba_p =
                container_of(data, struct iwl_rb_allocator, rx_alloc);
@@ -900,10 +900,6 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
                        return err;
        }
        def_rxq = trans_pcie->rxq;
-       if (!rba->alloc_wq)
-               rba->alloc_wq = alloc_workqueue("rb_allocator",
-                                               WQ_HIGHPRI | WQ_UNBOUND, 1);
-       INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
 
        spin_lock(&rba->lock);
        atomic_set(&rba->req_pending, 0);
@@ -1017,10 +1013,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
        }
 
        cancel_work_sync(&rba->rx_alloc);
-       if (rba->alloc_wq) {
-               destroy_workqueue(rba->alloc_wq);
-               rba->alloc_wq = NULL;
-       }
 
        iwl_pcie_free_rbs_pool(trans);
 
index f95eec52508e9bc4784f5cda71548bbd7d761b0b..3927bbf04f727d5e0b9a9c0a9d1c78cee42e3d94 100644 (file)
@@ -1786,6 +1786,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
                iwl_pcie_tx_free(trans);
        iwl_pcie_rx_free(trans);
 
+       if (trans_pcie->rba.alloc_wq) {
+               destroy_workqueue(trans_pcie->rba.alloc_wq);
+               trans_pcie->rba.alloc_wq = NULL;
+       }
+
        if (trans_pcie->msix_enabled) {
                for (i = 0; i < trans_pcie->alloc_vecs; i++) {
                        irq_set_affinity_hint(
@@ -3169,6 +3174,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
         }
 
+       trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+                                                  WQ_HIGHPRI | WQ_UNBOUND, 1);
+       INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
 #ifdef CONFIG_IWLWIFI_PCIE_RTPM
        trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
 #else
index 08f0477f78d93d102b11524cffeb982d7ac29c82..9915d83a4a30550816fafbc741471e00fb694942 100644 (file)
@@ -1571,6 +1571,7 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
 
        wl->state = WL1251_STATE_OFF;
        mutex_init(&wl->mutex);
+       spin_lock_init(&wl->wl_lock);
 
        wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
        wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
index df6ce59a1f954257cdef95a7733736e42c8b9491..205d82d4c468717ac26050358acb65a968481097 100644 (file)
@@ -673,9 +673,7 @@ enum mlx5_device_state {
 };
 
 enum mlx5_interface_state {
-       MLX5_INTERFACE_STATE_DOWN = BIT(0),
-       MLX5_INTERFACE_STATE_UP = BIT(1),
-       MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+       MLX5_INTERFACE_STATE_UP = BIT(0),
 };
 
 enum mlx5_pci_status {
index 779b235955968353a926709436483bc3ea63dde9..c99ba7914c0a41d6d829e6018df16f4a229412b5 100644 (file)
@@ -3866,6 +3866,8 @@ int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev);
 
+bool netdev_has_any_upper_dev(struct net_device *dev);
+
 void *netdev_lower_get_next_private(struct net_device *dev,
                                    struct list_head **iter);
 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
index dbe29b6c9bd6097b4a7e5763ad39d26921780270..d67a8182e5eb2177d978ca8a5effeaf6bd579394 100644 (file)
@@ -973,7 +973,23 @@ int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg
 int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
                              int offset, int len);
 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
-int skb_pad(struct sk_buff *skb, int pad);
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error);
+
+/**
+ *     skb_pad                 -       zero pad the tail of an skb
+ *     @skb: buffer to pad
+ *     @pad: space to pad
+ *
+ *     Ensure that a buffer is followed by a padding area that is zero
+ *     filled. Used by network drivers which may DMA or transfer data
+ *     beyond the buffer end onto the wire.
+ *
+ *     May return error in out of memory cases. The skb is freed on error.
+ */
+static inline int skb_pad(struct sk_buff *skb, int pad)
+{
+       return __skb_pad(skb, pad, true);
+}
 #define dev_kfree_skb(a)       consume_skb(a)
 
 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
@@ -2825,25 +2841,42 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
  *     skb_put_padto - increase size and pad an skbuff up to a minimal size
  *     @skb: buffer to pad
  *     @len: minimal length
+ *     @free_on_error: free buffer on error
  *
  *     Pads up a buffer to ensure the trailing bytes exist and are
  *     blanked. If the buffer already contains sufficient data it
  *     is untouched. Otherwise it is extended. Returns zero on
- *     success. The skb is freed on error.
+ *     success. The skb is freed on error if @free_on_error is true.
  */
-static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len,
+                                 bool free_on_error)
 {
        unsigned int size = skb->len;
 
        if (unlikely(size < len)) {
                len -= size;
-               if (skb_pad(skb, len))
+               if (__skb_pad(skb, len, free_on_error))
                        return -ENOMEM;
                __skb_put(skb, len);
        }
        return 0;
 }
 
+/**
+ *     skb_put_padto - increase size and pad an skbuff up to a minimal size
+ *     @skb: buffer to pad
+ *     @len: minimal length
+ *
+ *     Pads up a buffer to ensure the trailing bytes exist and are
+ *     blanked. If the buffer already contains sufficient data it
+ *     is untouched. Otherwise it is extended. Returns zero on
+ *     success. The skb is freed on error.
+ */
+static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
+{
+       return __skb_put_padto(skb, len, true);
+}
+
 static inline int skb_add_data(struct sk_buff *skb,
                               struct iov_iter *from, int copy)
 {
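
The new free_on_error parameter serves callers that must keep ownership of the skb on failure; the KSZ DSA tagger fixed later in this pull is the motivating user, where the caller also frees on error and the old always-free behaviour caused a double free. A minimal caller-side sketch:

	/* Pad to the minimum frame size, but keep ownership of skb on
	 * failure so the caller's error path frees it exactly once.
	 */
	if (__skb_put_padto(skb, ETH_ZLEN, false))
		return NULL;	/* skb is still valid here */
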
index 1a88008cc6f5e14a788491f62ae3c15e47b62c16..af509f801084dcf19a27f9d4c82344c5e54fe792 100644 (file)
@@ -70,6 +70,7 @@ struct fib6_node {
        __u16                   fn_flags;
        int                     fn_sernum;
        struct rt6_info         *rr_ptr;
+       struct rcu_head         rcu;
 };
 
 #ifndef CONFIG_IPV6_SUBTREES
@@ -104,7 +105,7 @@ struct rt6_info {
         * the same cache line.
         */
        struct fib6_table               *rt6i_table;
-       struct fib6_node                *rt6i_node;
+       struct fib6_node __rcu          *rt6i_node;
 
        struct in6_addr                 rt6i_gateway;
 
@@ -167,13 +168,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
        rt0->rt6i_flags |= RTF_EXPIRES;
 }
 
+/* Safely read fn->sernum for the passed-in rt and store the result
+ * in the passed-in cookie.
+ * Returns true if the cookie could be read safely, false otherwise.
+ */
+static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
+                                      u32 *cookie)
+{
+       struct fib6_node *fn;
+       bool status = false;
+
+       rcu_read_lock();
+       fn = rcu_dereference(rt->rt6i_node);
+
+       if (fn) {
+               *cookie = fn->fn_sernum;
+               status = true;
+       }
+
+       rcu_read_unlock();
+       return status;
+}
+
 static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
+       u32 cookie = 0;
+
        if (rt->rt6i_flags & RTF_PCPU ||
            (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
                rt = (struct rt6_info *)(rt->dst.from);
 
-       return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+       rt6_get_cookie_safe(rt, &cookie);
+
+       return cookie;
 }
 
 static inline void ip6_rt_put(struct rt6_info *rt)
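
rt6_get_cookie_safe() packages the RCU read-side protocol for rt6i_node, which becomes an __rcu pointer in this series: take rcu_read_lock(), rcu_dereference() the node, and read fn_sernum only if the route is still attached. Callers test the return value instead of peeking at rt->rt6i_node directly, e.g.:

	u32 cookie;

	if (!rt6_get_cookie_safe(rt, &cookie))
		return NULL;	/* route already detached from its node */
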
index 67f815e5d52517390226bc3531b1ea7b5f1020bc..c1109cdbbfa6afb9aff0d6033aef7b615630ffc1 100644 (file)
@@ -101,6 +101,13 @@ struct Qdisc {
        spinlock_t              busylock ____cacheline_aligned_in_smp;
 };
 
+static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
+{
+       if (qdisc->flags & TCQ_F_BUILTIN)
+               return;
+       refcount_inc(&qdisc->refcnt);
+}
+
 static inline bool qdisc_is_running(const struct Qdisc *qdisc)
 {
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
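
qdisc_refcount_inc() exists because the builtin qdiscs (noop and friends) are static singletons flagged TCQ_F_BUILTIN and are never freed; bumping their refcount would be meaningless at best. With the helper, call sites can take a reference unconditionally:

	/* Safe for any qdisc; a no-op for the static builtins. */
	qdisc_refcount_inc(qdisc);
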
index ada65e767b28dfcabb662a7b08f65c6fc04f5b73..f642a39f9eeeeb3a1bbff48fd467c3a3acb96160 100644 (file)
@@ -1004,9 +1004,7 @@ void tcp_get_default_congestion_control(char *name);
 void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load);
-void tcp_reinit_congestion_control(struct sock *sk,
-                                  const struct tcp_congestion_ops *ca);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
index 586de4b811b5678c72a045a3ede9852164b9b3e2..626c2d8a70c59f51fb5b2558433d222b56610246 100644 (file)
@@ -260,7 +260,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
 }
 
 void udp_v4_early_demux(struct sk_buff *skb);
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst);
 int udp_get_port(struct sock *sk, unsigned short snum,
                 int (*saddr_cmp)(const struct sock *,
                                  const struct sock *));
index 4fb463172aa88e81d9caaaee271d8d8c8a9db978..d11c8181f4c5f9d612aa0f3bb3960d405dbf16b1 100644 (file)
@@ -652,12 +652,27 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
        }
 }
 
+static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
+{
+       return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+              BITS_PER_LONG == 64;
+}
+
+static u32 htab_size_value(const struct bpf_htab *htab, bool percpu)
+{
+       u32 size = htab->map.value_size;
+
+       if (percpu || fd_htab_map_needs_adjust(htab))
+               size = round_up(size, 8);
+       return size;
+}
+
 static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
                                         struct htab_elem *old_elem)
 {
-       u32 size = htab->map.value_size;
+       u32 size = htab_size_value(htab, percpu);
        bool prealloc = htab_is_prealloc(htab);
        struct htab_elem *l_new, **pl_new;
        void __percpu *pptr;
@@ -696,9 +711,6 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 
        memcpy(l_new->key, key, key_size);
        if (percpu) {
-               /* round up value_size to 8 bytes */
-               size = round_up(size, 8);
-
                if (prealloc) {
                        pptr = htab_elem_get_ptr(l_new, key_size);
                } else {
@@ -1209,17 +1221,9 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
 
 static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr)
 {
-       struct bpf_map *map;
-
        if (attr->value_size != sizeof(u32))
                return ERR_PTR(-EINVAL);
-
-       /* pointer is stored internally */
-       attr->value_size = sizeof(void *);
-       map = htab_map_alloc(attr);
-       attr->value_size = sizeof(u32);
-
-       return map;
+       return htab_map_alloc(attr);
 }
 
 static void fd_htab_map_free(struct bpf_map *map)
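
The helper pair localizes the map-in-map size adjustment to element allocation: on 64-bit, a BPF_MAP_TYPE_HASH_OF_MAPS slot internally stores a struct bpf_map pointer even though userspace declares value_size as sizeof(u32), so the element's value area is rounded up to pointer size. The old approach, removed above, temporarily rewrote attr->value_size around htab_map_alloc(), which broke handling of pinned map nodes. The arithmetic is simply:

	u32 size = htab->map.value_size;	/* 4, as declared by userspace */

	if (percpu || fd_htab_map_needs_adjust(htab))
		size = round_up(size, 8);	/* room for the kernel pointer */
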
index 861ae2a165f4dc3271b648794486e6a768590733..5a7be3bddfa9f2d02f519df265426b2a29419b0c 100644 (file)
@@ -53,6 +53,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
        brstats->tx_bytes += skb->len;
        u64_stats_update_end(&brstats->syncp);
 
+#ifdef CONFIG_NET_SWITCHDEV
+       skb->offload_fwd_mark = 0;
+#endif
        BR_INPUT_SKB_CB(skb)->brdev = dev;
 
        skb_reset_mac_header(skb);
index 181a44d0f1da6364a8965b54cf13aa6a5e44ef22..f6b1c7de059d8053e5820e66ae4bc354f6461b84 100644 (file)
@@ -115,7 +115,7 @@ br_switchdev_fdb_call_notifiers(bool adding, const unsigned char *mac,
 void
 br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 {
-       if (!fdb->added_by_user)
+       if (!fdb->added_by_user || !fdb->dst)
                return;
 
        switch (type) {
index a21ca8dee5eadca0d9ab7c78a939ac90bb3963b0..8c2f4489ff8f18680543b6adcad7604036458d5c 100644 (file)
@@ -362,7 +362,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
        if (flags & MSG_PEEK) {
                err = -ENOENT;
                spin_lock_bh(&sk_queue->lock);
-               if (skb == skb_peek(sk_queue)) {
+               if (skb->next) {
                        __skb_unlink(skb, sk_queue);
                        refcount_dec(&skb->users);
                        if (destructor)
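
This head comparison was the SO_PEEK_OFF infinite loop: after peeking at an offset, the skb to drop is no longer the queue head, skb == skb_peek() never matched, and the skb was never unlinked. Testing skb->next asks the right question, whether the skb is still linked at all, since __skb_unlink() clears the pointers. The fixed path in isolation:

	spin_lock_bh(&sk_queue->lock);
	if (skb->next) {	/* still linked, wherever it sits in the queue */
		__skb_unlink(skb, sk_queue);
		refcount_dec(&skb->users);
	}
	spin_unlock_bh(&sk_queue->lock);
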
index ce15a06d5558af0292cc739b42a7dc3c1d89428d..86b4b0a79e7abb6554af07ed81a7b91e2f8762bf 100644 (file)
@@ -5289,6 +5289,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
         * Ideally, a new ndo_busy_poll_stop() could avoid another round.
         */
        rc = napi->poll(napi, BUSY_POLL_BUDGET);
+       trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
        netpoll_poll_unlock(have_poll_lock);
        if (rc == BUSY_POLL_BUDGET)
                __napi_schedule(napi);
@@ -5667,12 +5668,13 @@ EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
  * Find out if a device is linked to an upper device and return true in case
  * it is. The caller must hold the RTNL lock.
  */
-static bool netdev_has_any_upper_dev(struct net_device *dev)
+bool netdev_has_any_upper_dev(struct net_device *dev)
 {
        ASSERT_RTNL();
 
        return !list_empty(&dev->adj_list.upper);
 }
+EXPORT_SYMBOL(netdev_has_any_upper_dev);
 
 /**
  * netdev_master_upper_dev_get - Get master upper device
index 6280a602604c2e0e05c57d0c2104b5f4791f6928..169974998c7692b063947cb925fede167f2fb817 100644 (file)
@@ -2836,15 +2836,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                   sk->sk_prot->setsockopt == tcp_setsockopt) {
                if (optname == TCP_CONGESTION) {
                        char name[TCP_CA_NAME_MAX];
+                       bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
 
                        strncpy(name, optval, min_t(long, optlen,
                                                    TCP_CA_NAME_MAX-1));
                        name[TCP_CA_NAME_MAX-1] = 0;
-                       ret = tcp_set_congestion_control(sk, name, false);
-                       if (!ret && bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
-                               /* replacing an existing ca */
-                               tcp_reinit_congestion_control(sk,
-                                       inet_csk(sk)->icsk_ca_ops);
+                       ret = tcp_set_congestion_control(sk, name, false, reinit);
                } else {
                        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2872,7 +2869,6 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                                ret = -EINVAL;
                        }
                }
-               ret = -EINVAL;
 #endif
        } else {
                ret = -EINVAL;
index f990eb8b30a9c4a57ef39d34413dd2f2a75babb6..e0755660628407e5a1cefc9ed2c4a725f68628a0 100644 (file)
@@ -1363,18 +1363,20 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 EXPORT_SYMBOL(skb_copy_expand);
 
 /**
- *     skb_pad                 -       zero pad the tail of an skb
+ *     __skb_pad               -       zero pad the tail of an skb
  *     @skb: buffer to pad
  *     @pad: space to pad
+ *     @free_on_error: free buffer on error
  *
  *     Ensure that a buffer is followed by a padding area that is zero
  *     filled. Used by network drivers which may DMA or transfer data
  *     beyond the buffer end onto the wire.
  *
- *     May return error in out of memory cases. The skb is freed on error.
+ *     May return error in out of memory cases. The skb is freed on error
+ *     if @free_on_error is true.
  */
 
-int skb_pad(struct sk_buff *skb, int pad)
+int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
 {
        int err;
        int ntail;
@@ -1403,10 +1405,11 @@ int skb_pad(struct sk_buff *skb, int pad)
        return 0;
 
 free_skb:
-       kfree_skb(skb);
+       if (free_on_error)
+               kfree_skb(skb);
        return err;
 }
-EXPORT_SYMBOL(skb_pad);
+EXPORT_SYMBOL(__skb_pad);
 
 /**
  *     pskb_put - add data to the tail of a potentially fragmented buffer
index c442051d5a55732d37ddc18187389d66d8d08bd9..20bc9c56fca05c230477b15d6dad15e02b488800 100644 (file)
@@ -577,7 +577,7 @@ static int dsa_dst_parse(struct dsa_switch_tree *dst)
                        return err;
        }
 
-       if (!dst->cpu_dp->netdev) {
+       if (!dst->cpu_dp) {
                pr_warn("Tree has no master device\n");
                return -EINVAL;
        }
index de66ca8e620177693e55b54492404d8d3ee52197..fcd90f79458e20fefd76661fe7bc7e07d42ed1a3 100644 (file)
@@ -42,7 +42,8 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
        padlen = (skb->len >= ETH_ZLEN) ? 0 : ETH_ZLEN - skb->len;
 
        if (skb_tailroom(skb) >= padlen + KSZ_INGRESS_TAG_LEN) {
-               if (skb_put_padto(skb, skb->len + padlen))
+               /* Let dsa_slave_xmit() free skb */
+               if (__skb_put_padto(skb, skb->len + padlen, false))
                        return NULL;
 
                nskb = skb;
@@ -60,12 +61,13 @@ static struct sk_buff *ksz_xmit(struct sk_buff *skb, struct net_device *dev)
                                         skb_transport_header(skb) - skb->head);
                skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
 
-               if (skb_put_padto(nskb, nskb->len + padlen)) {
-                       kfree_skb(nskb);
+               /* Let skb_put_padto() free nskb, and let dsa_slave_xmit() free
+                * skb
+                */
+               if (skb_put_padto(nskb, nskb->len + padlen))
                        return NULL;
-               }
 
-               kfree_skb(skb);
+               consume_skb(skb);
        }
 
        tag = skb_put(nskb, KSZ_INGRESS_TAG_LEN);
index b09e56214005c7cf9e257eb777882b234a2d6f18..9c7b1d74a5c6cc6a80e51a259ef6645060390d31 100644 (file)
@@ -40,7 +40,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_set_network_header(nskb, skb_network_header(skb) - skb->head);
        skb_set_transport_header(nskb, skb_transport_header(skb) - skb->head);
        skb_copy_and_csum_dev(skb, skb_put(nskb, skb->len));
-       kfree_skb(skb);
+       consume_skb(skb);
 
        if (padlen) {
                skb_put_zero(nskb, padlen);
index 4e7bdb213cd076e44b5a36b2168413682bb23f68..172d8309f89e52b1c06781571873520a41ef9036 100644 (file)
@@ -314,7 +314,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
        hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
        ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
-       skb_put_padto(skb, ETH_ZLEN + HSR_HLEN);
+       if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
+               return;
 
        hsr_forward_skb(skb, master);
        return;
index 0cbee0a666ffd2a1b7451b0b07513b1cf1cebfc0..df68963dc90ada0ec19f8997d920f6faf3186e05 100644 (file)
@@ -258,7 +258,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                esp_output_udp_encap(x, skb, esp);
 
        if (!skb_cloned(skb)) {
-               if (tailen <= skb_availroom(skb)) {
+               if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);
@@ -292,8 +292,6 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 
                        kunmap_atomic(vaddr);
 
-                       spin_unlock_bh(&x->lock);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -301,6 +299,9 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                        skb_shinfo(skb)->nr_frags = ++nfrags;
 
                        pfrag->offset = pfrag->offset + allocsize;
+
+                       spin_unlock_bh(&x->lock);
+
                        nfrags++;
 
                        skb->len += tailen;
@@ -381,7 +382,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
-               goto error;
+               goto error_free;
 
        if (!esp->inplace) {
                int allocsize;
@@ -392,7 +393,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
-                       goto error;
+                       goto error_free;
                }
 
                skb_shinfo(skb)->nr_frags = 1;
@@ -409,7 +410,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
-                       goto error;
+                       goto error_free;
        }
 
        if ((x->props.flags & XFRM_STATE_ESN))
@@ -442,8 +443,9 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
 
        if (sg != dsg)
                esp_ssg_unref(x, tmp);
-       kfree(tmp);
 
+error_free:
+       kfree(tmp);
 error:
        return err;
 }
@@ -695,8 +697,10 @@ skip_cow:
 
        sg_init_table(sg, nfrags);
        err = skb_to_sgvec(skb, sg, 0, skb->len);
-       if (unlikely(err < 0))
+       if (unlikely(err < 0)) {
+               kfree(tmp);
                goto out;
+       }
 
        skb->ip_summed = CHECKSUM_NONE;
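
The unlock move in esp_output_head() is a lock-extent fix: pfrag points at the per-state x->xfrag page allocator, so pfrag->offset must be advanced while x->lock is still held, otherwise two CPUs can be handed the same region of the page. The identical change is applied to the esp6 variant further down. A condensed fragment (declarations omitted):

	spin_lock_bh(&x->lock);
	if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
		spin_unlock_bh(&x->lock);
		goto error;
	}
	page = pfrag->page;
	get_page(page);

	pfrag->offset += allocsize;	/* still under x->lock */
	spin_unlock_bh(&x->lock);
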
 
index e0666016a7642c017b4693d32723245adc484982..50112324fa5c3638527b12477c356ce406ba9a36 100644 (file)
@@ -257,7 +257,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features_
        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
        err = esp_output_tail(x, skb, &esp);
-       if (err < 0)
+       if (err)
                return err;
 
        secpath_reset(skb);
index 0bc3c3d73e61e00a04d73f32800256a62c8943a5..9e9d9afd18f745f810dc5d985c5c4532ef8053a3 100644 (file)
@@ -268,14 +268,14 @@ unsigned int arpt_do_table(struct sk_buff *skb,
                acpar.targinfo = t->data;
                verdict = t->u.kernel.target->target(skb, &acpar);
 
-               /* Target might have changed stuff. */
-               arp = arp_hdr(skb);
-
-               if (verdict == XT_CONTINUE)
+               if (verdict == XT_CONTINUE) {
+                       /* Target might have changed stuff. */
+                       arp = arp_hdr(skb);
                        e = arpt_next_entry(e);
-               else
+               } else {
                        /* Verdict */
                        break;
+               }
        } while (!acpar.hotdrop);
        xt_write_recseq_end(addend);
        local_bh_enable();
index 2a55a40211cbfb94a9ade41c33678bba4be2a7e4..622ed2887cd563dc5e708028d2f17726c8ca1c29 100644 (file)
@@ -352,13 +352,14 @@ ipt_do_table(struct sk_buff *skb,
                acpar.targinfo = t->data;
 
                verdict = t->u.kernel.target->target(skb, &acpar);
-               /* Target might have changed stuff. */
-               ip = ip_hdr(skb);
-               if (verdict == XT_CONTINUE)
+               if (verdict == XT_CONTINUE) {
+                       /* Target might have changed stuff. */
+                       ip = ip_hdr(skb);
                        e = ipt_next_entry(e);
-               else
+               } else {
                        /* Verdict */
                        break;
+               }
        } while (!acpar.hotdrop);
 
        xt_write_recseq_end(addend);
index 7d72decb80f9f9c4150bd2a42c4b802ba1fd7f17..efaa04dcc80e3a66ef34fa6bd13be66b601925b0 100644 (file)
@@ -117,7 +117,8 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
                 * functions are also incrementing the refcount on their own,
                 * so it's safe to remove the entry even if it's in use. */
 #ifdef CONFIG_PROC_FS
-               proc_remove(c->pde);
+               if (cn->procdir)
+                       proc_remove(c->pde);
 #endif
                return;
        }
@@ -815,6 +816,7 @@ static void clusterip_net_exit(struct net *net)
 #ifdef CONFIG_PROC_FS
        struct clusterip_net *cn = net_generic(net, clusterip_net_id);
        proc_remove(cn->procdir);
+       cn->procdir = NULL;
 #endif
        nf_unregister_net_hook(net, &cip_arp_ops);
 }
index 71ce33decd971feee72f225b2bf7ccaf2f5f456f..a3e91b552edce4edee0d3b9ee5e07105946d2dd9 100644 (file)
@@ -2481,7 +2481,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                name[val] = 0;
 
                lock_sock(sk);
-               err = tcp_set_congestion_control(sk, name, true);
+               err = tcp_set_congestion_control(sk, name, true, true);
                release_sock(sk);
                return err;
        }
index fde983f6376be98247b9f2ff3d0307c2502e6392..421ea1b918da5bc4a3974531539cd67266f70798 100644 (file)
@@ -189,8 +189,8 @@ void tcp_init_congestion_control(struct sock *sk)
                INET_ECN_dontxmit(sk);
 }
 
-void tcp_reinit_congestion_control(struct sock *sk,
-                                  const struct tcp_congestion_ops *ca)
+static void tcp_reinit_congestion_control(struct sock *sk,
+                                         const struct tcp_congestion_ops *ca)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -338,7 +338,7 @@ out:
  * tcp_reinit_congestion_control (if the current congestion control was
  * already initialized).
  */
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_congestion_ops *ca;
@@ -360,9 +360,18 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load)
        if (!ca) {
                err = -ENOENT;
        } else if (!load) {
-               icsk->icsk_ca_ops = ca;
-               if (!try_module_get(ca->owner))
+               const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
+
+               if (try_module_get(ca->owner)) {
+                       if (reinit) {
+                               tcp_reinit_congestion_control(sk, ca);
+                       } else {
+                               icsk->icsk_ca_ops = ca;
+                               module_put(old_ca->owner);
+                       }
+               } else {
                        err = -EBUSY;
+               }
        } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
                     ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
                err = -EPERM;
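
The rewritten !load branch fixes a module refcount leak: the new congestion ops' module is pinned with try_module_get() before being installed, and the old module's reference is dropped only once the swap is certain; the reinit flag additionally routes through tcp_reinit_congestion_control() when an already-initialized ca is being replaced (the eBPF setsockopt path). The core of the swap:

	const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;

	if (!try_module_get(ca->owner))
		return -EBUSY;

	if (reinit) {
		tcp_reinit_congestion_control(sk, ca);	/* releases old ca */
	} else {
		icsk->icsk_ca_ops = ca;
		module_put(old_ca->owner);
	}
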
index cd1d044a7fa580f315af0fd81eb1bf425fd1f38c..62344804baaef96daf405dbdd5418db541b95864 100644 (file)
@@ -1176,7 +1176,7 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
        scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
        scratch->is_linear = !skb_is_nonlinear(skb);
 #endif
-       if (likely(!skb->_skb_refdst))
+       if (likely(!skb->_skb_refdst && !skb_sec_path(skb)))
                scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
@@ -1929,14 +1929,16 @@ drop:
 /* For TCP sockets, sk_rx_dst is protected by socket lock
  * For UDP, we use xchg() to guard against concurrent changes.
  */
-void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
 {
        struct dst_entry *old;
 
        if (dst_hold_safe(dst)) {
                old = xchg(&sk->sk_rx_dst, dst);
                dst_release(old);
+               return old != dst;
        }
+       return false;
 }
 EXPORT_SYMBOL(udp_sk_rx_dst_set);
 
index 3c46e9513a31d04eeb1e76384f146b759ae38eed..936e9ab4dda5453ce30b8640b85693b9728502fd 100644 (file)
@@ -5556,7 +5556,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                 * our DAD process, so we don't need
                 * to do it again
                 */
-               if (!(ifp->rt->rt6i_node))
+               if (!rcu_access_pointer(ifp->rt->rt6i_node))
                        ip6_ins_rt(ifp->rt);
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
index 9ed35473dcb53bd6ae52f8f86e1558410bbcd7b6..ab64f367d11cc256ddc56527d979a06e32170745 100644 (file)
@@ -226,7 +226,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
        int tailen = esp->tailen;
 
        if (!skb_cloned(skb)) {
-               if (tailen <= skb_availroom(skb)) {
+               if (tailen <= skb_tailroom(skb)) {
                        nfrags = 1;
                        trailer = skb;
                        tail = skb_tail_pointer(trailer);
@@ -260,8 +260,6 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 
                        kunmap_atomic(vaddr);
 
-                       spin_unlock_bh(&x->lock);
-
                        nfrags = skb_shinfo(skb)->nr_frags;
 
                        __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
@@ -269,6 +267,9 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                        skb_shinfo(skb)->nr_frags = ++nfrags;
 
                        pfrag->offset = pfrag->offset + allocsize;
+
+                       spin_unlock_bh(&x->lock);
+
                        nfrags++;
 
                        skb->len += tailen;
@@ -345,7 +346,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                           (unsigned char *)esph - skb->data,
                           assoclen + ivlen + esp->clen + alen);
        if (unlikely(err < 0))
-               goto error;
+               goto error_free;
 
        if (!esp->inplace) {
                int allocsize;
@@ -356,7 +357,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                spin_lock_bh(&x->lock);
                if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
                        spin_unlock_bh(&x->lock);
-                       goto error;
+                       goto error_free;
                }
 
                skb_shinfo(skb)->nr_frags = 1;
@@ -373,7 +374,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
                                   (unsigned char *)esph - skb->data,
                                   assoclen + ivlen + esp->clen + alen);
                if (unlikely(err < 0))
-                       goto error;
+                       goto error_free;
        }
 
        if ((x->props.flags & XFRM_STATE_ESN))
@@ -406,8 +407,9 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
 
        if (sg != dsg)
                esp_ssg_unref(x, tmp);
-       kfree(tmp);
 
+error_free:
+       kfree(tmp);
 error:
        return err;
 }
index f02f131f6435a967de395b9a7069051c93a039d7..1cf437f75b0bf2bc446337ededbf58bd22673823 100644 (file)
@@ -286,7 +286,7 @@ static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb,  netdev_features
        esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
 
        err = esp6_output_tail(x, skb, &esp);
-       if (err < 0)
+       if (err)
                return err;
 
        secpath_reset(skb);
index 5cc0ea0381981b0539d5d6e67401d962a6f6a230..e1c85bb4eac0fd50905fc441e726eca843fc36a8 100644 (file)
@@ -148,11 +148,23 @@ static struct fib6_node *node_alloc(void)
        return fn;
 }
 
-static void node_free(struct fib6_node *fn)
+static void node_free_immediate(struct fib6_node *fn)
+{
+       kmem_cache_free(fib6_node_kmem, fn);
+}
+
+static void node_free_rcu(struct rcu_head *head)
 {
+       struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
+
        kmem_cache_free(fib6_node_kmem, fn);
 }
 
+static void node_free(struct fib6_node *fn)
+{
+       call_rcu(&fn->rcu, node_free_rcu);
+}
+
 static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
 {
        int cpu;
@@ -601,9 +613,9 @@ insert_above:
 
                if (!in || !ln) {
                        if (in)
-                               node_free(in);
+                               node_free_immediate(in);
                        if (ln)
-                               node_free(ln);
+                               node_free_immediate(ln);
                        return ERR_PTR(-ENOMEM);
                }
 
@@ -877,7 +889,7 @@ add:
 
                rt->dst.rt6_next = iter;
                *ins = rt;
-               rt->rt6i_node = fn;
+               rcu_assign_pointer(rt->rt6i_node, fn);
                atomic_inc(&rt->rt6i_ref);
                if (!info->skip_notify)
                        inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
@@ -903,7 +915,7 @@ add:
                        return err;
 
                *ins = rt;
-               rt->rt6i_node = fn;
+               rcu_assign_pointer(rt->rt6i_node, fn);
                rt->dst.rt6_next = iter->dst.rt6_next;
                atomic_inc(&rt->rt6i_ref);
                if (!info->skip_notify)
@@ -1038,7 +1050,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
                                   root, and then (in failure) stale node
                                   in main tree.
                                 */
-                               node_free(sfn);
+                               node_free_immediate(sfn);
                                err = PTR_ERR(sn);
                                goto failure;
                        }
@@ -1468,8 +1480,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
 
 int fib6_del(struct rt6_info *rt, struct nl_info *info)
 {
+       struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
+                                   lockdep_is_held(&rt->rt6i_table->tb6_lock));
        struct net *net = info->nl_net;
-       struct fib6_node *fn = rt->rt6i_node;
        struct rt6_info **rtp;
 
 #if RT6_DEBUG >= 2
@@ -1658,7 +1671,9 @@ static int fib6_clean_node(struct fib6_walker *w)
                        if (res) {
 #if RT6_DEBUG >= 2
                                pr_debug("%s: del failed: rt=%p@%p err=%d\n",
-                                        __func__, rt, rt->rt6i_node, res);
+                                        __func__, rt,
+                                        rcu_access_pointer(rt->rt6i_node),
+                                        res);
 #endif
                                continue;
                        }
@@ -1780,8 +1795,10 @@ static int fib6_age(struct rt6_info *rt, void *arg)
                }
                gc_args->more++;
        } else if (rt->rt6i_flags & RTF_CACHE) {
+               if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout))
+                       rt->dst.obsolete = DST_OBSOLETE_KILL;
                if (atomic_read(&rt->dst.__refcnt) == 1 &&
-                   time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
+                   rt->dst.obsolete == DST_OBSOLETE_KILL) {
                        RT6_TRACE("aging clone %p\n", rt);
                        return -1;
                } else if (rt->rt6i_flags & RTF_GATEWAY) {
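
Splitting node_free() into immediate and RCU-deferred variants encodes a publish/retire rule: a fib6_node that failed to be linked into the tree was never visible to lockless readers and may be freed on the spot, while a published node must wait out a grace period so concurrent rt6i_node dereferences (see the rcu accessors earlier in this series) cannot touch freed memory. Schematically, with a hypothetical 'published' flag:

	if (!published)
		node_free_immediate(fn);	/* never reachable by readers */
	else
		node_free(fn);			/* kmem_cache_free after an RCU grace period */
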
index 02d795fe3d7f2c5e6e922a25dbbe69c8139919b6..a5e466d4e09310ed99c391fea4bb6126d83fbb56 100644 (file)
@@ -242,7 +242,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                        pktopt = xchg(&np->pktoptions, NULL);
                        kfree_skb(pktopt);
 
-                       sk->sk_destruct = inet_sock_destruct;
                        /*
                         * ... and add it to the refcnt debug socks count
                         * in the new family. -acme
index abb2c307fbe8337ce1714e7392072c945ed5af51..a338bbc33cf3cd895fa77e137d6f6389e9a6519c 100644 (file)
@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
        while (offset <= packet_len) {
                struct ipv6_opt_hdr *exthdr;
-               unsigned int len;
 
                switch (**nexthdr) {
 
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 
                exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
                                                 offset);
-               len = ipv6_optlen(exthdr);
-               if (len + offset >= IPV6_MAXPLEN)
+               offset += ipv6_optlen(exthdr);
+               if (offset > IPV6_MAXPLEN)
                        return -EINVAL;
-               offset += len;
                *nexthdr = &exthdr->nexthdr;
        }
 
index 94d6a13d47f0e9ec5ff4cbc50b90d4cd9ca3f38a..2d0e7798c793a4058dc0ef3a5b50734e774500a9 100644 (file)
@@ -440,7 +440,8 @@ static bool rt6_check_expired(const struct rt6_info *rt)
                if (time_after(jiffies, rt->dst.expires))
                        return true;
        } else if (rt->dst.from) {
-               return rt6_check_expired((struct rt6_info *) rt->dst.from);
+               return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
+                      rt6_check_expired((struct rt6_info *)rt->dst.from);
        }
        return false;
 }
@@ -1289,7 +1290,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
 
 static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
 {
-       if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+       u32 rt_cookie = 0;
+
+       if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
                return NULL;
 
        if (rt6_check_expired(rt))
@@ -1357,8 +1360,14 @@ static void ip6_link_failure(struct sk_buff *skb)
                if (rt->rt6i_flags & RTF_CACHE) {
                        if (dst_hold_safe(&rt->dst))
                                ip6_del_rt(rt);
-               } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
-                       rt->rt6i_node->fn_sernum = -1;
+               } else {
+                       struct fib6_node *fn;
+
+                       rcu_read_lock();
+                       fn = rcu_dereference(rt->rt6i_node);
+                       if (fn && (rt->rt6i_flags & RTF_DEFAULT))
+                               fn->fn_sernum = -1;
+                       rcu_read_unlock();
                }
        }
 }
@@ -1375,7 +1384,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
 {
        return !(rt->rt6i_flags & RTF_CACHE) &&
-               (rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+               (rt->rt6i_flags & RTF_PCPU ||
+                rcu_access_pointer(rt->rt6i_node));
 }
 
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
index 20039c8501eb9729619f4337a2757a2954163614..d6886228e1d05c4dd192f5fe431fdaca1ffadabd 100644 (file)
@@ -768,6 +768,15 @@ start_lookup:
        return 0;
 }
 
+static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+       if (udp_sk_rx_dst_set(sk, dst)) {
+               const struct rt6_info *rt = (const struct rt6_info *)dst;
+
+               inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
+       }
+}
+
 int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                   int proto)
 {
@@ -817,7 +826,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
                int ret;
 
                if (unlikely(sk->sk_rx_dst != dst))
-                       udp_sk_rx_dst_set(sk, dst);
+                       udp6_sk_rx_dst_set(sk, dst);
 
                ret = udpv6_queue_rcv_skb(sk, skb);
                sock_put(sk);
index da49191f7ad0d7edfda0c10762ef5f7471a8f6ac..4abf6287d7e1c29314db5c846acd16c3a2a377db 100644 (file)
@@ -1383,6 +1383,10 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
        if (!csk)
                return -EINVAL;
 
+       /* We must prevent loops or risk deadlock! */
+       if (csk->sk_family == PF_KCM)
+               return -EOPNOTSUPP;
+
        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
        if (!psock)
                return -ENOMEM;
index b0c2d4ae781d2114cdb09269d27768f0632a1776..90165a6874bcee338ce4d10ad5a7970a84954d19 100644 (file)
@@ -113,7 +113,6 @@ struct l2tp_net {
        spinlock_t l2tp_session_hlist_lock;
 };
 
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
 
 static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
 {
@@ -127,39 +126,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net)
        return net_generic(net, l2tp_net_id);
 }
 
-/* Tunnel reference counts. Incremented per session that is added to
- * the tunnel.
- */
-static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       refcount_inc(&tunnel->ref_count);
-}
-
-static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
-{
-       if (refcount_dec_and_test(&tunnel->ref_count))
-               l2tp_tunnel_free(tunnel);
-}
-#ifdef L2TP_REFCNT_DEBUG
-#define l2tp_tunnel_inc_refcount(_t)                                   \
-do {                                                                   \
-       pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n",        \
-                __func__, __LINE__, (_t)->name,                        \
-                refcount_read(&_t->ref_count));                        \
-       l2tp_tunnel_inc_refcount_1(_t);                                 \
-} while (0)
-#define l2tp_tunnel_dec_refcount(_t)                                   \
-do {                                                                   \
-       pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n",        \
-                __func__, __LINE__, (_t)->name,                        \
-                refcount_read(&_t->ref_count));                        \
-       l2tp_tunnel_dec_refcount_1(_t);                                 \
-} while (0)
-#else
-#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
-#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
-#endif
-
 /* Session hash global list for L2TPv3.
  * The session_id SHOULD be random according to RFC3931, but several
  * L2TP implementations use incrementing session_ids.  So we do a real
@@ -229,6 +195,27 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
        return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
 }
 
+/* Lookup a tunnel. A new reference is held on the returned tunnel. */
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
+{
+       const struct l2tp_net *pn = l2tp_pernet(net);
+       struct l2tp_tunnel *tunnel;
+
+       rcu_read_lock_bh();
+       list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
+               if (tunnel->tunnel_id == tunnel_id) {
+                       l2tp_tunnel_inc_refcount(tunnel);
+                       rcu_read_unlock_bh();
+
+                       return tunnel;
+               }
+       }
+       rcu_read_unlock_bh();
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
+
 /* Lookup a session. A new reference is held on the returned session.
  * Optionally calls session->ref() too if do_ref is true.
  */
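
The l2tp_tunnel_get() added above pairs with the l2tp_tunnel_dec_refcount() helper added in the header hunk below; callers are expected to follow this shape (hypothetical caller sketch):

	struct l2tp_tunnel *tunnel;

	tunnel = l2tp_tunnel_get(net, tunnel_id);
	if (!tunnel)
		return -ENODEV;
	/* ... the held reference pins the tunnel across this window ... */
	l2tp_tunnel_dec_refcount(tunnel);

This is exactly the pattern the l2tp_nl_* hunks below adopt in place of the unreferenced l2tp_tunnel_find().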
@@ -1348,17 +1335,6 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
        }
 }
 
-/* Really kill the tunnel.
- * Come here only when all sessions have been cleared from the tunnel.
- */
-static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
-{
-       BUG_ON(refcount_read(&tunnel->ref_count) != 0);
-       BUG_ON(tunnel->sock != NULL);
-       l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
-       kfree_rcu(tunnel, rcu);
-}
-
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
@@ -1844,6 +1820,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 
                l2tp_session_set_header_len(session, tunnel->version);
 
+               refcount_set(&session->ref_count, 1);
+
                err = l2tp_session_add_to_tunnel(tunnel, session);
                if (err) {
                        kfree(session);
@@ -1851,10 +1829,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
                        return ERR_PTR(err);
                }
 
-               /* Bump the reference count. The session context is deleted
-                * only when this drops to zero.
-                */
-               refcount_set(&session->ref_count, 1);
                l2tp_tunnel_inc_refcount(tunnel);
 
                /* Ensure tunnel socket isn't deleted */
index cdb6e3327f744040f8510ad0882c2f620509dfb9..9101297f27adb218a24bd7edd8ff1842849ab438 100644 (file)
@@ -231,6 +231,8 @@ out:
        return tunnel;
 }
 
+struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id);
+
 struct l2tp_session *l2tp_session_get(const struct net *net,
                                      struct l2tp_tunnel *tunnel,
                                      u32 session_id, bool do_ref);
@@ -269,6 +271,17 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type,
 void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
 int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 
+static inline void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
+{
+       refcount_inc(&tunnel->ref_count);
+}
+
+static inline void l2tp_tunnel_dec_refcount(struct l2tp_tunnel *tunnel)
+{
+       if (refcount_dec_and_test(&tunnel->ref_count))
+               kfree_rcu(tunnel, rcu);
+}
+
 /* Session reference counts. Incremented when code obtains a reference
  * to a session.
  */
index 12cfcd0ca807396d18e061e9bcf4c29e760b2563..57427d430f107897322441d7578ce9e97b8017bf 100644 (file)
@@ -65,10 +65,12 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
                   (info->attrs[L2TP_ATTR_CONN_ID])) {
                tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
                session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
-               tunnel = l2tp_tunnel_find(net, tunnel_id);
-               if (tunnel)
+               tunnel = l2tp_tunnel_get(net, tunnel_id);
+               if (tunnel) {
                        session = l2tp_session_get(net, tunnel, session_id,
                                                   do_ref);
+                       l2tp_tunnel_dec_refcount(tunnel);
+               }
        }
 
        return session;
@@ -271,8 +273,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
        }
        tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-       tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel == NULL) {
+       tunnel = l2tp_tunnel_get(net, tunnel_id);
+       if (!tunnel) {
                ret = -ENODEV;
                goto out;
        }
@@ -282,6 +284,8 @@ static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info
 
        (void) l2tp_tunnel_delete(tunnel);
 
+       l2tp_tunnel_dec_refcount(tunnel);
+
 out:
        return ret;
 }
@@ -299,8 +303,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
        }
        tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-       tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel == NULL) {
+       tunnel = l2tp_tunnel_get(net, tunnel_id);
+       if (!tunnel) {
                ret = -ENODEV;
                goto out;
        }
@@ -311,6 +315,8 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
        ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
                                 tunnel, L2TP_CMD_TUNNEL_MODIFY);
 
+       l2tp_tunnel_dec_refcount(tunnel);
+
 out:
        return ret;
 }
@@ -438,34 +444,37 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
 
        if (!info->attrs[L2TP_ATTR_CONN_ID]) {
                ret = -EINVAL;
-               goto out;
+               goto err;
        }
 
        tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
 
-       tunnel = l2tp_tunnel_find(net, tunnel_id);
-       if (tunnel == NULL) {
-               ret = -ENODEV;
-               goto out;
-       }
-
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
-               goto out;
+               goto err;
+       }
+
+       tunnel = l2tp_tunnel_get(net, tunnel_id);
+       if (!tunnel) {
+               ret = -ENODEV;
+               goto err_nlmsg;
        }
 
        ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
                                  NLM_F_ACK, tunnel, L2TP_CMD_TUNNEL_GET);
        if (ret < 0)
-               goto err_out;
+               goto err_nlmsg_tunnel;
+
+       l2tp_tunnel_dec_refcount(tunnel);
 
        return genlmsg_unicast(net, msg, info->snd_portid);
 
-err_out:
+err_nlmsg_tunnel:
+       l2tp_tunnel_dec_refcount(tunnel);
+err_nlmsg:
        nlmsg_free(msg);
-
-out:
+err:
        return ret;
 }
 
@@ -509,8 +518,9 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                ret = -EINVAL;
                goto out;
        }
+
        tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
-       tunnel = l2tp_tunnel_find(net, tunnel_id);
+       tunnel = l2tp_tunnel_get(net, tunnel_id);
        if (!tunnel) {
                ret = -ENODEV;
                goto out;
@@ -518,24 +528,24 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
 
        if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
                ret = -EINVAL;
-               goto out;
+               goto out_tunnel;
        }
        session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
 
        if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
                ret = -EINVAL;
-               goto out;
+               goto out_tunnel;
        }
        peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
 
        if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
                ret = -EINVAL;
-               goto out;
+               goto out_tunnel;
        }
        cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
        if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
                ret = -EINVAL;
-               goto out;
+               goto out_tunnel;
        }
 
        if (tunnel->version > 2) {
@@ -557,7 +567,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                        u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
                        if (len > 8) {
                                ret = -EINVAL;
-                               goto out;
+                               goto out_tunnel;
                        }
                        cfg.cookie_len = len;
                        memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
@@ -566,7 +576,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                        u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
                        if (len > 8) {
                                ret = -EINVAL;
-                               goto out;
+                               goto out_tunnel;
                        }
                        cfg.peer_cookie_len = len;
                        memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
@@ -609,7 +619,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
        if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
            (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
                ret = -EPROTONOSUPPORT;
-               goto out;
+               goto out_tunnel;
        }
 
        /* Check that pseudowire-specific params are present */
@@ -619,7 +629,7 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
        case L2TP_PWTYPE_ETH_VLAN:
                if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
                        ret = -EINVAL;
-                       goto out;
+                       goto out_tunnel;
                }
                break;
        case L2TP_PWTYPE_ETH:
@@ -647,6 +657,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
                }
        }
 
+out_tunnel:
+       l2tp_tunnel_dec_refcount(tunnel);
 out:
        return ret;
 }
index eb541786ccb7c79b3498477dfefc2927368f1a6f..b1d3740ae36ae61c1ad53f35fd818423bf2e4b27 100644 (file)
@@ -441,7 +441,7 @@ nf_nat_setup_info(struct nf_conn *ct,
                else
                        ct->status |= IPS_DST_NAT;
 
-               if (nfct_help(ct))
+               if (nfct_help(ct) && !nfct_seqadj(ct))
                        if (!nfct_seqadj_ext_add(ct))
                                return NF_DROP;
        }
index f5a7cb68694e76db73dec897ad51305f29bb8981..b89f4f65b2a0fbbcd725001e58ae373ab2c3156b 100644 (file)
@@ -305,7 +305,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                hook_mask = 1 << ops->hooknum;
-               if (!(hook_mask & target->hooks))
+               if (target->hooks && !(hook_mask & target->hooks))
                        return -EINVAL;
 
                ret = nft_compat_chain_validate_dependency(target->table,
@@ -484,7 +484,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
                const struct nf_hook_ops *ops = &basechain->ops[0];
 
                hook_mask = 1 << ops->hooknum;
-               if (!(hook_mask & match->hooks))
+               if (match->hooks && !(hook_mask & match->hooks))
                        return -EINVAL;
 
                ret = nft_compat_chain_validate_dependency(match->table,
index 18dd57a526513bd726944fa7ad7a9e41fcfb0251..14538b1d4d110e9b00445f477fb55377a5189f94 100644 (file)
@@ -65,19 +65,23 @@ static int nft_limit_init(struct nft_limit *limit,
        limit->nsecs = unit * NSEC_PER_SEC;
        if (limit->rate == 0 || limit->nsecs < unit)
                return -EOVERFLOW;
-       limit->tokens = limit->tokens_max = limit->nsecs;
-
-       if (tb[NFTA_LIMIT_BURST]) {
-               u64 rate;
 
+       if (tb[NFTA_LIMIT_BURST])
                limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
+       else
+               limit->burst = 0;
+
+       if (limit->rate + limit->burst < limit->rate)
+               return -EOVERFLOW;
 
-               rate = limit->rate + limit->burst;
-               if (rate < limit->rate)
-                       return -EOVERFLOW;
+       /* The token bucket size limits the number of tokens that can be
+        * accumulated. tokens_max specifies the bucket size:
+        * tokens_max = unit * (rate + burst) / rate.
+        */
+       limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+                               limit->rate);
+       limit->tokens_max = limit->tokens;
 
-               limit->rate = rate;
-       }
        if (tb[NFTA_LIMIT_FLAGS]) {
                u32 flags = ntohl(nla_get_be32(tb[NFTA_LIMIT_FLAGS]));
 
@@ -95,9 +99,8 @@ static int nft_limit_dump(struct sk_buff *skb, const struct nft_limit *limit,
 {
        u32 flags = limit->invert ? NFT_LIMIT_F_INV : 0;
        u64 secs = div_u64(limit->nsecs, NSEC_PER_SEC);
-       u64 rate = limit->rate - limit->burst;
 
-       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(rate),
+       if (nla_put_be64(skb, NFTA_LIMIT_RATE, cpu_to_be64(limit->rate),
                         NFTA_LIMIT_PAD) ||
            nla_put_be64(skb, NFTA_LIMIT_UNIT, cpu_to_be64(secs),
                         NFTA_LIMIT_PAD) ||
index 008a45ca31124ed5fa54d666fce61c7982b12a2f..1c61af9af67dae10ea9675a45b191d7302c69151 100644 (file)
@@ -2191,6 +2191,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        struct timespec ts;
        __u32 ts_status;
        bool is_drop_n_account = false;
+       bool do_vnet = false;
 
        /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
         * We may add members to them until current aligned size without forcing
@@ -2241,8 +2242,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                netoff = TPACKET_ALIGN(po->tp_hdrlen +
                                       (maclen < 16 ? 16 : maclen)) +
                                       po->tp_reserve;
-               if (po->has_vnet_hdr)
+               if (po->has_vnet_hdr) {
                        netoff += sizeof(struct virtio_net_hdr);
+                       do_vnet = true;
+               }
                macoff = netoff - maclen;
        }
        if (po->tp_version <= TPACKET_V2) {
@@ -2259,8 +2262,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                                        skb_set_owner_r(copy_skb, sk);
                        }
                        snaplen = po->rx_ring.frame_size - macoff;
-                       if ((int)snaplen < 0)
+                       if ((int)snaplen < 0) {
                                snaplen = 0;
+                               do_vnet = false;
+                       }
                }
        } else if (unlikely(macoff + snaplen >
                            GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
@@ -2273,6 +2278,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                if (unlikely((int)snaplen < 0)) {
                        snaplen = 0;
                        macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
+                       do_vnet = false;
                }
        }
        spin_lock(&sk->sk_receive_queue.lock);
@@ -2298,7 +2304,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
-       if (po->has_vnet_hdr) {
+       if (do_vnet) {
                if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
                                            sizeof(struct virtio_net_hdr),
                                            vio_le(), true)) {
index 9fd44c22134783edf3db4f62ff1e8184c455cbd7..6c5ea84d2682ab81fb9755361fa77326fa9d9935 100644 (file)
@@ -215,9 +215,15 @@ static void tcf_chain_flush(struct tcf_chain *chain)
 
 static void tcf_chain_destroy(struct tcf_chain *chain)
 {
-       list_del(&chain->list);
-       tcf_chain_flush(chain);
-       kfree(chain);
+       /* May already have been removed from the list by a previous call. */
+       if (!list_empty(&chain->list))
+               list_del_init(&chain->list);
+
+       /* There might still be a reference held when we got here from
+        * tcf_block_put. Wait for the user to drop the reference before freeing.
+        */
+       if (!chain->refcnt)
+               kfree(chain);
 }
 
 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
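
The double-destroy guard above works because list_del_init() leaves the entry self-linked, so list_empty() on that entry returns true afterwards. A standalone sketch of the idiom (not the kernel code; some_list is hypothetical):

	LIST_HEAD(some_list);
	struct list_head entry;

	INIT_LIST_HEAD(&entry);
	list_add(&entry, &some_list);

	list_del_init(&entry);		/* unlink and re-point entry at itself */
	if (!list_empty(&entry))	/* now false, so this pass is a no-op  */
		list_del_init(&entry);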
@@ -288,8 +294,10 @@ void tcf_block_put(struct tcf_block *block)
        if (!block)
                return;
 
-       list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+       list_for_each_entry_safe(chain, tmp, &block->chain_list, list) {
+               tcf_chain_flush(chain);
                tcf_chain_destroy(chain);
+       }
        kfree(block);
 }
 EXPORT_SYMBOL(tcf_block_put);
index a3fa144b864871088209386fd573bded1886432f..4fb5a3222d0d324167f079f755be14eb028b4a50 100644 (file)
@@ -836,7 +836,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
                        old = dev_graft_qdisc(dev_queue, new);
                        if (new && i > 0)
-                               refcount_inc(&new->refcnt);
+                               qdisc_refcount_inc(new);
 
                        if (!ingress)
                                qdisc_destroy(old);
@@ -847,7 +847,7 @@ skip:
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
-                               refcount_inc(&new->refcnt);
+                               qdisc_refcount_inc(new);
                        dev->qdisc = new ? : &noop_qdisc;
 
                        if (new && new->ops->attach)
@@ -1256,7 +1256,7 @@ replay:
                                if (q == p ||
                                    (p && check_loop(q, p, 0)))
                                        return -ELOOP;
-                               refcount_inc(&q->refcnt);
+                               qdisc_refcount_inc(q);
                                goto graft;
                        } else {
                                if (!q)
index 780db43300b16284192b24006b0ae8677adbe505..156c8a33c6777a644c77b1adec9057b482bac109 100644 (file)
@@ -1139,6 +1139,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        struct tc_ratespec *r;
        int err;
 
+       qdisc_watchdog_init(&q->watchdog, sch);
+       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+       q->delay_timer.function = cbq_undelay;
+
+       if (!opt)
+               return -EINVAL;
+
        err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy, NULL);
        if (err < 0)
                return err;
@@ -1177,9 +1184,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
        q->link.avpkt = q->link.allot/2;
        q->link.minidle = -0x7FFFFFFF;
 
-       qdisc_watchdog_init(&q->watchdog, sch);
-       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-       q->delay_timer.function = cbq_undelay;
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
 
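The same reordering recurs in the hfsc_init_qdisc, htb_init, netem_init, sfq_init and tbf_init hunks below: ->destroy() is invoked even when ->init() fails partway through, so anything ->destroy() cancels or frees must be initialized before the first early return. The shape of the fix as a generic sketch (the foo_* names are hypothetical):

	static int foo_init(struct Qdisc *sch, struct nlattr *opt)
	{
		struct foo_sched_data *q = qdisc_priv(sch);

		/* set up everything ->destroy() will touch, first */
		qdisc_watchdog_init(&q->watchdog, sch);

		if (!opt)
			return -EINVAL;	/* ->destroy() can now run safely */

		/* ... parsing and allocations follow ... */
		return 0;
	}
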
index 337f2d6d81e42e278b63443d904955e2c6692f03..2c0c05f2cc34a9de51390c45f29dd8db810075c7 100644 (file)
@@ -491,10 +491,8 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
                if (!q->flows)
                        return -ENOMEM;
                q->backlogs = kvzalloc(q->flows_cnt * sizeof(u32), GFP_KERNEL);
-               if (!q->backlogs) {
-                       kvfree(q->flows);
+               if (!q->backlogs)
                        return -ENOMEM;
-               }
                for (i = 0; i < q->flows_cnt; i++) {
                        struct fq_codel_flow *flow = q->flows + i;
 
index 57ba406f1437323a4ba172d52f14a8b687b86708..4ba6da5fb2546c35ad48fe1f3632df8ca9957b34 100644 (file)
@@ -785,7 +785,7 @@ static void attach_default_qdiscs(struct net_device *dev)
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
                dev->qdisc = txq->qdisc_sleeping;
-               refcount_inc(&dev->qdisc->refcnt);
+               qdisc_refcount_inc(dev->qdisc);
        } else {
                qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT);
                if (qdisc) {
index fd15200f86273add7d6c8c4a18aaef912aba7411..11ab8dace901534b23b8f376ac704f995dc6b66b 100644 (file)
@@ -1418,6 +1418,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
        struct tc_hfsc_qopt *qopt;
        int err;
 
+       qdisc_watchdog_init(&q->watchdog, sch);
+
        if (opt == NULL || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
        qopt = nla_data(opt);
@@ -1430,7 +1432,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 
        err = tcf_block_get(&q->root.block, &q->root.filter_list);
        if (err)
-               goto err_tcf;
+               return err;
 
        q->root.cl_common.classid = sch->handle;
        q->root.refcnt  = 1;
@@ -1448,13 +1450,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
        qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
        qdisc_class_hash_grow(sch, &q->clhash);
 
-       qdisc_watchdog_init(&q->watchdog, sch);
-
        return 0;
-
-err_tcf:
-       qdisc_class_hash_destroy(&q->clhash);
-       return err;
 }
 
 static int
index 51d3ba682af9ba0f69a3f3c3036519bf527cdee0..73a53c08091baafde3ef776ce9ea99cac9edfd9d 100644 (file)
@@ -477,6 +477,9 @@ static void hhf_destroy(struct Qdisc *sch)
                kvfree(q->hhf_valid_bits[i]);
        }
 
+       if (!q->hh_flows)
+               return;
+
        for (i = 0; i < HH_FLOWS_CNT; i++) {
                struct hh_flow_state *flow, *next;
                struct list_head *head = &q->hh_flows[i];
index 5d65ec5207e91202d501a83c983793f2e923f075..5bf5177b2bd3f6aa1b0ba9e4e59a946e1c739e0a 100644 (file)
@@ -1017,6 +1017,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
        int err;
        int i;
 
+       qdisc_watchdog_init(&q->watchdog, sch);
+       INIT_WORK(&q->work, htb_work_func);
+
        if (!opt)
                return -EINVAL;
 
@@ -1041,8 +1044,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
 
-       qdisc_watchdog_init(&q->watchdog, sch);
-       INIT_WORK(&q->work, htb_work_func);
        qdisc_skb_head_init(&q->direct_queue);
 
        if (tb[TCA_HTB_DIRECT_QLEN])
index f143b7bbaa0d5b00d01d36e9f6eefb54bf677e8f..9c454f5d6c38820512485cceecbc06c9fa86f634 100644 (file)
@@ -257,12 +257,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
        for (i = 0; i < q->max_bands; i++)
                q->queues[i] = &noop_qdisc;
 
-       err = multiq_tune(sch, opt);
-
-       if (err)
-               kfree(q->queues);
-
-       return err;
+       return multiq_tune(sch, opt);
 }
 
 static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
index 1b3dd6190e9386c6f8104153eb9fdc0255e03ac5..14d1724e0dc436f49da643be8606be273ce22ebd 100644 (file)
@@ -933,11 +933,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;
 
+       qdisc_watchdog_init(&q->watchdog, sch);
+
        if (!opt)
                return -EINVAL;
 
-       qdisc_watchdog_init(&q->watchdog, sch);
-
        q->loss_model = CLG_RANDOM;
        ret = netem_change(sch, opt);
        if (ret)
index 82469ef9655eb65431477215bb2df0fb2737f905..fc69fc5956e9d4d2dbfe645e4c25e83328517371 100644 (file)
@@ -716,13 +716,13 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
        int i;
        int err;
 
+       setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
+                              (unsigned long)sch);
+
        err = tcf_block_get(&q->block, &q->filter_list);
        if (err)
                return err;
 
-       setup_deferrable_timer(&q->perturb_timer, sfq_perturbation,
-                              (unsigned long)sch);
-
        for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
                q->dep[i].next = i + SFQ_MAX_FLOWS;
                q->dep[i].prev = i + SFQ_MAX_FLOWS;
index b2e4b6ad241a8e538c654ef4e8417ab756740a45..493270f0d5b055fa07d4dee2b35ec9d40bddc3d0 100644 (file)
@@ -425,12 +425,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
 
+       qdisc_watchdog_init(&q->watchdog, sch);
+       q->qdisc = &noop_qdisc;
+
        if (opt == NULL)
                return -EINVAL;
 
        q->t_c = ktime_get_ns();
-       qdisc_watchdog_init(&q->watchdog, sch);
-       q->qdisc = &noop_qdisc;
 
        return tbf_change(sch, opt);
 }
index 9a647214a91ebc583660db320307e0df1e13e5be..e99518e79b523e973d01e3f6bcfa2c9576c3fdd9 100644 (file)
@@ -70,7 +70,8 @@ static int inet_diag_msg_sctpladdrs_fill(struct sk_buff *skb,
 
        info = nla_data(attr);
        list_for_each_entry_rcu(laddr, address_list, list) {
-               memcpy(info, &laddr->a, addrlen);
+               memcpy(info, &laddr->a, sizeof(laddr->a));
+               memset(info + sizeof(laddr->a), 0, addrlen - sizeof(laddr->a));
                info += addrlen;
        }
 
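This hunk and the one below it apply the same leak fix: the netlink attribute reserves a fixed-size slot per address, the source struct can be smaller, and whatever the memcpy() does not cover must be zeroed explicitly or stale kernel bytes reach userspace. The generic form (slot, src and slot_len are hypothetical names):

	memcpy(slot, &src, sizeof(src));			/* real payload  */
	memset(slot + sizeof(src), 0, slot_len - sizeof(src));	/* zero the tail */
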
@@ -93,7 +94,9 @@ static int inet_diag_msg_sctpaddrs_fill(struct sk_buff *skb,
        info = nla_data(attr);
        list_for_each_entry(from, &asoc->peer.transport_addr_list,
                            transports) {
-               memcpy(info, &from->ipaddr, addrlen);
+               memcpy(info, &from->ipaddr, sizeof(from->ipaddr));
+               memset(info + sizeof(from->ipaddr), 0,
+                      addrlen - sizeof(from->ipaddr));
                info += addrlen;
        }
 
index 1db478e345203f75733044d843a763cc3a3966e1..8d760863bc411023835b20383620f38d14ee2df1 100644 (file)
@@ -4538,8 +4538,7 @@ int sctp_get_sctp_info(struct sock *sk, struct sctp_association *asoc,
        info->sctpi_ictrlchunks = asoc->stats.ictrlchunks;
 
        prim = asoc->peer.primary_path;
-       memcpy(&info->sctpi_p_address, &prim->ipaddr,
-              sizeof(struct sockaddr_storage));
+       memcpy(&info->sctpi_p_address, &prim->ipaddr, sizeof(prim->ipaddr));
        info->sctpi_p_state = prim->state;
        info->sctpi_p_cwnd = prim->cwnd;
        info->sctpi_p_srtt = prim->srtt;
index 767e0537dde5a8d0cfac97f6b11ee1d568ef87ed..89cd061c4468247cf761541ff1a2ca27f0836d6f 100644 (file)
@@ -65,6 +65,8 @@ static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
 }
 
 static void bearer_disable(struct net *net, struct tipc_bearer *b);
+static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
+                          struct packet_type *pt, struct net_device *orig_dev);
 
 /**
  * tipc_media_find - locates specified media object by name
@@ -428,6 +430,10 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
 
        /* Associate TIPC bearer with L2 bearer */
        rcu_assign_pointer(b->media_ptr, dev);
+       b->pt.dev = dev;
+       b->pt.type = htons(ETH_P_TIPC);
+       b->pt.func = tipc_l2_rcv_msg;
+       dev_add_pack(&b->pt);
        memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
        memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
        b->bcast_addr.media_id = b->media->type_id;
@@ -447,6 +453,7 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
        struct net_device *dev;
 
        dev = (struct net_device *)rtnl_dereference(b->media_ptr);
+       dev_remove_pack(&b->pt);
        RCU_INIT_POINTER(dev->tipc_ptr, NULL);
        synchronize_net();
        dev_put(dev);
@@ -594,11 +601,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
        struct tipc_bearer *b;
 
        rcu_read_lock();
-       b = rcu_dereference_rtnl(dev->tipc_ptr);
+       b = rcu_dereference_rtnl(dev->tipc_ptr) ?:
+               rcu_dereference_rtnl(orig_dev->tipc_ptr);
        if (likely(b && test_bit(0, &b->up) &&
                   (skb->pkt_type <= PACKET_MULTICAST))) {
                skb->next = NULL;
-               tipc_rcv(dev_net(dev), skb, b);
+               tipc_rcv(dev_net(b->pt.dev), skb, b);
                rcu_read_unlock();
                return NET_RX_SUCCESS;
        }
@@ -659,11 +667,6 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
        return NOTIFY_OK;
 }
 
-static struct packet_type tipc_packet_type __read_mostly = {
-       .type = htons(ETH_P_TIPC),
-       .func = tipc_l2_rcv_msg,
-};
-
 static struct notifier_block notifier = {
        .notifier_call  = tipc_l2_device_event,
        .priority       = 0,
@@ -671,19 +674,12 @@ static struct notifier_block notifier = {
 
 int tipc_bearer_setup(void)
 {
-       int err;
-
-       err = register_netdevice_notifier(&notifier);
-       if (err)
-               return err;
-       dev_add_pack(&tipc_packet_type);
-       return 0;
+       return register_netdevice_notifier(&notifier);
 }
 
 void tipc_bearer_cleanup(void)
 {
        unregister_netdevice_notifier(&notifier);
-       dev_remove_pack(&tipc_packet_type);
 }
 
 void tipc_bearer_stop(struct net *net)
index 635c9086e19af86b81b4dc572a7c2ee0374b02fc..e07a55a80c18ba0f3c4f1187b7544faeca395a4e 100644 (file)
@@ -131,6 +131,7 @@ struct tipc_media {
  * @name: bearer name (format = media:interface)
  * @media: ptr to media structure associated with bearer
  * @bcast_addr: media address used in broadcasting
+ * @pt: packet type for bearer
  * @rcu: rcu struct for tipc_bearer
  * @priority: default link priority for bearer
  * @window: default window size for bearer
@@ -151,6 +152,7 @@ struct tipc_bearer {
        char name[TIPC_MAX_BEARER_NAME];
        struct tipc_media *media;
        struct tipc_media_addr bcast_addr;
+       struct packet_type pt;
        struct rcu_head rcu;
        u32 priority;
        u32 window;
index dcd90e6fa7c39c962eb6f6684cc31dd2b2443e4a..6ef379f004ac6da5ef908911368149e10186b5c4 100644 (file)
@@ -479,13 +479,14 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
 bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 {
        struct sk_buff *_skb = *skb;
-       struct tipc_msg *hdr = buf_msg(_skb);
+       struct tipc_msg *hdr;
        struct tipc_msg ohdr;
-       int dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
+       int dlen;
 
        if (skb_linearize(_skb))
                goto exit;
        hdr = buf_msg(_skb);
+       dlen = min_t(uint, msg_data_sz(hdr), MAX_FORWARD_SIZE);
        if (msg_dest_droppable(hdr))
                goto exit;
        if (msg_errcode(hdr))
@@ -511,6 +512,8 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
            pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
                goto exit;
 
+       /* Reassign hdr: the calls above may have reallocated skb data */
+       hdr = buf_msg(_skb);
        /* Now reverse the concerned fields */
        msg_set_errcode(hdr, err);
        msg_set_non_seq(hdr, 0);
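
Both reassignments of hdr guard the same hazard: skb_linearize() and pskb_expand_head() may reallocate the skb data area, invalidating any pointer previously derived from it. Compressed into a sketch:

	struct tipc_msg *hdr = buf_msg(_skb);	/* points into skb->data   */

	if (pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
		goto exit;			/* data may have moved ... */
	hdr = buf_msg(_skb);			/* ... so re-derive it     */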
index 9b4dcb6a16b50eefc04167dfdd1e509546b71bf6..7dd22330a6b4bf9113e189c613a863fce13425a2 100644 (file)
@@ -1126,8 +1126,8 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                strncpy(linkname, tipc_link_name(link), len);
                err = 0;
        }
-exit:
        tipc_node_read_unlock(node);
+exit:
        tipc_node_put(node);
        return err;
 }
@@ -1557,6 +1557,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
 
        /* Check/update node state before receiving */
        if (unlikely(skb)) {
+               if (unlikely(skb_linearize(skb)))
+                       goto discard;
                tipc_node_write_lock(n);
                if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
                        if (le->link) {
index 101e3597338f7c1e182c9090af0d14caf5be1b8b..d50edd6e00196af04ee7a4011d6339b100cb3537 100644 (file)
@@ -2255,8 +2255,8 @@ void tipc_sk_reinit(struct net *net)
 
        do {
                tsk = ERR_PTR(rhashtable_walk_start(&iter));
-               if (tsk)
-                       continue;
+               if (IS_ERR(tsk))
+                       goto walk_stop;
 
                while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
                        spin_lock_bh(&tsk->sk.sk_lock.slock);
@@ -2265,7 +2265,7 @@ void tipc_sk_reinit(struct net *net)
                        msg_set_orignode(msg, tn->own_addr);
                        spin_unlock_bh(&tsk->sk.sk_lock.slock);
                }
-
+walk_stop:
                rhashtable_walk_stop(&iter);
        } while (tsk == ERR_PTR(-EAGAIN));
 }
index 0bf91cd3733cb37ecc8ba4ccf7ae5a26cb6e966d..be3d9e3183dcb1ce929584a074755c1ce0969d0f 100644 (file)
@@ -52,7 +52,6 @@ struct tipc_subscriber {
        struct list_head subscrp_list;
 };
 
-static void tipc_subscrp_delete(struct tipc_subscription *sub);
 static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
 
 /**
@@ -197,15 +196,19 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
 {
        struct list_head *subscription_list = &subscriber->subscrp_list;
        struct tipc_subscription *sub, *temp;
+       u32 timeout;
 
        spin_lock_bh(&subscriber->lock);
        list_for_each_entry_safe(sub, temp, subscription_list,  subscrp_list) {
                if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
                        continue;
 
-               tipc_nametbl_unsubscribe(sub);
-               list_del(&sub->subscrp_list);
-               tipc_subscrp_delete(sub);
+               timeout = htohl(sub->evt.s.timeout, sub->swap);
+               if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer)) {
+                       tipc_nametbl_unsubscribe(sub);
+                       list_del(&sub->subscrp_list);
+                       tipc_subscrp_put(sub);
+               }
 
                if (s)
                        break;
@@ -236,18 +239,12 @@ static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
        tipc_subscrb_put(subscriber);
 }
 
-static void tipc_subscrp_delete(struct tipc_subscription *sub)
-{
-       u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
-
-       if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
-               tipc_subscrp_put(sub);
-}
-
 static void tipc_subscrp_cancel(struct tipc_subscr *s,
                                struct tipc_subscriber *subscriber)
 {
+       tipc_subscrb_get(subscriber);
        tipc_subscrb_subscrp_delete(subscriber, s);
+       tipc_subscrb_put(subscriber);
 }
 
 static struct tipc_subscription *tipc_subscrp_create(struct net *net,
index ff61d85579292dee8a7f1e9926e11734556f4fd9..69b16ee327d9958769f09c66d54ace50889d6665 100644 (file)
@@ -2226,7 +2226,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
-                       dst_hold(&xdst->u.dst);
                        route = xdst->route;
                }
        }
@@ -3308,9 +3307,15 @@ int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
        struct xfrm_state *x_new[XFRM_MAX_DEPTH];
        struct xfrm_migrate *mp;
 
+       /* Stage 0 - sanity checks */
        if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
                goto out;
 
+       if (dir >= XFRM_POLICY_MAX) {
+               err = -EINVAL;
+               goto out;
+       }
+
        /* Stage 1 - find policy */
        if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
                err = -ENOENT;
index 6c0956d10db601add764616df62dbe19999c29b4..a792effdb0b5d51fb88835349a44a756d3a9e5e7 100644 (file)
@@ -1620,6 +1620,7 @@ int
 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family, struct net *net)
 {
+       int i;
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        if (!afinfo)
@@ -1628,6 +1629,9 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
        spin_lock_bh(&net->xfrm.xfrm_state_lock); /*FIXME*/
        if (afinfo->tmpl_sort)
                err = afinfo->tmpl_sort(dst, src, n);
+       else
+               for (i = 0; i < n; i++)
+                       dst[i] = src[i];
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        rcu_read_unlock();
        return err;
@@ -1638,6 +1642,7 @@ int
 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
 {
+       int i;
        int err = 0;
        struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
        struct net *net = xs_net(*src);
@@ -1648,6 +1653,9 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        if (afinfo->state_sort)
                err = afinfo->state_sort(dst, src, n);
+       else
+               for (i = 0; i < n; i++)
+                       dst[i] = src[i];
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        rcu_read_unlock();
        return err;
index 2be4c6af008a7917ae5cc1e3306c5466cae387e4..9391ced0525986ce72938a9ed59c27ea124f7ba5 100644 (file)
@@ -796,7 +796,7 @@ static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb
                return -EMSGSIZE;
 
        xuo = nla_data(attr);
-
+       memset(xuo, 0, sizeof(*xuo));
        xuo->ifindex = xso->dev->ifindex;
        xuo->flags = xso->flags;
 
@@ -1869,6 +1869,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
                return -EMSGSIZE;
 
        id = nlmsg_data(nlh);
+       memset(&id->sa_id, 0, sizeof(id->sa_id));
        memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
        id->sa_id.spi = x->id.spi;
        id->sa_id.family = x->props.family;
@@ -2578,6 +2579,8 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
        ue = nlmsg_data(nlh);
        copy_to_user_state(x, &ue->state);
        ue->hard = (c->data.hard != 0) ? 1 : 0;
+       /* clear the padding bytes */
+       memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));
 
        err = xfrm_mark_put(skb, &x->mark);
        if (err)
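
The offsetofend() expression clears exactly the implicit trailing padding. With hypothetical sizes, say sizeof(*ue) == 232 and offsetofend(typeof(*ue), hard) == 225, the call becomes:

	/* zero the 7 padding bytes between 'hard' and the end of *ue */
	memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));

so no uninitialized padding leaks into the netlink message.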
@@ -2715,6 +2718,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
                struct nlattr *attr;
 
                id = nlmsg_data(nlh);
+               memset(id, 0, sizeof(*id));
                memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
                id->spi = x->id.spi;
                id->family = x->props.family;