net/mlx4_en: protect ring->xdp_prog with rcu_read_lock
Author:     Brenden Blanco <bblanco@plumgrid.com>
AuthorDate: Sun, 4 Sep 2016 04:29:58 +0000 (21:29 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Tue, 6 Sep 2016 20:39:33 +0000 (13:39 -0700)
Depending on the preemption mode, the bpf_prog stored in xdp_prog may be
freed despite the use of call_rcu inside bpf_prog_put. This can happen
when running with PREEMPT_RCU=y, for instance, since the rcu callback
that destroys the bpf prog can then run even while the mlx4 rx path is
in the middle of its bh handling.
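
Concretely, the hazard is a use-after-free in the poll loop. A minimal
sketch of the pre-patch read side (simplified from en_rx.c, not the
verbatim driver code):

  xdp_prog = READ_ONCE(ring->xdp_prog);   /* no rcu_read_lock held */

  /* Meanwhile, mlx4_xdp_set() on another CPU can do:
   *   old_prog = xchg(&ring->xdp_prog, new_prog);
   *   bpf_prog_put(old_prog);            // -> call_rcu(...)
   * and with PREEMPT_RCU=y the rcu callback may free old_prog
   * before this poll iteration is done with it.
   */
  act = bpf_prog_run_xdp(xdp_prog, &xdp); /* potential use-after-free */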

Several options were considered before this patch was settled on:

Add a napi_synchronize loop in mlx4_xdp_set, which would occur after all
of the rings are updated with the new program.
This approach has the disadvantage that the update slows down
significantly as the number of rings increases, because
napi_synchronize() sleeps in msleep(1) once per ring.
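
A sketch of that rejected variant (assuming the per-ring napi context
is reachable via priv->rx_cq[i]->napi, as in mlx4):

  struct bpf_prog *old_prog[MAX_RX_RINGS];
  int i;

  /* publish the new program to every ring first */
  for (i = 0; i < priv->rx_ring_num; i++)
          old_prog[i] = xchg(&priv->rx_ring[i]->xdp_prog, prog);

  /* then wait for each ring's in-flight poll to finish; each
   * napi_synchronize() msleep(1)s while the napi is scheduled,
   * so the total latency grows with the number of rings */
  for (i = 0; i < priv->rx_ring_num; i++)
          napi_synchronize(&priv->rx_cq[i]->napi);

  for (i = 0; i < priv->rx_ring_num; i++)
          if (old_prog[i])
                  bpf_prog_put(old_prog[i]);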

Add a new rcu_head in bpf_prog_aux, to be used by a new bpf_prog_put_bh.
bpf_prog_put_bh would defer the real bpf_prog_put through an additional
RCU callback. Drivers that consume a bpf prog in a bh context (like
mlx4) would then use bpf_prog_put_bh instead while the ring is up. This
has the problem of complexity in maintaining proper refcnts and rcu
lists, and would likely be harder to review. In addition, this style of
freeing must be kept exclusive from other frees of the same bpf prog;
for instance, a _bh prog must not be referenced from a prog array that
is consumed by a non-_bh prog.
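
That rejected helper might have looked roughly like this (hypothetical:
neither the extra rcu_bh head in bpf_prog_aux nor bpf_prog_put_bh ever
existed in the tree):

  static void bpf_prog_put_bh_rcu(struct rcu_head *rcu)
  {
          struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu_bh);

          /* the deferred "normal" put, now safe against bh readers */
          bpf_prog_put(aux->prog);
  }

  void bpf_prog_put_bh(struct bpf_prog *prog)
  {
          /* push the final refcount drop past an rcu_bh grace period */
          call_rcu_bh(&prog->aux->rcu_bh, bpf_prog_put_bh_rcu);
  }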

The placement of rcu_read_lock in this patch is functionally the same as
putting an rcu_read_lock in napi_poll. Actually doing so would be a
potentially controversial change, but would bring the implementation in
line with sk_busy_loop (though of course the nature of those two paths
is substantially different), and would also spare future XDP-capable
drivers from copy/pasting the same locking. Still, this patch does not
take that opinionated option.
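
For reference, that alternative would amount to wrapping the driver
callback in net/core/dev.c's napi_poll, roughly as follows (not part of
this patch; shown only to illustrate the trade-off):

  static int napi_poll(struct napi_struct *n, struct list_head *repoll)
  {
          int work = 0;

          /* ... existing weight/ownership handling ... */

          /* pin RCU-protected driver state (such as xdp_prog) across
           * every driver's poll callback, instead of per driver */
          rcu_read_lock();
          if (test_bit(NAPI_STATE_SCHED, &n->state))
                  work = n->poll(n, n->weight);
          rcu_read_unlock();

          /* ... existing repoll/complete handling ... */
          return work;
  }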

Testing was done with kernels in either PREEMPT_RCU=y or
CONFIG_PREEMPT_VOLUNTARY=y+PREEMPT_RCU=n modes, with neither exhibiting
any drawback. With PREEMPT_RCU=n, the extra call to rcu_read_lock did
not show up in the perf report whatsoever, and with PREEMPT_RCU=y the
overhead of rcu_read_lock (according to perf) was the same before/after.
In the rx path, rcu_read_lock is eventually called for every packet
from netif_receive_skb_internal, so the napi poll call's rcu_read_lock
is easily amortized.

v2:
- Remove the extra rcu_read_lock in the mlx4_en_process_rx_cq body.
- Annotate xdp_prog with __rcu, and convert all usages to
  rcu_assign_pointer or rcu_dereference[_protected] as appropriate.
- Add an explicit mutex lock around rcu_assign_pointer instead of the
  xchg loop.

Fixes: d576acf0a22 ("net/mlx4_en: add page recycle to prepare rx ring for tx support")
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Signed-off-by: Brenden Blanco <bblanco@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4198e9bf89d044733e83596dec346dbd0c6a38a9..31a41add5b4c4c0345cd46e1986d3f35ed1de6a4 100644
@@ -2642,12 +2642,16 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                        if (IS_ERR(prog))
                                return PTR_ERR(prog);
                }
+               mutex_lock(&mdev->state_lock);
                for (i = 0; i < priv->rx_ring_num; i++) {
-                       /* This xchg is paired with READ_ONCE in the fastpath */
-                       old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+                       old_prog = rcu_dereference_protected(
+                                       priv->rx_ring[i]->xdp_prog,
+                                       lockdep_is_held(&mdev->state_lock));
+                       rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
                        if (old_prog)
                                bpf_prog_put(old_prog);
                }
+               mutex_unlock(&mdev->state_lock);
                return 0;
        }
 
@@ -2680,7 +2684,10 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
                                                        priv->xdp_ring_num);
 
        for (i = 0; i < priv->rx_ring_num; i++) {
-               old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+               old_prog = rcu_dereference_protected(
+                                       priv->rx_ring[i]->xdp_prog,
+                                       lockdep_is_held(&mdev->state_lock));
+               rcu_assign_pointer(priv->rx_ring[i]->xdp_prog, prog);
                if (old_prog)
                        bpf_prog_put(old_prog);
        }
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2040dad8611df21ac63ac4b31cdac97e4b87af31..6758292311f416b823f90b2d65e1b1e9c1be80f8 100644
@@ -537,7 +537,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        struct mlx4_en_rx_ring *ring = *pring;
        struct bpf_prog *old_prog;
 
-       old_prog = READ_ONCE(ring->xdp_prog);
+       old_prog = rcu_dereference_protected(
+                                       ring->xdp_prog,
+                                       lockdep_is_held(&mdev->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
        mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
@@ -800,7 +802,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
        if (budget <= 0)
                return polled;
 
-       xdp_prog = READ_ONCE(ring->xdp_prog);
+       /* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
+       rcu_read_lock();
+       xdp_prog = rcu_dereference(ring->xdp_prog);
        doorbell_pending = 0;
        tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
 
@@ -858,15 +862,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                /* Drop the packet, since HW loopback-ed it */
                                mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
                                bucket = &priv->mac_hash[mac_hash];
-                               rcu_read_lock();
                                hlist_for_each_entry_rcu(entry, bucket, hlist) {
                                        if (ether_addr_equal_64bits(entry->mac,
-                                                                   ethh->h_source)) {
-                                               rcu_read_unlock();
+                                                                   ethh->h_source))
                                                goto next;
-                                       }
                                }
-                               rcu_read_unlock();
                        }
                }
 
@@ -1077,6 +1077,7 @@ consumed:
        }
 
 out:
+       rcu_read_unlock();
        if (doorbell_pending)
                mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2c2913dcae980b0a665e54b128554fc168b364b7..47867c49f91c11df5883a4cd67b273ece82f3927 100644
@@ -340,7 +340,7 @@ struct mlx4_en_rx_ring {
        u8  fcs_del;
        void *buf;
        void *rx_info;
-       struct bpf_prog *xdp_prog;
+       struct bpf_prog __rcu *xdp_prog;
        struct mlx4_en_page_cache page_cache;
        unsigned long bytes;
        unsigned long packets;