/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
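
/* Illustrative only, not part of this file: a group is built from user
 * space by binding several sockets to the same address with SO_REUSEPORT
 * set before bind(), e.g.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * A classic or extended BPF selector can then be attached to any group
 * member with SO_ATTACH_REUSEPORT_CBPF or SO_ATTACH_REUSEPORT_EBPF.
 */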

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

static DEFINE_SPINLOCK(reuseport_lock);

static struct sock_reuseport *__reuseport_alloc(u16 max_socks)
{
	size_t size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;
	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}
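
/* Descriptive note (not in the original): the size computed above covers
 * the group header plus the socket pointer array in one allocation,
 * because struct sock_reuseport ends in a flexible array member socks[]
 * (see net/sock_reuseport.h for the actual layout).
 */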

int reuseport_alloc(struct sock *sk)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "multiple allocations for the same socket");
	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, const struct sock *sk2)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb,
					    lockdep_is_held(&reuseport_lock)),
		  "socket already in reuseport group");

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);
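
/* Descriptive note (not in the original): the smp_wmb() above orders the
 * store to socks[num_socks] before the incremented num_socks.  A lockless
 * reader that observes the new count through the paired smp_rmb() in
 * reuseport_select_sock() is therefore guaranteed to also observe the
 * socket pointer that the count covers.
 */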

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	if (reuse->prog)
		bpf_prog_destroy(reuse->prog);
	kfree(reuse);
}

void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

static struct sock *run_bpf(struct sock_reuseport *reuse, u16 socks,
			    struct bpf_prog *prog, struct sk_buff *skb,
			    int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}
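
/* Illustrative only, a sketch rather than part of this file: a minimal
 * classic BPF selector a user-space program could attach with
 * SO_ATTACH_REUSEPORT_CBPF.  It returns the current CPU number, which
 * run_bpf() above then uses as the index into reuse->socks[]:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 2,
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 *
 * An index >= the current group size makes run_bpf() return NULL, i.e.
 * no socket is selected.
 */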

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (prog && skb)
			sk2 = run_bpf(reuse, socks, prog, skb, hdr_len);
		else
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}
out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);
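
/* Usage sketch (an assumption about callers, which live outside this
 * file): a protocol's lookup path passes its own header length so that
 * run_bpf() can pull the skb to the payload, e.g. UDP would call
 *
 *	reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr));
 *
 * and fall back to its ordinary socket lookup when NULL is returned.
 */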

struct bpf_prog *
reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	return old_prog;
}
EXPORT_SYMBOL(reuseport_attach_prog);
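
/* Descriptive note (not in the original): the previous program is
 * returned rather than destroyed here, so the caller can release it
 * (e.g. via bpf_prog_destroy()) once it is certain that no RCU reader
 * can still be running it.
 */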