/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Jay Schulist <jschlst@samba.org>
 * Alexei Starovoitov <ast@plumgrid.com>
 * Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/sock_reuseport.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
	struct sk_filter *filter;

	/* If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory.
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);

	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);

	err = security_sock_rcv_skb(sk, skb);

	filter = rcu_dereference(sk->sk_filter);
		unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);

		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;

EXPORT_SYMBOL(sk_filter_trim_cap);
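
/* For reference, a minimal sketch (assuming the common inline wrapper in
 * include/linux/filter.h) of how this is typically reached: sk_filter()
 * passes a trim cap of 1, i.e. the filter may trim the packet but never
 * to zero length:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */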
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
	return skb_get_poff(skb);

BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
	if (skb_is_nonlinear(skb))

	if (skb->len < sizeof(struct nlattr))

	if (a > skb->len - sizeof(struct nlattr))

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
		return (void *) nla - (void *) skb->data;

BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
	if (skb_is_nonlinear(skb))

	if (skb->len < sizeof(struct nlattr))

	if (a > skb->len - sizeof(struct nlattr))

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)

	nla = nla_find_nested(nla, x);
		return (void *) nla - (void *) skb->data;

BPF_CALL_0(__get_raw_cpu_id)
	return raw_smp_processor_id();

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.ret_type	= RET_INTEGER,

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
	struct bpf_insn *insn = insn_buf;

		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));

		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);

		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);

	return insn - insn_buf;
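
/* For illustration (a sketch, not part of the original file): userspace
 * reaches these conversions via classic BPF "ancillary" loads. E.g. a
 * filter that accepts only PACKET_HOST frames:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
 *			 SKF_AD_OFF + SKF_AD_PKTTYPE),	(A = pkt_type)
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, PACKET_HOST, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	(accept)
 *		BPF_STMT(BPF_RET | BPF_K, 0),		(drop)
 *	};
 *
 * convert_bpf_extensions() rewrites the SKF_AD_PKTTYPE load into the
 * convert_skb_access() sequence emitted above.
 */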
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
	struct bpf_insn *insn = *insnp;

	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);

		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);

		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: buffer where converted program will be stored
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF instructions to 'bpf_insn' style
 * eBPF instructions. Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) Second pass to do the actual remapping, using the jump
 *    offsets computed in the first pass:
 *	new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_insn *new_prog, int *new_len)
	int new_flen = 0, pass = 0, target, i;
	struct bpf_insn *new_insn;
	struct sock_filter *fp;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)

	addrs = kcalloc(len, sizeof(*addrs),
			GFP_KERNEL | __GFP_NOWARN);

	/* Classic BPF related prologue emission. */

	/* Classic BPF expects A and X to be reset first. These need
	 * to be guaranteed to be the first two instructions.
	 */
	*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*new_insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

	/* All programs must keep CTX in callee saved BPF_REG_CTX.
	 * In the eBPF case it's done by the compiler; here we need to
	 * do this ourselves. The initial CTX is present in BPF_REG_ARG1.
	 */
	*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

			addrs[i] = new_insn - new_prog;

		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for an overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with the mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
			if (target >= len || target < 0)		\
			insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
			/* Adjust pc relative offset for 2nd or 3rd insn. */ \
			insn->off -= insn - tmp_insns;			\

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * the immediate into a tmp register and use it
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;

				insn->dst_reg = BPF_REG_A;

			bpf_src = BPF_SRC(fp->code);
			insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;

			/* Common case where 'jump_false' is next insn. */
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | BPF_JNE | bpf_src;
				target = i + fp->jf + 1;

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);

		/* RET_K is remapped into 2 insns. The RET_A case doesn't need
		 * an extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
			*insn = BPF_EXIT_INSN();

		/* Store to stack. */
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);

		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);

		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);

		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);

		/* Unknown instruction. */

		memcpy(new_insn, tmp_insns,
		       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;

		/* Only calculating new length. */
		*new_len = new_insn - new_prog;

	if (new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;

	BUG_ON(*new_len != new_flen);
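
/* A worked example (illustrative sketch, not from the original file): the
 * classic conditional jump
 *
 *	BPF_JUMP(BPF_JMP | BPF_JGT | BPF_K, 64, 2, 5)
 *
 * has two non-fall-through targets, so it lands in the "other jumps" path
 * above and is emitted as a Jxx/JA pair:
 *
 *	if A > 64 goto addrs[i + 3]	(BPF_JMP | BPF_JGT | BPF_K)
 *	goto addrs[i + 6]		(BPF_JMP | BPF_JA)
 *
 * with both offsets rewritten from the classic jt/jf deltas into eBPF
 * pc-relative offsets via BPF_EMIT_JMP.
 */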
/*
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
			memvalid |= (1 << filter[pc].k);
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;

static bool chk_code_allowed(u16 code_to_probe)
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,

	if (code_to_probe >= ARRAY_SIZE(codes))

	return codes[code_to_probe];

static bool bpf_check_basics_ok(const struct sock_filter *filter,
	if (flen == 0 || flen > BPF_MAXINSNS)

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			if (bpf_anc_helper(ftest) & BPF_ANC)
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
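
/* Illustrative sketch (not in the original file) of a filter these checks
 * reject: reading scratch memory before it was ever written trips
 * check_load_and_stores(), since mem[] is not cleared per packet:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),	(A = mem[0], never stored)
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *
 * whereas issuing a BPF_ST with k = 0 before the load passes.
 */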
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);

static void bpf_release_orig_filter(struct bpf_prog *fp)
	struct sock_fprog_kern *fprog = fp->orig_prog;

		kfree(fprog->filter);

static void __bpf_prog_release(struct bpf_prog *prog)
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_release_orig_filter(prog);

static void __sk_filter_release(struct sk_filter *fp)
	__bpf_prog_release(fp->prog);

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);

/* Try to charge the socket memory if there is space available.
 * Return true on success.
 */
bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_inc(&fp->refcnt);
		atomic_add(filter_size, &sk->sk_omem_alloc);

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);

	/* Expand fp for appending the new filter representation. */
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);

		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);

		/* The 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 */

	/* We are guaranteed to never error here with cBPF to eBPF
	 * transitions, since there's no issue with type compatibility
	 * checks on program arrays.
	 */
	fp = bpf_prog_select_runtime(fp, &err);

	__bpf_prog_release(fp);
	return ERR_PTR(err);

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
	fp->bpf_func = NULL;

	err = bpf_check_classic(fp->insns, fp->len);
		__bpf_prog_release(fp);
		return ERR_PTR(err);

	/* There might be additional checks and transformations
	 * needed on classic filters, e.g. in case of seccomp.
	 */
		err = trans(fp->insns, fp->len);
			__bpf_prog_release(fp);
			return ERR_PTR(err);

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* The JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
		fp = bpf_migrate_filter(fp);

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure the new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);

EXPORT_SYMBOL_GPL(bpf_prog_create);
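
/* Usage sketch (illustrative, not from the original file): in-kernel users
 * such as drivers build a classic program and hand it to bpf_prog_create():
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	(accept whole packet)
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *fp;
 *	int err = bpf_prog_create(&fp, &fprog);
 *
 * On success the program can be run via BPF_PROG_RUN() and must later be
 * freed with bpf_prog_destroy().
 */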
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from a user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure the new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);

	fp->len = fprog->len;
	fp->orig_prog = NULL;

		err = bpf_prog_store_orig_filter(fp, fprog);
			__bpf_prog_free(fp);

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);

EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
	__bpf_prog_release(fp);

EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);

	atomic_set(&fp->refcnt, 0);

	if (!sk_filter_charge(sk, fp)) {

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

		sk_filter_uncharge(sk, old_fp);

static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
	struct bpf_prog *old_prog;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */

	old_prog = reuseport_attach_prog(sk, prog);
		bpf_prog_destroy(old_prog);

struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure the new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
	struct bpf_prog *prog = __get_filter(fprog, sk);

		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
		__bpf_prog_release(prog);

EXPORT_SYMBOL_GPL(sk_attach_filter);
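
/* From userspace (illustrative sketch), the attach path above is reached
 * via setsockopt() with SO_ATTACH_FILTER:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * SO_ATTACH_BPF takes an eBPF program fd instead and lands in
 * sk_attach_bpf() below.
 */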
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
	struct bpf_prog *prog = __get_filter(fprog, sk);

		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
		__bpf_prog_release(prog);

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);

int sk_attach_bpf(u32 ufd, struct sock *sk)
	struct bpf_prog *prog = __get_bpf(ufd, sk);

		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
	struct bpf_prog *prog = __get_bpf(ufd, sk);

		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);

struct bpf_scratchpad {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
	return skb_ensure_writable(skb, write_len);

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end(skb);

static int bpf_try_make_head_writable(struct sk_buff *skb)
	return bpf_try_make_writable(skb, skb_headlen(skb));

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
	if (unlikely(offset > 0xffff))
	if (unlikely(bpf_try_make_writable(skb, offset + len)))

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
	if (unlikely(offset > 0xffff))

	ptr = skb_header_pointer(skb, offset, len, to);

	memcpy(to, ptr, len);

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
	/* The idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * the test, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writable,
	 * this can be done once at the program beginning for the direct
	 * access case. By this we overcome limitations of only the current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
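
/* Typical use from a tc BPF program (illustrative sketch): when a direct
 * packet access bounds check fails because the data sits in non-linear
 * pages, pull it in and re-load the data pointers before retrying:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + ETH_HLEN > data_end) {
 *		if (bpf_skb_pull_data(skb, ETH_HLEN))
 *			return TC_ACT_SHOT;
 *		data = (void *)(long)skb->data;		(pointers were
 *		data_end = (void *)(long)skb->data_end;	 invalidated)
 *	}
 */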
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
	if (unlikely(offset > 0xffff || offset & 1))
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
		if (unlikely(from != 0))

		csum_replace_by_diff(ptr, to);
		csum_replace2(ptr, from, to);
		csum_replace4(ptr, from, to);

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
	if (unlikely(offset > 0xffff || offset & 1))
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)

	switch (flags & BPF_F_HDR_FIELD_MASK) {
		if (unlikely(from != 0))

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
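
/* Common pattern from a BPF program (illustrative sketch): rewrite an IPv4
 * address and fix up the L3 and L4 checksums with the computed diff:
 *
 *	__be32 old_ip, new_ip = ...;
 *	__wsum diff;
 *
 *	bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
 *	diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, 4, 0);
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, 0, diff, 0);
 *	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff, BPF_F_PSEUDO_HDR);
 *
 * where IP_DST_OFF, IP_CSUM_OFF and TCP_CSUM_OFF are offsets the program
 * computes itself (hypothetical names here). Passing from == 0 with a zero
 * field size selects the csum_replace_by_diff() paths above.
 */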
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
	return dev_forward_skb(dev, skb);

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
	int ret = ____dev_forward_skb(dev, skb);

		ret = netif_rx(skb);

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_postpull_rcsum() has to be done in case the
	 * skb originated from ingress (i.e. a forwarded skb) to ensure
	 * that rcsum starts at the net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);

	return __bpf_redirect_no_mac(skb, dev, flags);

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
	struct net_device *dev;
	struct sk_buff *clone;

	if (unlikely(flags & ~(BPF_F_INGRESS)))

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {

	return __bpf_redirect(clone, dev, flags);

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,

struct redirect_info {

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))

	ri->ifindex = ifindex;

	return TC_ACT_REDIRECT;

int skb_do_redirect(struct sk_buff *skb)
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	if (unlikely(!dev)) {

	return __bpf_redirect(skb, dev, ri->flags);

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
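
/* Illustrative contrast (sketch, not from the original file):
 * bpf_clone_redirect() forwards a copy immediately from within the helper,
 * while bpf_redirect() only records ifindex/flags in the per-CPU
 * redirect_info and relies on the program returning TC_ACT_REDIRECT so
 * the stack later invokes skb_do_redirect() on the original skb, avoiding
 * the clone:
 *
 *	int redirect_prog(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(TARGET_IFINDEX, 0);	(hypothetical ifindex)
 *	}
 */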
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
	return task_get_classid(skb);

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
	return dst_tclassid(skb);

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to the hash
	 * can then use the inline skb->hash via the context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
	/* After all direct packet writes, this can be used once for
	 * triggering a lazy recalc on the next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,

EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_end(skb);

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,

EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);

static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here, as it does not change the skb->csum
	 * result for checksum complete when summing over
	 */

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
	bool trans_same = skb->transport_header == skb->network_header;

	/* There's no need for an __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header, as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
		skb->mac_header -= len;
		skb->network_header -= len;
			skb->transport_header = skb->network_header;

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
	bool trans_same = skb->transport_header == skb->network_header;

	/* Same here, an __skb_push()/__skb_pull() pair is not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
		skb->mac_header += len;
		skb->network_header += len;
			skb->transport_header = skb->network_header;

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb->network_header - skb->mac_header;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))

	if (skb_is_gso(skb)) {
		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV4 needs to
		 * be changed into SKB_GSO_TCPV6.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV4;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV6;

		/* Due to the IPv6 header, MSS needs to be downgraded. */
		skb_shinfo(skb)->gso_size -= len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb->network_header - skb->mac_header;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))

	if (skb_is_gso(skb)) {
		/* SKB_GSO_UDP stays as is. SKB_GSO_TCPV6 needs to
		 * be changed into SKB_GSO_TCPV4.
		 */
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			skb_shinfo(skb)->gso_type &= ~SKB_GSO_TCPV6;
			skb_shinfo(skb)->gso_type |=  SKB_GSO_TCPV4;

		/* Due to the IPv4 header, MSS can be upgraded. */
		skb_shinfo(skb)->gso_size += len_diff;
		/* Header must be checked, and gso_segs recomputed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	if (unlikely(flags))

	/* The general idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and the eBPF program fills the
	 * rest in through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. E.g. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we also keep separation of
	 * concerns, since e.g. bpf_skb_store_bytes() should only take
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but the flags register is reserved so we can adapt
	 * that. For offloads, we mark the packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_end(skb);

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
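
/* Usage sketch (illustrative): a NAT64-style tc program converts the header
 * layout first and then rewrites the contents itself, e.g.:
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *
 * (bpf_htons() here is the byte-order macro from the BPF program side),
 * followed by filling in the IPv6 header via bpf_skb_store_bytes() and
 * fixing checksums via bpf_l4_csum_replace() as described above.
 */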
2096 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2098 /* We only allow a restricted subset to be changed for now. */
2099 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2100 !skb_pkt_type_ok(pkt_type)))
2103 skb->pkt_type = pkt_type;
2107 static const struct bpf_func_proto bpf_skb_change_type_proto = {
2108 .func = bpf_skb_change_type,
2110 .ret_type = RET_INTEGER,
2111 .arg1_type = ARG_PTR_TO_CTX,
2112 .arg2_type = ARG_ANYTHING,
2115 static u32 __bpf_skb_min_len(const struct sk_buff *skb)
2117 u32 min_len = skb_network_offset(skb);
2119 if (skb_transport_header_was_set(skb))
2120 min_len = skb_transport_offset(skb);
2121 if (skb->ip_summed == CHECKSUM_PARTIAL)
2122 min_len = skb_checksum_start_offset(skb) +
2123 skb->csum_offset + sizeof(__sum16);
2127 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
2129 return skb->dev->mtu + skb->dev->hard_header_len;
2132 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
2134 unsigned int old_len = skb->len;
2137 ret = __skb_grow_rcsum(skb, new_len);
2139 memset(skb->data + old_len, 0, new_len - old_len);
2143 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
2145 return __skb_trim_rcsum(skb, new_len);
2148 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
2151 u32 max_len = __bpf_skb_max_len(skb);
2152 u32 min_len = __bpf_skb_min_len(skb);
2155 if (unlikely(flags || new_len > max_len || new_len < min_len))
2157 if (skb->encapsulation)
2160 /* The basic idea of this helper is that it's performing the
2161 * needed work to either grow or trim an skb, and eBPF program
2162 * rewrites the rest via helpers like bpf_skb_store_bytes(),
2163 * bpf_lX_csum_replace() and others rather than passing a raw
2164 * buffer here. This one is a slow path helper and intended
2165 * for replies with control messages.
2167 * Like in bpf_skb_change_proto(), we want to keep this rather
2168 * minimal and without protocol specifics so that we are able
2169 * to separate concerns as in bpf_skb_store_bytes() should only
2170 * be the one responsible for writing buffers.
2172 * It's really expected to be a slow path operation here for
2173 * control message replies, so we're implicitly linearizing,
2174 * uncloning and drop offloads from the skb by this.
2176 ret = __bpf_try_make_writable(skb, skb->len);
2178 if (new_len > skb->len)
2179 ret = bpf_skb_grow_rcsum(skb, new_len);
2180 else if (new_len < skb->len)
2181 ret = bpf_skb_trim_rcsum(skb, new_len);
2182 if (!ret && skb_is_gso(skb))
2186 bpf_compute_data_end(skb);
2190 static const struct bpf_func_proto bpf_skb_change_tail_proto = {
2191 .func = bpf_skb_change_tail,
2193 .ret_type = RET_INTEGER,
2194 .arg1_type = ARG_PTR_TO_CTX,
2195 .arg2_type = ARG_ANYTHING,
2196 .arg3_type = ARG_ANYTHING,
2199 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
2202 u32 max_len = __bpf_skb_max_len(skb);
2203 u32 new_len = skb->len + head_room;
2206 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
2207 new_len < skb->len))
2210 ret = skb_cow(skb, head_room);
2212 /* Idea for this helper is that we currently only
2213 * allow to expand on mac header. This means that
2214 * skb->protocol network header, etc, stay as is.
2215 * Compared to bpf_skb_change_tail(), we're more
2216 * flexible due to not needing to linearize or
2217 * reset GSO. Intention for this helper is to be
2218 * used by an L3 skb that needs to push mac header
2219 * for redirection into L2 device.
2221 __skb_push(skb, head_room);
2222 memset(skb->data, 0, head_room);
2223 skb_reset_mac_header(skb);
2226 bpf_compute_data_end(skb);
2230 static const struct bpf_func_proto bpf_skb_change_head_proto = {
2231 .func = bpf_skb_change_head,
2233 .ret_type = RET_INTEGER,
2234 .arg1_type = ARG_PTR_TO_CTX,
2235 .arg2_type = ARG_ANYTHING,
2236 .arg3_type = ARG_ANYTHING,
2239 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
2241 void *data = xdp->data + offset;
2243 if (unlikely(data < xdp->data_hard_start ||
2244 data > xdp->data_end - ETH_HLEN))
2252 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
2253 .func = bpf_xdp_adjust_head,
2255 .ret_type = RET_INTEGER,
2256 .arg1_type = ARG_PTR_TO_CTX,
2257 .arg2_type = ARG_ANYTHING,
2260 bool bpf_helper_changes_pkt_data(void *func)
2262 if (func == bpf_skb_vlan_push ||
2263 func == bpf_skb_vlan_pop ||
2264 func == bpf_skb_store_bytes ||
2265 func == bpf_skb_change_proto ||
2266 func == bpf_skb_change_head ||
2267 func == bpf_skb_change_tail ||
2268 func == bpf_skb_pull_data ||
2269 func == bpf_l3_csum_replace ||
2270 func == bpf_l4_csum_replace ||
2271 func == bpf_xdp_adjust_head)
2277 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
2278 unsigned long off, unsigned long len)
2280 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
2284 if (ptr != dst_buff)
2285 memcpy(dst_buff, ptr, len);
2290 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
2291 u64, flags, void *, meta, u64, meta_size)
2293 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
2295 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
2297 if (unlikely(skb_size > skb->len))
2300 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
2304 static const struct bpf_func_proto bpf_skb_event_output_proto = {
2305 .func = bpf_skb_event_output,
2307 .ret_type = RET_INTEGER,
2308 .arg1_type = ARG_PTR_TO_CTX,
2309 .arg2_type = ARG_CONST_MAP_PTR,
2310 .arg3_type = ARG_ANYTHING,
2311 .arg4_type = ARG_PTR_TO_MEM,
2312 .arg5_type = ARG_CONST_SIZE,
2315 static unsigned short bpf_tunnel_key_af(u64 flags)
2317 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
2320 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
2321 u32, size, u64, flags)
2323 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
2324 u8 compat[sizeof(struct bpf_tunnel_key)];
2328 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
2332 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
2336 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
2339 case offsetof(struct bpf_tunnel_key, tunnel_label):
2340 case offsetof(struct bpf_tunnel_key, tunnel_ext):
2342 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
2343 /* Fixup deprecated structure layouts here, so we have
2344 * a common path later on.
2346 if (ip_tunnel_info_af(info) != AF_INET)
2349 to = (struct bpf_tunnel_key *)compat;
2356 to->tunnel_id = be64_to_cpu(info->key.tun_id);
2357 to->tunnel_tos = info->key.tos;
2358 to->tunnel_ttl = info->key.ttl;
2360 if (flags & BPF_F_TUNINFO_IPV6) {
2361 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
2362 sizeof(to->remote_ipv6));
2363 to->tunnel_label = be32_to_cpu(info->key.label);
2365 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
2368 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
2369 memcpy(to_orig, to, size);
2373 memset(to_orig, 0, size);
2377 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
2378 .func = bpf_skb_get_tunnel_key,
2380 .ret_type = RET_INTEGER,
2381 .arg1_type = ARG_PTR_TO_CTX,
2382 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
2383 .arg3_type = ARG_CONST_SIZE,
2384 .arg4_type = ARG_ANYTHING,
BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
		if (flags & BPF_F_ZERO_CSUM_TX)
			info->key.tun_flags &= ~TUNNEL_CSUM;
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func		= bpf_skb_set_tunnel_key,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

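/* Example (illustrative sketch, not part of the original file): the
 * transmit-side counterpart, attached on egress of a collect_md device.
 * Addresses and IDs below are made up; SEC() and TC_ACT_* assumed from
 * sample-style headers.
 */
#if 0
SEC("classifier")
int tunnel_out(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id   = 42,
		.remote_ipv4 = 0xac100164,	/* 172.16.1.100, host order */
		.tunnel_ttl  = 64,
	};

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}
#endif
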
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func		= bpf_skb_set_tunnel_opt,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		/* Race is not possible, since it's called from verifier
		 * that is holding verifier mutex.
		 */
		md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						   GFP_KERNEL);
		if (!md_dst)
			return NULL;
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func		= bpf_skb_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

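/* Example (illustrative sketch, not part of the original file): testing
 * cgroup v2 membership of the skb's socket against slot 0 of a cgroup
 * array map that user space has populated with a cgroup fd. The map
 * definition style follows the kernel samples and is an assumption.
 */
#if 0
struct bpf_map_def SEC("maps") cgroup_map = {
	.type		= BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 1,
};

SEC("classifier")
int cls_cgroup(struct __sk_buff *skb)
{
	/* 1: in the cgroup (or a descendant), 0: not, < 0: error. */
	if (bpf_skb_under_cgroup(skb, &cgroup_map, 0) == 1)
		return TC_ACT_OK;

	return TC_ACT_SHOT;
}
#endif
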
static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func		= bpf_xdp_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_inout_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	default:
		return lwt_inout_func_proto(func_id);
	}
}

static bool __is_valid_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		if (off + size >
		    offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
			return false;
		break;
	default:
		if (size != sizeof(__u32))
			return false;
	}

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, tc_classid):
	case offsetof(struct __sk_buff, data):
	case offsetof(struct __sk_buff, data_end):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
			break;
		default:
			return false;
		}
	}

	return __is_valid_access(off, size);
}

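/* Example (illustrative sketch, not part of the original file):
 * cb[0]..cb[4] are the only writable __sk_buff fields for socket
 * filters and give 20 bytes of scratch space, e.g. for handing state to
 * a tail-called program. The prog array map "jmp_table" below is
 * hypothetical; SEC() and bpf_tail_call() come from sample headers.
 */
#if 0
struct bpf_map_def SEC("maps") jmp_table = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 8,
};

SEC("socket")
int filter_main(struct __sk_buff *skb)
{
	skb->cb[0] = 0xcafe;		/* writable per the check above */
	bpf_tail_call(skb, &jmp_table, 1);
	return 0;			/* tail call failed: drop */
}

SEC("socket")
int filter_next(struct __sk_buff *skb)
{
	/* Socket filters return the number of bytes to keep. */
	return skb->cb[0] == 0xcafe ? skb->len : 0;
}
#endif
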
static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				enum bpf_reg_type *reg_type)
{
	switch (off) {
	case offsetof(struct __sk_buff, tc_classid):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_access(off, size);
}

static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock, bound_dev_if):
			break;
		default:
			return false;
		}
	}

	if (off < 0 || off + size > sizeof(struct bpf_sock))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

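/* In C terms the generated prologue roughly corresponds to prepending
 * (illustration only; the real thing is emitted as raw instructions):
 *
 *	if (skb->cloned && bpf_skb_pull_data(skb, 0))
 *		return TC_ACT_SHOT;
 *
 * so that programs with direct packet writes never scribble on data
 * shared with a clone.
 */
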
static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct __sk_buff, mark):
		case offsetof(struct __sk_buff, tc_index):
		case offsetof(struct __sk_buff, priority):
		case offsetof(struct __sk_buff, cb[0]) ...
		     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		case offsetof(struct __sk_buff, tc_classid):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct __sk_buff, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct __sk_buff, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_access(off, size);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				enum bpf_reg_type *reg_type)
{
	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case offsetof(struct xdp_md, data):
		*reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_end):
		*reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

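/* Example (illustrative sketch, not part of the original file): the
 * read-only data/data_end pair is how XDP programs access the packet;
 * every load must be preceded by an explicit bounds check against
 * data_end or the verifier rejects the program. SEC(), struct ethhdr
 * and the byte-order macro are assumed from sample-style includes.
 */
#if 0
SEC("xdp")
int xdp_pass_ipv4(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct ethhdr *eth = data;

	if (data + sizeof(*eth) > data_end)
		return XDP_DROP;

	return eth->h_proto == __constant_htons(ETH_P_IP) ?
	       XDP_PASS : XDP_DROP;
}
#endif
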
void bpf_warn_invalid_xdp_action(u32 act)
{
	WARN_ONCE(1, "Illegal XDP return value %u, expect packet loss\n", act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, len));
		break;

	case offsetof(struct __sk_buff, protocol):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, protocol));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, vlan_proto));
		break;

	case offsetof(struct __sk_buff, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, priority));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, skb_iif));
		break;

	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;

	case offsetof(struct __sk_buff, hash):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, hash));
		break;

	case offsetof(struct __sk_buff, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, mark));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
					  si->src_reg, insn);

	case offsetof(struct __sk_buff, queue_mapping):
		return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
					  si->src_reg, insn);

	case offsetof(struct __sk_buff, vlan_present):
		return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					  si->dst_reg, si->src_reg, insn);

	case offsetof(struct __sk_buff, vlan_tci):
		return convert_skb_access(SKF_AD_VLAN_TAG,
					  si->dst_reg, si->src_reg, insn);

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off  = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		off  = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, tc_index));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      offsetof(struct sk_buff, tc_index));
		break;
#else
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
		break;
#endif
	}

	return insn - insn_buf;
}

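/* For illustration: a BPF C statement such as
 *
 *	mark = skb->mark;	(ctx typed as struct __sk_buff *)
 *
 * reaches the verifier as a BPF_LDX_MEM at offsetof(struct __sk_buff,
 * mark) and is rewritten by the function above into a load from the
 * real offsetof(struct sk_buff, mark); no struct __sk_buff instance
 * ever exists in memory at run time.
 */
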
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
					  const struct bpf_insn *si,
					  struct bpf_insn *insn_buf,
					  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sock, sk_family));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		break;
	}

	return insn - insn_buf;
}

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog);
	}

	return insn - insn_buf;
}

static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	}

	return insn - insn_buf;
}

static const struct bpf_verifier_ops sk_filter_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

static const struct bpf_verifier_ops tc_cls_act_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

static const struct bpf_verifier_ops xdp_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

static const struct bpf_verifier_ops cg_skb_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

static const struct bpf_verifier_ops lwt_inout_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

static const struct bpf_verifier_ops lwt_xmit_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

static const struct bpf_verifier_ops cg_sock_ops = {
	.get_func_proto		= bpf_base_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

static struct bpf_prog_type_list sk_filter_type __ro_after_init = {
	.ops	= &sk_filter_ops,
	.type	= BPF_PROG_TYPE_SOCKET_FILTER,
};

static struct bpf_prog_type_list sched_cls_type __ro_after_init = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_CLS,
};

static struct bpf_prog_type_list sched_act_type __ro_after_init = {
	.ops	= &tc_cls_act_ops,
	.type	= BPF_PROG_TYPE_SCHED_ACT,
};

static struct bpf_prog_type_list xdp_type __ro_after_init = {
	.ops	= &xdp_ops,
	.type	= BPF_PROG_TYPE_XDP,
};

static struct bpf_prog_type_list cg_skb_type __ro_after_init = {
	.ops	= &cg_skb_ops,
	.type	= BPF_PROG_TYPE_CGROUP_SKB,
};

static struct bpf_prog_type_list lwt_in_type __ro_after_init = {
	.ops	= &lwt_inout_ops,
	.type	= BPF_PROG_TYPE_LWT_IN,
};

static struct bpf_prog_type_list lwt_out_type __ro_after_init = {
	.ops	= &lwt_inout_ops,
	.type	= BPF_PROG_TYPE_LWT_OUT,
};

static struct bpf_prog_type_list lwt_xmit_type __ro_after_init = {
	.ops	= &lwt_xmit_ops,
	.type	= BPF_PROG_TYPE_LWT_XMIT,
};

static struct bpf_prog_type_list cg_sock_type __ro_after_init = {
	.ops	= &cg_sock_ops,
	.type	= BPF_PROG_TYPE_CGROUP_SOCK
};

static int __init register_sk_filter_ops(void)
{
	bpf_register_prog_type(&sk_filter_type);
	bpf_register_prog_type(&sched_cls_type);
	bpf_register_prog_type(&sched_act_type);
	bpf_register_prog_type(&xdp_type);
	bpf_register_prog_type(&cg_skb_type);
	bpf_register_prog_type(&cg_sock_type);
	bpf_register_prog_type(&lwt_in_type);
	bpf_register_prog_type(&lwt_out_type);
	bpf_register_prog_type(&lwt_xmit_type);

	return 0;
}
late_initcall(register_sk_filter_ops);

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

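/* Example (illustrative sketch, not part of the original file): from
 * user space, a classic filter is detached with a zero-length
 * SO_DETACH_FILTER setsockopt; optval/optlen are unused by the kernel.
 */
#if 0
#include <sys/socket.h>

static int detach_filter(int fd)
{
	return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
}
#endif
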
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = 0;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}