/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;

	return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
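/*
 * Illustrative sketch (not part of this file): userspace selects the
 * negative-offset path through the SKF_* bases from <linux/filter.h>.
 * For example, this instruction loads the IPv4 protocol byte relative
 * to the network header, whatever the link-level header length is:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * The resulting negative k is resolved by the helpers above.
 */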
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to correct size returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
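/*
 * Illustrative (a pattern used by receive paths such as
 * sock_queue_rcv_skb()): the return value is a drop decision, and any
 * trimming has already happened by the time the caller sees 0:
 *
 *	if (sk_filter(sk, skb))
 *		goto drop;	// filter returned 0, so err is -EPERM
 */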
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass to this function the length of the filter.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_MOD_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;
		case BPF_S_ALU_MOD_K:
			A %= K;
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ANC_ALU_XOR_X:
		case BPF_S_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ALU_XOR_K:
			A ^= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
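		/*
		 * Illustrative note: jt/jf above are unsigned 8-bit deltas
		 * relative to the next instruction, so e.g.
		 *	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0806, 0, 1)
		 * falls through on a match and skips one instruction
		 * otherwise; backward jumps are unrepresentable.
		 */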
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_VLAN_TAG:
			A = vlan_tx_tag_get(skb);
			continue;
		case BPF_S_ANC_VLAN_TAG_PRESENT:
			A = !!vlan_tx_tag_present(skb);
			continue;
		case BPF_S_ANC_PAY_OFFSET:
			A = __skb_get_poff(skb);
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
#ifdef CONFIG_SECCOMP_FILTER
		case BPF_S_ANC_SECCOMP_LD_W:
			A = seccomp_bpf_load(fentry->k);
			continue;
#endif
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
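/*
 * Illustrative sketch (not part of this file): a classic BPF program as
 * userspace would express it with the macros from <linux/filter.h>. It
 * accepts ARP frames (EtherType 0x0806) truncated to 64 bytes and drops
 * everything else; offsets assume an Ethernet header at skb->data:
 *
 *	struct sock_filter arp_prog[] = {
 *		BPF_STMT(BPF_LD  | BPF_H | BPF_ABS, 12),           // A = EtherType
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0806, 0, 1), // ARP?
 *		BPF_STMT(BPF_RET | BPF_K, 64),                     // accept, keep 64 bytes
 *		BPF_STMT(BPF_RET | BPF_K, 0),                      // drop
 *	};
 *
 * After sk_chk_filter() has vetted and translated the opcodes, running
 * it over an skb returns 64 for ARP and 0 for anything else.
 */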
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell unless it was previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
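/*
 * For instance (illustrative), this two-instruction program is rejected
 * with -EINVAL because M[3] is read before any ST/STX ever writes it:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),	// A = M[3], never initialized
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 */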
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_MOD|BPF_K]  = BPF_S_ALU_MOD_K,
		[BPF_ALU|BPF_MOD|BPF_X]  = BPF_S_ALU_MOD_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_XOR|BPF_K]  = BPF_S_ALU_XOR_K,
		[BPF_ALU|BPF_XOR|BPF_X]  = BPF_S_ALU_XOR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	bool anc_found;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_ALU_MOD_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				anc_found = true;		\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			ANCILLARY(VLAN_TAG);
			ANCILLARY(VLAN_TAG_PRESENT);
			ANCILLARY(PAY_OFFSET);
			}

			/* ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
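/*
 * Illustrative sketch: an "ancillary" load is an ordinary absolute load
 * whose offset lies at or beyond SKF_AD_OFF. For example,
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * is rewritten by the ANCILLARY() switch above to the internal opcode
 * BPF_S_ANC_IFINDEX, so sk_run_filter() loads skb->dev->ifindex instead
 * of packet bytes.
 */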
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
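/*
 * Illustrative: this is not called directly. sk_filter_release()
 * (include/linux/filter.h) drops the last reference with
 *
 *	call_rcu(&fp->rcu, sk_filter_release_rcu);
 *
 * so the filter outlives any readers still inside SK_RUN_FILTER().
 */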
static int __sk_prepare_filter(struct sk_filter *fp)
{
	int err;

	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
		return err;

	bpf_jit_compile(fp);

	return 0;
}
/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@fprog: the filter program
 *	@pfp: the unattached filter that is created
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		kfree(fp);
		return err;
	}

	*pfp = fp;

	return 0;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
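/*
 * Illustrative sketch (kernel-side caller, error handling elided), in the
 * style of users such as the team driver; "insns" and "len" here are
 * hypothetical kernel-resident filter instructions:
 *
 *	struct sock_fprog fprog = { .len = len, .filter = insns };
 *	struct sk_filter *fp;
 *
 *	if (sk_unattached_filter_create(&fp, &fprog) == 0) {
 *		unsigned int res = SK_RUN_FILTER(fp, skb);
 *		sk_unattached_filter_destroy(fp);
 *	}
 */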
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	unsigned int sk_fsize = sk_filter_size(fprog->len);
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, sk_fsize);
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
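/*
 * Illustrative sketch (userspace, error handling elided): this function
 * is reached through setsockopt() on any socket:
 *
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(insns) / sizeof(insns[0]),
 *		.filter	= insns,	// e.g. the ARP program sketched earlier
 *	};
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */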
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
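/*
 * Illustrative sketch (userspace): detaching takes no argument and fails
 * with ENOENT when nothing is attached:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 */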
void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
{
	static const u16 decodes[] = {
		[BPF_S_ALU_ADD_K]	= BPF_ALU|BPF_ADD|BPF_K,
		[BPF_S_ALU_ADD_X]	= BPF_ALU|BPF_ADD|BPF_X,
		[BPF_S_ALU_SUB_K]	= BPF_ALU|BPF_SUB|BPF_K,
		[BPF_S_ALU_SUB_X]	= BPF_ALU|BPF_SUB|BPF_X,
		[BPF_S_ALU_MUL_K]	= BPF_ALU|BPF_MUL|BPF_K,
		[BPF_S_ALU_MUL_X]	= BPF_ALU|BPF_MUL|BPF_X,
		[BPF_S_ALU_DIV_X]	= BPF_ALU|BPF_DIV|BPF_X,
		[BPF_S_ALU_MOD_K]	= BPF_ALU|BPF_MOD|BPF_K,
		[BPF_S_ALU_MOD_X]	= BPF_ALU|BPF_MOD|BPF_X,
		[BPF_S_ALU_AND_K]	= BPF_ALU|BPF_AND|BPF_K,
		[BPF_S_ALU_AND_X]	= BPF_ALU|BPF_AND|BPF_X,
		[BPF_S_ALU_OR_K]	= BPF_ALU|BPF_OR|BPF_K,
		[BPF_S_ALU_OR_X]	= BPF_ALU|BPF_OR|BPF_X,
		[BPF_S_ALU_XOR_K]	= BPF_ALU|BPF_XOR|BPF_K,
		[BPF_S_ALU_XOR_X]	= BPF_ALU|BPF_XOR|BPF_X,
		[BPF_S_ALU_LSH_K]	= BPF_ALU|BPF_LSH|BPF_K,
		[BPF_S_ALU_LSH_X]	= BPF_ALU|BPF_LSH|BPF_X,
		[BPF_S_ALU_RSH_K]	= BPF_ALU|BPF_RSH|BPF_K,
		[BPF_S_ALU_RSH_X]	= BPF_ALU|BPF_RSH|BPF_X,
		[BPF_S_ALU_NEG]		= BPF_ALU|BPF_NEG,
		[BPF_S_LD_W_ABS]	= BPF_LD|BPF_W|BPF_ABS,
		[BPF_S_LD_H_ABS]	= BPF_LD|BPF_H|BPF_ABS,
		[BPF_S_LD_B_ABS]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PROTOCOL]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PKTTYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_IFINDEX]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_NLATTR_NEST]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_MARK]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_QUEUE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_HATYPE]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_RXHASH]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_CPU]		= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_ALU_XOR_X]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_ANC_PAY_OFFSET]	= BPF_LD|BPF_B|BPF_ABS,
		[BPF_S_LD_W_LEN]	= BPF_LD|BPF_W|BPF_LEN,
		[BPF_S_LD_W_IND]	= BPF_LD|BPF_W|BPF_IND,
		[BPF_S_LD_H_IND]	= BPF_LD|BPF_H|BPF_IND,
		[BPF_S_LD_B_IND]	= BPF_LD|BPF_B|BPF_IND,
		[BPF_S_LD_IMM]		= BPF_LD|BPF_IMM,
		[BPF_S_LDX_W_LEN]	= BPF_LDX|BPF_W|BPF_LEN,
		[BPF_S_LDX_B_MSH]	= BPF_LDX|BPF_B|BPF_MSH,
		[BPF_S_LDX_IMM]		= BPF_LDX|BPF_IMM,
		[BPF_S_MISC_TAX]	= BPF_MISC|BPF_TAX,
		[BPF_S_MISC_TXA]	= BPF_MISC|BPF_TXA,
		[BPF_S_RET_K]		= BPF_RET|BPF_K,
		[BPF_S_RET_A]		= BPF_RET|BPF_A,
		[BPF_S_ALU_DIV_K]	= BPF_ALU|BPF_DIV|BPF_K,
		[BPF_S_LD_MEM]		= BPF_LD|BPF_MEM,
		[BPF_S_LDX_MEM]		= BPF_LDX|BPF_MEM,
		[BPF_S_ST]		= BPF_ST,
		[BPF_S_STX]		= BPF_STX,
		[BPF_S_JMP_JA]		= BPF_JMP|BPF_JA,
		[BPF_S_JMP_JEQ_K]	= BPF_JMP|BPF_JEQ|BPF_K,
		[BPF_S_JMP_JEQ_X]	= BPF_JMP|BPF_JEQ|BPF_X,
		[BPF_S_JMP_JGE_K]	= BPF_JMP|BPF_JGE|BPF_K,
		[BPF_S_JMP_JGE_X]	= BPF_JMP|BPF_JGE|BPF_X,
		[BPF_S_JMP_JGT_K]	= BPF_JMP|BPF_JGT|BPF_K,
		[BPF_S_JMP_JGT_X]	= BPF_JMP|BPF_JGT|BPF_X,
		[BPF_S_JMP_JSET_K]	= BPF_JMP|BPF_JSET|BPF_K,
		[BPF_S_JMP_JSET_X]	= BPF_JMP|BPF_JSET|BPF_X,
	};
	u16 code;

	code = filt->code;
	to->code = decodes[code];
	to->jt = filt->jt;
	to->jf = filt->jf;

	if (code == BPF_S_ALU_DIV_K) {
		/*
		 * When loaded this rule user gave us X, which was
		 * translated into R = r(X). Now we calculate the
		 * RR = r(R) and report it back. If next time this
		 * value is loaded and RRR = r(RR) is calculated
		 * then the R == RRR will be true.
		 *
		 * One exception. X == 1 translates into R == 0 and
		 * we can't calculate RR out of it with r().
		 */
		to->k = reciprocal_value(filt->k);

		BUG_ON(reciprocal_value(to->k) != filt->k);
	} else
		to->k = filt->k;
}
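/*
 * Worked example (illustrative): for a user-supplied divisor X = 7 the
 * checker stored R = r(7) = ((1ULL << 32) + 6) / 7 = 0x24924925, and the
 * interpreter computes A / 7 as reciprocal_divide(A, R), i.e.
 * (u32)(((u64)A * R) >> 32). Decoding reports RR = r(R) rather than the
 * original 7; the BUG_ON above checks that the value round-trips.
 */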
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
{
	struct sk_filter *filter;
	int i, ret;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	ret = 0;
	if (!filter)
		goto out;
	ret = filter->len;
	if (!len)
		goto out;
	ret = -EINVAL;
	if (len < filter->len)
		goto out;

	ret = -EFAULT;
	for (i = 0; i < filter->len; i++) {
		struct sock_filter fb;

		sk_decode_filter(&filter->insns[i], &fb);
		if (copy_to_user(&ubuf[i], &fb, sizeof(fb)))
			goto out;
	}

	ret = filter->len;
out:
	release_sock(sk);
	return ret;
}
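/*
 * Illustrative sketch (userspace, error handling elided), assuming the
 * usual two-step SO_GET_FILTER query where optlen counts instructions:
 *
 *	socklen_t n = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &n);	// n = insn count
 *	struct sock_filter *buf = calloc(n, sizeof(*buf));
 *	socklen_t len = n;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, buf, &len);
 */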