/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
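
/*
 * Illustrative sketch, not part of the original source: a classic BPF
 * program reaches the negative offsets decoded above by using them as
 * the constant of an absolute load.  For example, this instruction
 * fetches the IPv4 TOS byte relative to the network header, no matter
 * how long the link-layer header is:
 *
 *	struct sock_filter ld_tos =
 *		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 1);
 *
 * BPF_STMT, SKF_NET_OFF and SKF_LL_OFF all come from <linux/filter.h>.
 */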
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
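
/*
 * Illustrative sketch, not part of the original source: the two
 * smallest useful programs, showing how the interpreter's return value
 * drives sk_filter().  Returning 0 makes sk_filter() answer -EPERM for
 * every packet, while a large constant keeps packets untrimmed because
 * pskb_trim() is a no-op when pkt_len >= skb->len:
 *
 *	struct sock_filter drop_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *	struct sock_filter accept_all[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 */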
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass this function the length of the filter.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K holds reciprocal_value(K), patched in by
			 * sk_chk_filter() at load time.
			 */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			/* X = IPv4 header length: low nibble of the byte
			 * at offset K, times four.
			 */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_ALU_XOR_X:
			A ^= X;
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit in the remaining data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
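
/*
 * Illustrative sketch, not part of the original source: a complete
 * program for the interpreter above.  It loads the EtherType halfword
 * at offset 12, accepts up to 64K bytes of the packet when it equals
 * ETH_P_IP and drops everything else:
 *
 *	struct sock_filter ip_only[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 * The ancillary (BPF_S_ANC_*) cases are reached with loads at the
 * magic offsets, e.g.:
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 */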
/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries
 * to read a cell it has not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K] = BPF_S_RET_K,
		[BPF_RET|BPF_A] = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
		[BPF_ST] = BPF_S_ST,
		[BPF_STX] = BPF_S_STX,
		[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			ANCILLARY(ALU_XOR_X);
			}
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
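
/*
 * Illustrative sketch, not part of the original source: the
 * BPF_S_ALU_DIV_K rewrite above means the interpreter never executes a
 * division.  With R = reciprocal_value(K), i.e. ceil(2^32 / K),
 * computed once at load time, the hot path costs one multiply and one
 * shift, roughly:
 *
 *	static inline u32 reciprocal_divide(u32 a, u32 r)
 *	{
 *		return (u32)(((u64)a * r) >> 32);
 *	}
 *
 * This is also why ftest->k is patched in place: the K that
 * sk_run_filter() sees for BPF_S_ALU_DIV_K is the reciprocal, not the
 * user's original divisor.
 */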
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
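
/*
 * Illustrative sketch, not part of the original source: nothing calls
 * sk_filter_release_rcu() directly.  The inline release helper in the
 * headers drops the refcount and defers the actual free past an RCU
 * grace period, so a reader still running the old program under
 * rcu_read_lock() in sk_filter() stays safe; roughly:
 *
 *	static inline void sk_filter_release(struct sk_filter *fp)
 *	{
 *		if (atomic_dec_and_test(&fp->refcnt))
 *			call_rcu(&fp->rcu, sk_filter_release_rcu);
 *	}
 */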
static int __sk_prepare_filter(struct sk_filter *fp)
{
	int err;

	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err)
		return err;

	bpf_jit_compile(fp);
	return 0;
}
/**
 *	sk_unattached_filter_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int sk_unattached_filter_create(struct sk_filter **pfp,
				struct sock_fprog *fprog)
{
	struct sk_filter *fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	memcpy(fp->insns, fprog->filter, fsize);

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err)
		goto free_mem;

	*pfp = fp;
	return 0;
free_mem:
	kfree(fp);
	return err;
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
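
/*
 * Illustrative sketch, not part of the original source: a kernel-side
 * caller wraps kernel-resident instructions in a sock_fprog (note the
 * plain memcpy above, no copy_from_user) and runs the result directly:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = insns };
 *	struct sk_filter *fp;
 *
 *	if (sk_unattached_filter_create(&fp, &fprog) == 0) {
 *		unsigned int keep = SK_RUN_FILTER(fp, skb);
 *		sk_unattached_filter_destroy(fp);
 *	}
 */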
void sk_unattached_filter_destroy(struct sk_filter *fp)
{
	sk_filter_release(fp);
}
EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = __sk_prepare_filter(fp);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
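
/*
 * Illustrative sketch, not part of the original source: userspace
 * reaches this function via setsockopt(SO_ATTACH_FILTER).  A program
 * that keeps only UDP-over-IPv4 frames on a packet socket, in raw
 * { code, jt, jf, k } form:
 *
 *	struct sock_filter code[] = {
 *		{ 0x28, 0, 0, 0x0000000c },	ldh [12]	EtherType
 *		{ 0x15, 0, 3, 0x00000800 },	jeq #0x800	IPv4?
 *		{ 0x30, 0, 0, 0x00000017 },	ldb [23]	IP protocol
 *		{ 0x15, 0, 1, 0x00000011 },	jeq #17		UDP?
 *		{ 0x06, 0, 0, 0x0000ffff },	ret #65535	accept
 *		{ 0x06, 0, 0, 0x00000000 },	ret #0		drop
 *	};
 *	struct sock_fprog prog = { .len = 6, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */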
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
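
/*
 * Illustrative sketch, not part of the original source: the userspace
 * counterpart is SO_DETACH_FILTER.  The option value is ignored, so a
 * dummy integer is customary:
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * The -ENOENT above is what the caller sees (as errno ENOENT) when no
 * filter was attached.
 */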