/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 * - The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
	BPF_S_ANC_NLATTR_NEST,
/*
 * No hurry in this branch: this is the slow path, taken only for a
 * negative offset k that addresses data relative to the link-level
 * (SKF_LL_OFF) or network (SKF_NET_OFF) header instead of skb->data.
 */
static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return __load_pointer(skb, k, size);
}
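/*
 * Example (illustrative sketch, not part of this file): the negative
 * offset bases let a filter read past variable-length link-level
 * headers. Loading the IPv4 protocol byte, whatever the device type,
 * looks like:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9),
 */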
/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock_bh();
	filter = rcu_dereference_bh(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = sk_run_filter(skb, filter->insns);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(sk_filter);
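/*
 * Example (illustrative sketch, not part of this file): receive paths
 * such as sock_queue_rcv_skb() typically run the filter before queueing
 * and drop the packet on a non-zero return:
 *
 *	err = sk_filter(sk, skb);
 *	if (err)
 *		goto drop;
 */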
/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need
 * to check flen. (We used to pass the filter length to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/*
			 * K holds reciprocal_value(k), precomputed by
			 * sk_chk_filter(), so the divide is a multiply.
			 */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			/*
			 * X = 4 * low nibble of the byte at offset K:
			 * the classic BPF idiom for loading the IPv4
			 * header length (IHL) in bytes.
			 */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must not reach past the data end */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			/* sk_chk_filter() should have rejected this */
			WARN_ON(1);
			return 0;
		}
	}
}
EXPORT_SYMBOL(sk_run_filter);
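/*
 * Example (illustrative sketch, not part of this file): a classic BPF
 * program as built with the BPF_STMT()/BPF_JUMP() macros from
 * <linux/filter.h>. It keeps the first 96 bytes of IPv4 (EtherType
 * 0x0800) frames and drops everything else:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET | BPF_K, 96),
 *		BPF_STMT(BPF_RET | BPF_K, 0),
 *	};
 *
 * The value a RET instruction returns here is what sk_filter() above
 * feeds to pskb_trim().
 */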
/*
 * Security:
 *
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written to, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			/* a store makes its cell valid from here on */
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			/* reject a load from a cell never written to */
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
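/*
 * Example (illustrative sketch, not part of this file): the following
 * two-instruction filter is rejected with -EINVAL by the check above,
 * because it reads scratch cell 0 before anything was stored there:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 0),	// A = mem[0], never written
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 */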
/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;
	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			/*
			 * Pre-compute the reciprocal so the divide in
			 * sk_run_filter() becomes a cheap multiply.
			 */
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			/* remap magic SKF_AD_OFF loads to ancillary ops */
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}
	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
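/*
 * Example (illustrative sketch, not part of this file): the ANCILLARY()
 * remapping above is what lets a program load packet metadata instead of
 * packet bytes. For instance,
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX),
 *
 * is rewritten to BPF_S_ANC_IFINDEX, and sk_run_filter() then places
 * skb->dev->ifindex in the accumulator.
 */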
/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);
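/*
 * Example (illustrative sketch, not part of this file): the last
 * reference drop defers the actual free until all RCU readers of
 * sk->sk_filter are done, roughly:
 *
 *	if (atomic_dec_and_test(&fp->refcnt))
 *		call_rcu(&fp->rcu, sk_filter_release_rcu);
 */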
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
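/*
 * Example (illustrative sketch, not part of this file): user space
 * reaches sk_attach_filter() through setsockopt() with SO_ATTACH_FILTER,
 * e.g. using a program such as the EtherType example above:
 *
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &fprog, sizeof(fprog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 */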
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
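/*
 * Example (illustrative sketch, not part of this file): the matching
 * user space operation is SO_DETACH_FILTER, whose option value is
 * ignored; the call fails with ENOENT if no filter is attached:
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 */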