/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}
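
/*
 * Example of what the negative offsets above enable: a classic BPF
 * instruction such as
 *
 *	BPF_STMT(BPF_LD|BPF_B|BPF_ABS, SKF_LL_OFF + 0)
 *
 * loads the first byte of the link-layer (MAC) header no matter where
 * skb->data currently points, and SKF_NET_OFF addresses the network
 * header the same way.
 */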

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
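
/*
 * Example of the convention above: a filter ending in
 * BPF_STMT(BPF_RET|BPF_K, 96) keeps at most 96 bytes, so longer packets
 * are trimmed; BPF_STMT(BPF_RET|BPF_K, 0) drops the packet (-EPERM);
 * and returning 0xffffffff effectively accepts the whole packet, since
 * pskb_trim() never grows an skb.
 */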

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K is pre-computed by reciprocal_value() in sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			/* X = 4 * (low nibble of byte at K), the classic IP header length trick */
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			/* the attribute must fit within the remaining data */
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
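
/*
 * Example: a minimal classic BPF program of the kind this interpreter
 * runs. It keeps ARP frames in full and drops everything else; offsets
 * assume an Ethernet header at the start of the filtered data, and
 * ETH_P_ARP (0x0806) is the ethertype being matched:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, ETH_P_ARP, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, 0xffffffff),
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *
 * sk_chk_filter() translates these UAPI opcodes into the BPF_S_* values
 * used in the switch above before the program ever reaches here.
 */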

/*
 * Security:
 *
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by a user never tries to
 * read a cell it has not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
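
/*
 * For illustration, a two-instruction program this checker (reached via
 * sk_chk_filter() below) rejects with -EINVAL, because mem[1] is loaded
 * before any BPF_ST/BPF_STX has written that cell:
 *
 *	BPF_STMT(BPF_LD|BPF_MEM, 1),
 *	BPF_STMT(BPF_RET|BPF_A, 0),
 *
 * Prefixing it with BPF_STMT(BPF_ST, 1) (store A into mem[1]) would make
 * it pass, since the store marks the cell valid on every path to the load.
 */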

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM] = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K] = BPF_S_RET_K,
		[BPF_RET|BPF_A] = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM] = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM,
		[BPF_ST] = BPF_S_ST,
		[BPF_STX] = BPF_S_STX,
		[BPF_JMP|BPF_JA] = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, a large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
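
/*
 * Worked example of the BPF_S_ALU_DIV_K handling above: a program
 * containing BPF_STMT(BPF_ALU|BPF_DIV|BPF_K, 0) is rejected outright,
 * while a nonzero divisor such as 7 is rewritten in place to
 * reciprocal_value(7) at load time, so that sk_run_filter() can divide
 * with reciprocal_divide() (a multiply plus shift) instead of a real
 * division on the per-packet fast path.
 */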

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	bpf_jit_compile(fp);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
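
/*
 * For illustration, the userspace side of this call; the fd and the
 * program contents are placeholders, the rest is standard UAPI:
 *
 *	struct sock_filter code[] = { ... };
 *	struct sock_fprog prog = {
 *		.len = sizeof(code) / sizeof(code[0]),
 *		.filter = code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * sock_setsockopt() routes SO_ATTACH_FILTER to sk_attach_filter() above.
 */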

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
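
/*
 * The matching userspace operation for sk_detach_filter(), for
 * illustration; the option value is ignored by the kernel for this
 * option, so a dummy variable is enough:
 *
 *	int dummy = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 */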