/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

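/* Illustrative sketch (not part of this file): a map backend typically
 * registers its implementation from an initcall.  The names "my_map_ops"
 * and "my_map_type_list" below are made up for illustration; see
 * kernel/bpf/arraymap.c and kernel/bpf/hashtab.c for the real registrations.
 *
 *	static struct bpf_map_type_list my_map_type_list = {
 *		.ops	= &my_map_ops,
 *		.type	= BPF_MAP_TYPE_HASH,
 *	};
 *
 *	static int __init register_my_map(void)
 *	{
 *		bpf_register_map_type(&my_map_type_list);
 *		return 0;
 *	}
 *	late_initcall(register_my_map);
 */
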
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

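/* The memlock charge above is accounted in pages against the current user's
 * RLIMIT_MEMLOCK.  For example, with a 64 KiB RLIMIT_MEMLOCK and 4 KiB pages,
 * memlock_limit is 16 pages; creating a map whose ->pages would push
 * user->locked_vm past those 16 pages fails with -EPERM.
 */
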
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

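/* A program array holds references to programs, and programs in turn hold
 * references to the maps they use, so a prog_array that (directly or
 * indirectly) contains a program using it would never be freed.  Flushing
 * the array once the last user reference is gone breaks that cycle.
 */
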
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	bpf_map_put_with_uref(filp->private_data);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n",
		   map->map_type, map->key_size,
		   map->value_size, map->max_entries);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

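/* For example, CHECK_ATTR(BPF_MAP_CREATE) expands to a memchr_inv() over the
 * bytes of 'attr' that follow 'max_entries' (the BPF_MAP_CREATE_LAST_FIELD
 * defined below) and evaluates to true if any of them is non-zero, i.e. if
 * user space set fields this command does not understand.
 */
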
#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

void bpf_map_inc(struct bpf_map *map, bool uref)
{
	atomic_inc(&map->refcnt);
	if (uref)
		atomic_inc(&map->usercnt);
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	bpf_map_inc(map, true);
	fdput(f);

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

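/* Illustrative user-space counterpart (not part of this file), assuming a
 * "map_fd" obtained from BPF_MAP_CREATE and key/value buffers of the map's
 * key_size/value_size; the field names match union bpf_attr:
 *
 *	union bpf_attr attr = {
 *		.map_fd	= map_fd,
 *		.key	= (__u64)(unsigned long)&key,
 *		.value	= (__u64)(unsigned long)&value,
 *	};
 *	err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */
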
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock(),
	 * therefore all map accessors rely on this fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

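/* User space typically iterates over a map by feeding each returned key back
 * in as the next starting point until the syscall returns -ENOENT.  Sketch
 * (not part of this file), with 'key' and 'next_key' buffers of key_size
 * bytes:
 *
 *	attr.map_fd	= map_fd;
 *	attr.key	= (__u64)(unsigned long)&key;
 *	attr.next_key	= (__u64)(unsigned long)&next_key;
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0) {
 *		... look up or delete next_key ...
 *		key = next_key;
 *	}
 */
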
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when the program has bpf_call
			 * instructions and it passed bpf_check(), which means
			 * ops->get_func_proto must have been supplied; check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as a different opcode
				 * to avoid a conditional branch in the
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * a JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

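/* After this pass the interpreter (and JITs) recover the helper's address as
 * __bpf_call_base + insn->imm, so imm carries a relative offset that fits in
 * the instruction's 32-bit immediate field.
 */
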
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}

static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* called by sockets/tracing/seccomp before attaching a program to an event;
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);

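/* Typical usage from an attach point (sketch, not part of this file):
 *
 *	prog = bpf_prog_get(ufd);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	... attach prog to the socket/tracepoint/event ...
 * and later, on detach or teardown:
 *	bpf_prog_put(prog);
 */
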
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

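/* Illustrative user-space counterpart (not part of this file); the variable
 * names are made up, the field names match union bpf_attr:
 *
 *	union bpf_attr attr = {
 *		.prog_type	= BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt	= insn_cnt,
 *		.insns		= (__u64)(unsigned long)insns,
 *		.license	= (__u64)(unsigned long)"GPL",
 *	};
 *	prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */
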
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}

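/* All of the commands above funnel through the single multiplexing syscall
 * below.  User space (e.g. the samples/bpf helpers) invokes it roughly as
 * syscall(__NR_bpf, cmd, &attr, sizeof(attr)), filling in only the fields of
 * union bpf_attr that the given command consumes.
 */
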
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}