/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;
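
/* Table of map implementations indexed by BPF_MAP_TYPE_*; entries are
 * generated from <linux/bpf_types.h> through the BPF_MAP_TYPE() macro below.
 */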
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);

	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
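
/* Allocate zeroed backing memory for a map: small areas come from kmalloc()
 * with __GFP_NORETRY so allocation fails fast under memory pressure, larger
 * ones fall back to __vmalloc().
 */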
void *bpf_map_area_alloc(size_t size)
	/* We definitely need __GFP_NORETRY, so the OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
void bpf_map_area_free(void *area)

int bpf_map_precharge_memlock(u32 pages)
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);

	if (cur + pages > memlock_limit)
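
/* Charge map->pages against the current user's RLIMIT_MEMLOCK, undoing the
 * charge if the limit would be exceeded.
 */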
static int bpf_map_charge_memlock(struct bpf_map *map)
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);

static void bpf_map_uncharge_memlock(struct bpf_map *map)
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
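
/* Drop a user reference; once the last one is gone, prog array maps are
 * cleared via bpf_fd_array_map_clear() so the programs stored in them are
 * released.
 */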
static void bpf_map_put_uref(struct bpf_map *map)
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);

void bpf_map_put_with_uref(struct bpf_map *map)
	bpf_map_put_uref(map);
static int bpf_map_release(struct inode *inode, struct file *filp)
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;

		   map->pages * 1ULL << PAGE_SHIFT);

	seq_printf(m, "owner_prog_type:\t%u\n",

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
	.release	= bpf_map_release,

int bpf_map_new_fd(struct bpf_map *map)
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
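
/* CHECK_ATTR(CMD) is true when any byte between the end of CMD's last used
 * field and the end of union bpf_attr is non-zero, i.e. user space passed
 * attributes this kernel does not know how to handle.
 */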
#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
	err = CHECK_ATTR(BPF_MAP_CREATE);

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
		/* failed to allocate fd */

	trace_bpf_map_create(map, err);

	bpf_map_uncharge_memlock(map);
	map->ops->map_free(map);

/* If an error is returned, the fd is released.
 * On success the caller should complete fd access with a matching fdput().
 */
struct bpf_map *__bpf_map_get(struct fd f)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		return ERR_PTR(-EINVAL);

	return f.file->private_data;

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768
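
/* Take another reference on the map (and optionally a user reference),
 * refusing with -EBUSY once BPF_MAX_REFCNT is reached so the counter cannot
 * overflow.
 */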
struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);

	atomic_inc(&map->usercnt);

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
	struct fd f = fdget(ufd);

	map = __bpf_map_get(f);

	map = bpf_map_inc(map, true);
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	void *key, *value, *ptr;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))

	map = __bpf_map_get(f);

	key = kmalloc(map->key_size, GFP_USER);

	if (copy_from_user(key, ukey, map->key_size) != 0)

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		ptr = map->ops->map_lookup_elem(map, key);
			memcpy(value, ptr, value_size);
		err = ptr ? 0 : -ENOENT;

	if (copy_to_user(uvalue, value, value_size) != 0)

	trace_bpf_map_lookup_elem(map, ufd, key, value);

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))

	map = __bpf_map_get(f);

	key = kmalloc(map->key_size, GFP_USER);

	if (copy_from_user(key, ukey, map->key_size) != 0)

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);

	if (copy_from_user(value, uvalue, value_size) != 0)
	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete, otherwise deadlocks are possible
	 */
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
		err = map->ops->map_update_elem(map, key, value, attr->flags);
	__this_cpu_dec(bpf_prog_active);

	trace_bpf_map_update_elem(map, ufd, key, value);

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))

	map = __bpf_map_get(f);

	key = kmalloc(map->key_size, GFP_USER);

	if (copy_from_user(key, ukey, map->key_size) != 0)

	__this_cpu_inc(bpf_prog_active);
	err = map->ops->map_delete_elem(map, key);
	__this_cpu_dec(bpf_prog_active);

	trace_bpf_map_delete_elem(map, ufd, key);

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	void *key, *next_key;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))

	map = __bpf_map_get(f);

	key = kmalloc(map->key_size, GFP_USER);

	if (copy_from_user(key, ukey, map->key_size) != 0)

	next_key = kmalloc(map->key_size, GFP_USER);

	err = map->ops->map_get_next_key(map, key, next_key);

	if (copy_to_user(unext_key, next_key, map->key_size) != 0)

	trace_bpf_map_next_key(map, ufd, key, next_key);
static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])

	prog->aux->ops = bpf_prog_types[type];

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
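
/* Account 'pages' against the user's RLIMIT_MEMLOCK on behalf of a program,
 * backing the charge out again if the limit would be exceeded.
 */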
int __bpf_prog_charge(struct user_struct *user, u32 pages)
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	user_bufs = atomic_long_add_return(pages, &user->locked_vm);
	if (user_bufs > memlock_limit) {
		atomic_long_sub(pages, &user->locked_vm);

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
	atomic_long_sub(pages, &user->locked_vm);

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
	struct user_struct *user = get_current_user();

	ret = __bpf_prog_charge(user, prog->pages);

	prog->aux->user = user;

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
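
/* Program freeing is deferred through RCU: once the last reference is gone,
 * the callback below drops the memlock charge and frees the program after
 * concurrent RCU readers can no longer be executing it.
 */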
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);

void bpf_prog_put(struct bpf_prog *prog)
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
	struct bpf_prog *prog = filp->private_data;

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
		   prog->pages * 1ULL << PAGE_SHIFT);

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
	.release	= bpf_prog_release,

int bpf_prog_new_fd(struct bpf_prog *prog)
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,

static struct bpf_prog *____bpf_prog_get(struct fd f)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		return ERR_PTR(-EINVAL);

	return f.file->private_data;

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
	return bpf_prog_add(prog, 1);
EXPORT_SYMBOL_GPL(bpf_prog_inc);
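
/* Resolve a program fd to a bpf_prog, optionally insisting on a particular
 * program type, and take an extra reference on success.
 */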
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);

	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);

	prog = bpf_prog_inc(prog);

struct bpf_prog *bpf_prog_get(u32 ufd)
	return __bpf_prog_get(ufd, NULL);

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

		trace_bpf_prog_get_type(prog);
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version
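
/* BPF_PROG_LOAD: copy the license and instructions from user space, run the
 * verifier via bpf_check(), pick the runtime (interpreter or JIT) with
 * bpf_prog_select_runtime() and hand the program back as a new fd.
 */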
static int bpf_prog_load(union bpf_attr *attr)
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;

	if (CHECK_ATTR(BPF_PROG_LOAD))

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);

	err = bpf_prog_charge_memlock(prog);
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)

	prog->orig_prog = NULL;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);

	err = bpf_prog_new_fd(prog);
		/* failed to allocate fd */

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);

	free_used_maps(prog->aux);
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:

#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
	if (CHECK_ATTR(BPF_OBJ))

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));

static int bpf_obj_get(const union bpf_attr *attr)
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;

	if (!capable(CAP_NET_ADMIN))

	if (CHECK_ATTR(BPF_PROG_ATTACH))

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);

#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
	if (!capable(CAP_NET_ADMIN))

	if (CHECK_ATTR(BPF_PROG_DETACH))

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);

#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
	struct bpf_prog *prog;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))

	prog = bpf_prog_get(attr->test.prog_fd);
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);
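
/* bpf(2) syscall entry point: unprivileged callers are rejected while
 * sysctl_unprivileged_bpf_disabled is set, an attr larger than the kernel's
 * union bpf_attr is accepted only if all the extra bytes are zero, and the
 * command is then dispatched to one of the handlers above.
 */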
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
	union bpf_attr attr = {};

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)

	if (!access_ok(VERIFY_READ, uattr, 1))

	if (size > PAGE_SIZE)	/* silly large */

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
		size = sizeof(attr);

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);