/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}

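/* A sketch of how a map implementation uses this hook (the htab names
 * below are illustrative, not taken from this file): each backend owns a
 * static bpf_map_type_list and registers it from an initcall.
 *
 *	static struct bpf_map_type_list htab_type __ro_after_init = {
 *		.ops	= &htab_ops,
 *		.type	= BPF_MAP_TYPE_HASH,
 *	};
 *
 *	static int __init register_htab_map(void)
 *	{
 *		bpf_register_map_type(&htab_type);
 *		return 0;
 *	}
 *	late_initcall(register_htab_map);
 */
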
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so that the allocator
	 * doesn't trigger the OOM killer under memory pressure as we
	 * really just want to fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
			 PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

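/* Usage sketch (illustrative, not from this file): map implementations
 * pair these helpers for their large allocations, e.g. a hypothetical
 * hash table's bucket array:
 *
 *	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
 *					   sizeof(struct bucket));
 *	if (!htab->buckets)
 *		return -ENOMEM;
 *	...
 *	bpf_map_area_free(htab->buckets);
 *
 * kvfree() in bpf_map_area_free() dispatches to kfree() or vfree()
 * depending on which path the allocation above actually took.
 */
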
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

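/* Worked example: with RLIMIT_MEMLOCK = 64 MiB and 4 KiB pages,
 * memlock_limit is 16384 pages; a precharge succeeds only while the
 * user's current locked_vm plus the requested pages stays at or below
 * that limit, otherwise -EPERM is returned.
 */
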
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

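/* For instance, with BPF_MAP_CREATE_LAST_FIELD defined as map_flags
 * (see below), CHECK_ATTR(BPF_MAP_CREATE) expands roughly to:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 *
 * i.e. every byte past the last field a command understands must be zero,
 * which is what lets the attr union grow without breaking old commands.
 */
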
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

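/* Usage sketch from user space (illustrative; assumes <linux/bpf.h> and
 * syscall(2), and sufficient privilege per sysctl_unprivileged_bpf_disabled):
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 256,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
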
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

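/* Usage sketch (illustrative): looking up one key of a per-CPU map from
 * user space; the value buffer must cover round_up(value_size, 8) bytes
 * for every possible CPU, matching the sizing logic above:
 *
 *	__u32 key = 0;
 *	__u64 values[64];	// assumes at most 64 possible CPUs
 *	union bpf_attr attr = {
 *		.map_fd = map_fd,
 *		.key    = (__u64)(unsigned long)&key,
 *		.value  = (__u64)(unsigned long)values,
 *	};
 *	int err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 */
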
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

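/* The update flags encode existence requirements: BPF_ANY creates or
 * replaces, BPF_NOEXIST only creates, BPF_EXIST only replaces. Sketch:
 *
 *	attr.flags = BPF_NOEXIST;	// -EEXIST if the key is already present
 *	err = syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 */
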
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

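/* Usage sketch (illustrative): iterating a map's keys from user space.
 * Most map implementations restart from the first element when the
 * supplied key is not found, and -ENOENT marks the end of the walk:
 *
 *	__u32 key = -1, next_key;	// -1: assumed absent, starts iteration
 *	union bpf_attr attr = {
 *		.map_fd   = map_fd,
 *		.key      = (__u64)(unsigned long)&key,
 *		.next_key = (__u64)(unsigned long)&next_key,
 *	};
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr)) == 0)
 *		key = next_key;
 */
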
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after the eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), which means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_xdp_adjust_head)
				prog->xdp_adjust_head = 1;
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have a prototype and that the
			 * verifier allowed programs to call must be real
			 * in-kernel functions
			 */
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

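/* Usage sketch (illustrative): loading a minimal "return 0" socket filter;
 * BPF_MOV64_IMM()/BPF_EXIT_INSN() are the instruction macros from
 * <linux/filter.h>, and no kern_version is needed for this program type:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 0),	// r0 = 0, the return value
 *		BPF_EXIT_INSN(),
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.insn_cnt  = 2,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */
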
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

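/* Usage sketch (illustrative): pinning an fd so the object survives the
 * owning process, then reopening it later; paths must live on a mounted
 * bpf filesystem, conventionally /sys/fs/bpf:
 *
 *	union bpf_attr attr = {
 *		.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map",
 *		.bpf_fd   = map_fd,
 *	};
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */
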
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

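/* Usage sketch (illustrative): attaching a loaded BPF_PROG_TYPE_CGROUP_SKB
 * program to a cgroup's ingress hook, where cg_fd is an open fd of a
 * cgroup v2 directory:
 *
 *	union bpf_attr attr = {
 *		.target_fd     = cg_fd,
 *		.attach_bpf_fd = prog_fd,
 *		.attach_type   = BPF_CGROUP_INET_INGRESS,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
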
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;

	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;