/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

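/* Per-cpu recursion guard: map update/delete paths below bump this so
 * that a kprobe-attached BPF program firing inside those paths cannot
 * re-enter them and deadlock; see the comment in map_update_elem().
 */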
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

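/* Table of map implementations, generated from <linux/bpf_types.h>:
 * each BPF_MAP_TYPE(_id, _ops) entry expands to [_id] = &_ops, while
 * BPF_PROG_TYPE() expands to nothing here. bpf_prog_types[] further
 * down plays the same trick with the two roles swapped.
 */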
static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

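/* Small map areas come from kmalloc(); anything above the costly-order
 * threshold (or a failed kmalloc) falls back to __vmalloc(), and
 * bpf_map_area_free() relies on kvfree() coping with either kind.
 */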
void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

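/* Map memory is accounted against the owning user's RLIMIT_MEMLOCK:
 * the precharge variant only checks for headroom, while the
 * charge/uncharge pair below actually moves map->pages in and out of
 * user->locked_vm.
 */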
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

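/* Maps carry two counts: refcnt pins the map object itself, usercnt
 * tracks references reachable from user space (fds, pinned paths).
 * Dropping the last user reference clears a prog_array so that programs
 * and the array do not keep each other alive through a reference cycle.
 */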
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

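/* CHECK_ATTR(CMD) is true, i.e. the attribute is rejected, when any
 * byte between the end of CMD##_LAST_FIELD and the end of union
 * bpf_attr is non-zero: each command accepts exactly the fields it
 * knows about and garbage in the unused tail fails cleanly.
 */
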
#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

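/* Note the f_op comparison above doubles as a type check: only files
 * created by bpf_map_new_fd() carry &bpf_map_fops, so an arbitrary fd
 * cannot be passed off as a map.
 */
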
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

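/* Weak stub so the syscall layer links when the stack map implementation
 * (kernel/bpf/stackmap.c) is not built in; the real definition, when
 * present, overrides it.
 */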
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

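/* For per-cpu maps, lookup returns one value per possible CPU, laid out
 * back to back with each element rounded up to 8 bytes, so user space
 * must supply round_up(value_size, 8) * num_possible_cpus() bytes.
 */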
static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

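/* attr->flags selects the update mode: BPF_ANY creates or replaces,
 * BPF_NOEXIST only creates, BPF_EXIST only replaces; the individual
 * map implementations enforce this in their map_update_elem() ops.
 */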
static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

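/* map_get_next_key() is the primitive user space uses to walk a map:
 * start from any key (one that does not exist is fine) and keep feeding
 * the returned next_key back in until the op reports -ENOENT.
 */
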
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

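/* Program teardown is deferred through RCU: an fd can be closed while
 * the program is still executing on another CPU under rcu_read_lock(),
 * so the final free runs only after a grace period has elapsed.
 */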
static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

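/* Type-checked variant used by attach points: fails with -EINVAL when
 * the fd refers to a program of a different type than expected.
 */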
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

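/* Loading pipeline: validate the attr, copy license and instructions
 * from user space, resolve the program type, run the verifier, pick the
 * runtime (JIT or interpreter) via bpf_prog_select_runtime(), and only
 * then hand out an anon-inode fd. Without CAP_SYS_ADMIN, only socket
 * filter programs may be loaded.
 */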
static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

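/* Pinning gives a map or program a name in the BPF filesystem so the
 * object can outlive the process that created it; bpf_obj_get() reopens
 * a pinned object as a fresh fd. Both helpers live in kernel/bpf/inode.c.
 */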
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

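/* BPF_F_ALLOW_OVERRIDE at attach time permits descendant cgroups to
 * install their own program over the inherited one; without it, later
 * attaches below this cgroup are rejected.
 */
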
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

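/* BPF_PROG_TEST_RUN executes a program in the kernel against caller
 * supplied input and reports back output and duration; only program
 * types whose ops implement test_run support it, everything else gets
 * -ENOTSUPP.
 */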
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

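/* Single entry point: every BPF command is multiplexed through this
 * syscall. The size handshake below rejects attrs whose unknown tail
 * bytes are non-zero, so newer user space fails cleanly on older
 * kernels instead of being silently misinterpreted.
 */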
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);