/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
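
/* Array maps back several BPF map types: the plain and per-cpu arrays indexed
 * by a u32 key, and the fd-array flavors (prog, perf event and cgroup arrays)
 * whose values are pointers to kernel objects resolved from file descriptors.
 */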

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		free_percpu(array->pptrs[i]);
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
	}

	return 0;
}
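
/* A plain array stores its elements inline, directly behind struct bpf_array;
 * a per-cpu array stores one per-cpu pointer per element instead.  This is
 * why the two layouts are sized differently in array_map_alloc() below.
 */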

/* Called from syscall */
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	struct bpf_array *array;
	u64 array_size;
	u32 elem_size;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 || attr->map_flags)
		return ERR_PTR(-EINVAL);

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return ERR_PTR(-E2BIG);

	elem_size = round_up(attr->value_size, 8);

	array_size = sizeof(*array);
	if (percpu)
		array_size += (u64) attr->max_entries * sizeof(void *);
	else
		array_size += (u64) attr->max_entries * elem_size;

	/* make sure there is no u32 overflow later in round_up() */
	if (array_size >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-ENOMEM);

	/* allocate all map elements and zero-initialize them */
	array = bpf_map_area_alloc(array_size);
	if (!array)
		return ERR_PTR(-ENOMEM);

	/* copy mandatory map attributes */
	array->map.map_type = attr->map_type;
	array->map.key_size = attr->key_size;
	array->map.value_size = attr->value_size;
	array->map.max_entries = attr->max_entries;
	array->elem_size = elem_size;

	if (!percpu)
		goto out;

	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();

	if (array_size >= U32_MAX - PAGE_SIZE ||
	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}
out:
	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * index;
}
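
/* The verifier can inline array lookups by patching the call site of
 * bpf_map_lookup_elem() with the sequence emitted below.  At the patched
 * call site BPF_REG_1 holds the map pointer and BPF_REG_2 the pointer to
 * the key, and the emitted instructions are roughly equivalent to:
 *
 *	map_ptr += offsetof(struct bpf_array, value);
 *	index = *(u32 *)key;
 *	if (index >= max_entries)
 *		return NULL;
 *	return map_ptr + index * elem_size;
 *
 * leaving the element address (or NULL) in BPF_REG_0.
 */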

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index]);
}
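
/* Copy the values of all possible CPUs for the given index into a flat user
 * buffer, one round_up(value_size, 8) sized chunk per possible CPU.
 */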

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		memcpy(this_cpu_ptr(array->pptrs[index]),
		       value, map->value_size);
	else
		memcpy(array->value + array->elem_size * index,
		       value, map->value_size);
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding programs to complete
	 * and free the array
	 */
	synchronize_rcu();

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	bpf_map_area_free(array);
}

static const struct bpf_map_ops array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
};

static struct bpf_map_type_list array_type __ro_after_init = {
	.ops = &array_ops,
	.type = BPF_MAP_TYPE_ARRAY,
};

static const struct bpf_map_ops percpu_array_ops = {
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
};

static struct bpf_map_type_list percpu_array_type __ro_after_init = {
	.ops = &percpu_array_ops,
	.type = BPF_MAP_TYPE_PERCPU_ARRAY,
};

static int __init register_array_map(void)
{
	bpf_register_map_type(&array_type);
	bpf_register_map_type(&percpu_array_type);
	return 0;
}
late_initcall(register_array_map);
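
/* The fd-array map flavors below store pointers to kernel objects rather
 * than plain data.  An update translates a user-supplied file descriptor
 * into such a pointer via the map_fd_get_ptr callback and publishes it with
 * xchg(), so concurrent readers see either the old or the new pointer.
 */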

static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return ERR_PTR(-EINVAL);
	return array_map_alloc(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	synchronize_rcu();

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	old_ptr = xchg(array->ptrs + index, new_ptr);
	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);

	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	old_ptr = xchg(array->ptrs + index, NULL);
	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

/* decrement refcnt of all bpf_progs that are stored in this map */
void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static const struct bpf_map_ops prog_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
};

static struct bpf_map_type_list prog_array_type __ro_after_init = {
	.ops = &prog_array_ops,
	.type = BPF_MAP_TYPE_PROG_ARRAY,
};

static int __init register_prog_array_map(void)
{
	bpf_register_map_type(&prog_array_type);
	return 0;
}
late_initcall(register_prog_array_map);
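
/* A bpf_event_entry ties a perf event file to the map file through which it
 * was installed, so that perf_event_fd_array_release() can drop exactly the
 * entries that were added via that particular map fd.
 */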

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}
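
/* Only non-inherited software (BPF_OUTPUT), raw and hardware perf events are
 * accepted here; the returned entry holds a reference on the perf event file
 * that is dropped after an RCU grace period when the entry is freed.
 */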

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	const struct perf_event_attr *attr;
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	event = perf_file->private_data;
	ee = ERR_PTR(-EINVAL);

	attr = perf_event_attrs(event);
	if (IS_ERR(attr) || attr->inherit)
		goto err_out;

	switch (attr->type) {
	case PERF_TYPE_SOFTWARE:
		if (attr->config != PERF_COUNT_SW_BPF_OUTPUT)
			goto err_out;
		/* fall-through */
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
		ee = bpf_event_entry_gen(perf_file, map_file);
		if (ee)
			return ee;
		ee = ERR_PTR(-ENOMEM);
		/* fall-through */
	default:
		break;
	}

err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static const struct bpf_map_ops perf_event_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
};

static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
	.ops = &perf_event_array_ops,
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};

static int __init register_perf_event_array_map(void)
{
	bpf_register_map_type(&perf_event_array_type);
	return 0;
}
late_initcall(register_perf_event_array_map);

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static const struct bpf_map_ops cgroup_array_ops = {
	.map_alloc = fd_array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
};

static struct bpf_map_type_list cgroup_array_type __ro_after_init = {
	.ops = &cgroup_array_ops,
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
};

static int __init register_cgroup_array_map(void)
{
	bpf_register_map_type(&cgroup_array_type);
	return 0;
}
late_initcall(register_cgroup_array_map);
#endif