/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License. See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

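/* cgroup_bpf_enabled, the static_branch_unlikely() wrapper around this key
 * in <linux/bpf-cgroup.h>, keeps every hook below a patched-out no-op until
 * the first program is attached (see static_branch_inc() in
 * __cgroup_bpf_attach()).
 */
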
/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			for_each_cgroup_storage_type(stype) {
				bpf_cgroup_storage_unlink(pl->storage[stype]);
				bpf_cgroup_storage_free(pl->storage[stype]);
			}
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

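/* Example of the rule above: if an ancestor has one program attached with
 * flags == 0 (non-overridable), attaching anywhere below it is rejected
 * (the caller returns -EPERM); if that ancestor used BPF_F_ALLOW_OVERRIDE
 * or BPF_F_ALLOW_MULTI instead, the descendant attach is allowed.
 */
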
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!pl->prog)
				continue;

			progs->items[cnt].prog = pl->prog;
			for_each_cgroup_storage_type(stype)
				progs->items[cnt].cgroup_storage[stype] =
					pl->storage[stype];
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	rcu_assign_pointer(*array, progs);
	return 0;
}

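/* Example: with prog A attached to the root and progs B and C attached to
 * its child, all with BPF_F_ALLOW_MULTI, the child's effective array comes
 * out as [B, C, A]: the populate loop above starts at @cgrp, so a cgroup's
 * own programs always precede its ancestors' entries.
 */
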
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	enum bpf_cgroup_storage_type stype;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				for_each_cgroup_storage_type(stype)
					bpf_cgroup_storage_free(storage[stype]);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			for_each_cgroup_storage_type(stype) {
				old_storage[stype] = pl->storage[stype];
				bpf_cgroup_storage_unlink(old_storage[stype]);
			}
			pl_was_allocated = false;
		}
		pl->prog = prog;
		for_each_cgroup_storage_type(stype)
			pl->storage[stype] = storage[stype];
	}

	cgrp->bpf.flags[type] = flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	static_branch_inc(&cgroup_bpf_enabled_key);
	for_each_cgroup_storage_type(stype) {
		if (!old_storage[stype])
			continue;
		bpf_cgroup_storage_free(old_storage[stype]);
	}
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storage[stype], cgrp, type);
	return 0;

cleanup:
	/* and cleanup the prog list */
	pl->prog = old_prog;
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_free(pl->storage[stype]);
		pl->storage[stype] = old_storage[stype];
		bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
	}
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

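/* Userspace reaches this through the bpf(2) syscall; a minimal sketch
 * (error handling omitted, cgroup_fd is an open cgroup directory fd):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
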
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	enum bpf_cgroup_storage_type stype;
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	for_each_cgroup_storage_type(stype) {
		bpf_cgroup_storage_unlink(pl->storage[stype]);
		bpf_cgroup_storage_free(pl->storage[stype]);
	}
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

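/* The matching userspace operation is BPF_PROG_DETACH with the same
 * target_fd and attach_type; for non-MULTI attachments attach_bpf_fd may be
 * left out entirely (prog == NULL here), per the backward-compatibility
 * note above.
 */
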
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

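/* Userspace sizing sketch: a BPF_PROG_QUERY call with prog_cnt == 0 returns
 * only attach_flags and the count, which sizes the prog_ids buffer for a
 * second call:
 *
 *	attr.query.target_fd	= cgroup_fd;
 *	attr.query.attach_type	= BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_cnt	= 0;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */
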
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 __bpf_prog_run_save_cb);
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

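/* BPF side sketch (built separately, libbpf section naming) of the smallest
 * program this hook can run; returning 1 passes the packet, any other value
 * makes the hook return -EPERM:
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */
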
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket that is passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 *            sk with connection information (IP addresses, etc.) May not
 *            contain cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

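/* A BPF_PROG_TYPE_CGROUP_DEVICE program sees the packed encoding built
 * above and can unpack it as, e.g.:
 *
 *	u32 dev_type = ctx->access_type & 0xFFFF;   (BPF_DEVCG_DEV_*)
 *	u32 access   = ctx->access_type >> 16;      (BPF_DEVCG_ACC_*)
 */
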
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

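/* BPF side sketch of a BPF_PROG_TYPE_CGROUP_SYSCTL program for this hook
 * (libbpf section naming); returning 0 turns the access into -EPERM:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *		return ctx->write ? 0 : 1;
 *	}
 */
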
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case offsetof(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case offsetof(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       FIELD_SIZEOF(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg, 0);
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg, 0);
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};