/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License. See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* Count the number of programs in the list. It is slow, but the list
 * cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* If a parent has a non-overridable prog attached, disallow attaching
 * new programs to the descendant cgroup. If a parent has an overridable
 * or multi-prog attachment, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

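/* Illustrative sketch (editorial, not part of the original file): for a
 * hierarchy A -> B, whether an attach to B succeeds depends on the flags
 * A was attached with:
 *
 *	A attached with no flags	-> attach to B fails (-EPERM)
 *	A attached with ALLOW_OVERRIDE	-> B's program overrides A's
 *					   within B's subtree
 *	A attached with ALLOW_MULTI	-> both A's and B's programs run
 */
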
/* Compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			list_for_each_entry(pl,
					    &p->bpf.progs[type], node) {
				if (!pl->prog)
					continue;
				progs->progs[cnt++] = pl->prog;
			}
		p = cgroup_parent(p);
	} while (p);

	rcu_assign_pointer(*array, progs);
	return 0;
}

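/* Illustrative sketch (editorial, not part of the original file): for a
 * hierarchy root -> A -> B where every level was attached with
 * BPF_F_ALLOW_MULTI, the effective array computed for B is ordered
 *
 *	{ B's progs..., A's progs..., root's progs... }
 *
 * i.e. the cgroup's own programs come first and the ancestors' follow,
 * because the walk starts at the cgroup itself and ascends via
 * cgroup_parent().
 */
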
static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* Free the prog array after a grace period, since
	 * __cgroup_bpf_run_*() might still be walking the array.
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* Has to use a macro instead of a const int, since the compiler thinks
 * that the array below is variable length.
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}

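/* Context note (editorial, hedged): cgroup_bpf_inherit() runs when a new
 * cgroup is created, so a fresh cgroup starts out with effective arrays
 * mirroring its ancestors' programs before anything is attached to it
 * directly.
 */
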
#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Attach flags (BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI or none)
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;
	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 * is attached.
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node)
			if (pl->prog == prog)
				/* disallow attaching the same prog twice */
				return -EINVAL;

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl)
			return -ENOMEM;
		pl_was_allocated = true;
		pl->prog = prog;
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl)
				return -ENOMEM;
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			pl_was_allocated = false;
		}
		pl->prog = prog;
	}

	cgrp->bpf.flags[type] = flags;
	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	static_branch_inc(&cgroup_bpf_enabled_key);
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	return 0;
cleanup:
	/* OOM while computing effective progs. Free all computed effective
	 * arrays since they were not activated.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}
	/* and clean up the prog list */
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	} else {
		pl->prog = old_prog;
	}
	return err;
}

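/* Example usage from userspace (illustrative sketch, not part of the
 * original file): this path is reached via the bpf(2) syscall with the
 * BPF_PROG_ATTACH command, e.g.:
 *
 *	union bpf_attr attr = {};
 *	attr.target_fd	   = cgroup_fd;	// from open("/sys/fs/cgroup/...")
 *	attr.attach_bpf_fd = prog_fd;	// from BPF_PROG_LOAD
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
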
/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 * @unused_flags: Flags are unused for detach
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 unused_flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach a MULTI prog the user has to specify a
			 * valid FD of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}
	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* now we can actually delete it from this cgroup's list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;
cleanup:
	/* OOM while computing effective progs. Free all computed effective
	 * arrays since they were not activated.
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and restore the old_prog */
	pl->prog = old_prog;
	return err;
}

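/* Example usage from userspace (illustrative sketch, not part of the
 * original file): detach is reached via bpf(2) with BPF_PROG_DETACH. For
 * ALLOW_MULTI attachments, attach_bpf_fd must identify the program to
 * remove; for NONE/OVERRIDE it may be omitted:
 *
 *	union bpf_attr attr = {};
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;	// required only for ALLOW_MULTI
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */
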
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

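/* Example usage from userspace (illustrative sketch, not part of the
 * original file): BPF_PROG_QUERY returns the attach flags and the IDs of
 * attached programs; BPF_F_QUERY_EFFECTIVE lists the effective array
 * instead of only the directly attached programs:
 *
 *	__u32 ids[64] = {};
 *	union bpf_attr attr = {};
 *	attr.query.target_fd	= cgroup_fd;
 *	attr.query.attach_type	= BPF_CGROUP_INET_INGRESS;
 *	attr.query.query_flags	= BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids	= (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt	= 64;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */
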
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 bpf_prog_run_save_cb);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

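/* Illustrative sketch (editorial, not part of the original file): a
 * minimal BPF_PROG_TYPE_CGROUP_SKB program for the ingress/egress hooks
 * above. Returning 1 allows the packet; any other value makes this
 * helper return -EPERM:
 *
 *	SEC("cgroup_skb/egress")
 *	int allow_all(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */
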
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

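/* Context note (editorial): this helper backs attach types such as
 * BPF_CGROUP_INET_SOCK_CREATE, which the INET stack invokes at socket
 * creation time, so a program returning 0 here makes socket(2) fail
 * with -EPERM.
 */
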
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by userspace
 * @sk: sock struct that will use the sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoints (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

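/* Context note (editorial): this helper backs the sockaddr hooks such as
 * BPF_CGROUP_INET4_BIND/INET6_BIND and BPF_CGROUP_INET4_CONNECT/
 * INET6_CONNECT, where a BPF_PROG_TYPE_CGROUP_SOCK_ADDR program can
 * inspect, and for some hooks rewrite, the address passed to bind(2) or
 * connect(2).
 */
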
/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

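/* Context note (editorial): this helper backs BPF_CGROUP_SOCK_OPS. The
 * TCP stack invokes it at various connection events via tcp_call_bpf(),
 * letting a BPF_PROG_TYPE_SOCK_OPS program tune per-connection values
 * such as the initial RTO.
 */
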
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access,
				      enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

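/* Context note (editorial): in ctx.access_type the low 16 bits carry the
 * device type (BPF_DEVCG_DEV_BLOCK or BPF_DEVCG_DEV_CHAR) and the upper
 * 16 bits the requested access mask (BPF_DEVCG_ACC_READ/WRITE/MKNOD), so
 * a BPF_PROG_TYPE_CGROUP_DEVICE program typically unpacks it as:
 *
 *	short type   = ctx->access_type & 0xFFFF;
 *	short access = ctx->access_type >> 16;
 */
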
static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};