#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
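
/*
 * In other words, CGROUP_WEIGHT_DFL == sqrt(CGROUP_WEIGHT_MIN *
 * CGROUP_WEIGHT_MAX): sqrt(1 * 10000) == 100, so a weight can be scaled
 * down 100x (to 1) or up 100x (to 10000) relative to the default.
 */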

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;

	struct css_set			*cur_cset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on the default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)
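
/*
 * Example (a sketch, not from the original header; do_memcg_work() is a
 * hypothetical helper): these tests compile down to static branches and
 * are suitable for hot paths.  @ss is the subsystem variable name:
 *
 *	if (cgroup_subsys_enabled(memory_cgrp_subsys))
 *		do_memcg_work();
 */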

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
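
/*
 * Example: walking every task in a css (a minimal sketch, not from the
 * original header; my_count_tasks() is hypothetical):
 *
 *	static int my_count_tasks(struct cgroup_subsys_state *css)
 *	{
 *		struct css_task_iter it;
 *		struct task_struct *task;
 *		int n = 0;
 *
 *		css_task_iter_start(css, 0, &it);
 *		while ((task = css_task_iter_next(&it)))
 *			n++;
 *		css_task_iter_end(&it);
 *		return n;
 *	}
 *
 * Pass CSS_TASK_ITER_PROCS as @flags to visit only threadgroup leaders.
 */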

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
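
/*
 * Example (a minimal sketch, not from the original header;
 * my_count_children() is hypothetical):
 *
 *	static int my_count_children(struct cgroup_subsys_state *parent)
 *	{
 *		struct cgroup_subsys_state *child;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		css_for_each_child(child, parent)
 *			n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */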

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants.  @root is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
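
/*
 * A concrete shape of the pattern above (a sketch, not from the original
 * header; my_state_lock, my_update_root() and my_inherit_from_parent()
 * are hypothetical):
 *
 *	static void my_propagate(struct cgroup_subsys_state *root_css)
 *	{
 *		struct cgroup_subsys_state *pos;
 *
 *		rcu_read_lock();
 *		css_for_each_descendant_pre(pos, root_css) {
 *			spin_lock(&my_state_lock);
 *			if (pos == root_css)
 *				my_update_root(pos);
 *			else if (css_tryget_online(pos)) {
 *				my_inherit_from_parent(pos);
 *				css_put(pos);
 *			}
 *			spin_unlock(&my_state_lock);
 *		}
 *		rcu_read_unlock();
 *	}
 */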

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
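
/*
 * Example (a sketch, not from the original header; my_fold_into_parent()
 * is hypothetical): post-order visits children before their parent,
 * which suits bottom-up aggregation:
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, root_css)
 *		if (pos != root_css)
 *			my_fold_into_parent(pos);
 *	rcu_read_unlock();
 */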

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
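
/*
 * Example: typical use from a cgroup_subsys ->can_attach() callback (a
 * sketch, not from the original header; my_can_attach() and
 * my_task_allowed() are hypothetical):
 *
 *	static int my_can_attach(struct cgroup_taskset *tset)
 *	{
 *		struct cgroup_subsys_state *dst_css;
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, dst_css, tset)
 *			if (!my_task_allowed(task, dst_css))
 *				return -EPERM;
 *		return 0;
 *	}
 */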

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else
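
/*
 * The trailing if/else in the macro above filters the walk down to
 * threadgroup leaders while still letting the caller attach a normal
 * loop body: a non-leader falls into the empty "if" arm and the caller's
 * body becomes the "else".
 */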

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is on or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
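
/*
 * Example: the RCU + tryget pattern (a sketch, not from the original
 * header; my_lookup_css() is hypothetical and stands for any RCU
 * protected way of reaching a css):
 *
 *	rcu_read_lock();
 *	css = my_lookup_css();
 *	if (css && !css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *	if (css) {
 *		...use css...
 *		css_put(css);
 *	}
 */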

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}
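
/*
 * Example (a sketch, not from the original header): make new users fail
 * synchronously once rmdir has been issued, even though ->css_offline()
 * may not have run yet:
 *
 *	if (css_is_dying(css))
 *		return -ENODEV;
 */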

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it.  This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		if (likely(css_tryget_online(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}
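
/*
 * Design note: each cgroup caches the IDs of all of its ancestors,
 * indexed by level, which is what makes the test above O(1): the ID
 * recorded at @ancestor's level is compared against @ancestor's own ID
 * instead of walking the parent chain.
 */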

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return cgrp->kn->ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}
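
/*
 * Example (a sketch, not from the original header): printing the path of
 * @task's cgroup on a given subsystem for debugging:
 *
 *	char buf[PATH_MAX];
 *
 *	rcu_read_lock();
 *	if (cgroup_path(task_cgroup(task, cpu_cgrp_id), buf, sizeof(buf)) >= 0)
 *		pr_info("task is in %s\n", buf);
 *	rcu_read_unlock();
 */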

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization.  The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

#endif /* !CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */
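
/*
 * Design note: sock_cgroup_data overloads a single word.  When the low
 * bit of ->val is set, the field is carrying net_prio/net_cls data
 * rather than a cgroup pointer, and sock_cgroup_ptr() above falls back
 * to the default root.  See the sock_cgroup_data definition in
 * cgroup-defs.h for the full encoding.
 */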

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */