/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/fs_context.h>

#define TRACE_CGROUP_PATH_LEN 1024
extern spinlock_t trace_cgroup_path_lock;
extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN];
extern bool cgroup_debug;
extern void __init enable_debug_cgroup(void);

/*
 * cgroup_path() takes a spin lock. It is good practice not to take
 * spin locks within trace point handlers, as they are mostly hidden
 * from normal view. As cgroup_path() can take the kernfs_rename_lock
 * spin lock, it is best to not call that function from the trace event
 * handler.
 *
 * Note: trace_cgroup_##type##_enabled() is a static branch that will only
 * be set when the trace event is enabled.
 */
#define TRACE_CGROUP_PATH(type, cgrp, ...)                              \
        do {                                                            \
                if (trace_cgroup_##type##_enabled()) {                  \
                        spin_lock(&trace_cgroup_path_lock);             \
                        cgroup_path(cgrp, trace_cgroup_path,            \
                                    TRACE_CGROUP_PATH_LEN);             \
                        trace_cgroup_##type(cgrp, trace_cgroup_path,    \
                                            ##__VA_ARGS__);             \
                        spin_unlock(&trace_cgroup_path_lock);           \
                }                                                       \
        } while (0)
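
/*
 * Usage sketch (illustrative, assuming a cgroup trace event such as
 * trace_cgroup_mkdir() is defined for the call site):
 *
 *      TRACE_CGROUP_PATH(mkdir, cgrp);
 *
 * The static branch keeps the path formatting and the global-buffer
 * lock entirely off the fast path while the event is disabled.
 */
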
/*
 * The cgroup filesystem superblock creation/mount context.
 */
struct cgroup_fs_context {
        struct kernfs_fs_context kfc;
        struct cgroup_root      *root;
        struct cgroup_namespace *ns;
        unsigned int    flags;                  /* CGRP_ROOT_* flags */

        /* cgroup1 bits */
        bool            cpuset_clone_children;
        bool            none;                   /* User explicitly requested empty subsystem */
        bool            all_ss;                 /* Seen 'all' option */
        u16             subsys_mask;            /* Selected subsystems */
        char            *name;                  /* Hierarchy name */
        char            *release_agent;         /* Path for release notifications */
};

static inline struct cgroup_fs_context *cgroup_fc2context(struct fs_context *fc)
{
        struct kernfs_fs_context *kfc = fc->fs_private;

        return container_of(kfc, struct cgroup_fs_context, kfc);
}
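
/*
 * Usage sketch (illustrative; cgroup_get_tree_example() is a made-up
 * caller, not part of this header): an fs_context operation recovers
 * the cgroup mount context from the generic fs_context like so:
 *
 *      static int cgroup_get_tree_example(struct fs_context *fc)
 *      {
 *              struct cgroup_fs_context *ctx = cgroup_fc2context(fc);
 *
 *              if (ctx->none)
 *                      return -EINVAL;
 *              return cgroup_do_get_tree(fc);
 *      }
 */
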
/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
        /* the cgroup and css_set this link associates */
        struct cgroup           *cgrp;
        struct css_set          *cset;

        /* list of cgrp_cset_links anchored at cgrp->cset_links */
        struct list_head        cset_link;

        /* list of cgrp_cset_links anchored at css_set->cgrp_links */
        struct list_head        cgrp_link;
};
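
/*
 * Traversal sketch (illustrative; assumes css_set_lock is held, and
 * process_cset() is a made-up callback): walking every css_set linked
 * to a cgroup goes through the link structure:
 *
 *      struct cgrp_cset_link *link;
 *
 *      list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *              process_cset(link->cset);
 *
 * The reverse direction iterates cset->cgrp_links over ->cgrp_link.
 */
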
/* used to track tasks and csets during migration */
struct cgroup_taskset {
        /* the src and dst cset list running through cset->mg_node */
        struct list_head        src_csets;
        struct list_head        dst_csets;

        /* the number of tasks in the set */
        int                     nr_tasks;

        /* the subsys currently being processed */
        int                     ssid;

        /*
         * Fields for cgroup_taskset_*() iteration.
         *
         * Before migration is committed, the target migration tasks are on
         * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
         * the csets on ->dst_csets.  ->csets point to either ->src_csets
         * or ->dst_csets depending on whether migration is committed.
         *
         * ->cur_csets and ->cur_task point to the current task position
         * during iteration.
         */
        struct list_head        *csets;
        struct css_set          *cur_cset;
        struct task_struct      *cur_task;
};
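
/*
 * Iteration sketch (illustrative, in the style of a subsystem
 * ->attach() handler; cgroup_taskset_for_each() comes from
 * <linux/cgroup.h>):
 *
 *      struct task_struct *task;
 *      struct cgroup_subsys_state *css;
 *
 *      cgroup_taskset_for_each(task, css, tset)
 *              adjust_task(task);
 *
 * where adjust_task() stands in for the subsystem's per-task work.
 */
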
/* migration context also tracks preloading */
struct cgroup_mgctx {
        /*
         * Preloaded source and destination csets.  Used to guarantee
         * atomic success or failure on actual migration.
         */
        struct list_head        preloaded_src_csets;
        struct list_head        preloaded_dst_csets;

        /* tasks and csets to migrate */
        struct cgroup_taskset   tset;

        /* subsystems affected by migration */
        u16                     ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)                                       \
{                                                                       \
        .src_csets = LIST_HEAD_INIT(tset.src_csets),                    \
        .dst_csets = LIST_HEAD_INIT(tset.dst_csets),                    \
        .csets = &tset.src_csets,                                       \
}

#define CGROUP_MGCTX_INIT(name)                                         \
{                                                                       \
        LIST_HEAD_INIT(name.preloaded_src_csets),                       \
        LIST_HEAD_INIT(name.preloaded_dst_csets),                       \
        CGROUP_TASKSET_INIT(name.tset),                                 \
}

#define DEFINE_CGROUP_MGCTX(name)                                       \
        struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
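
/*
 * Migration flow sketch (illustrative; error handling elided, and the
 * cgroup_mutex, css_set_lock and threadgroup locking that each helper
 * requires is assumed to be handled by the caller):
 *
 *      DEFINE_CGROUP_MGCTX(mgctx);
 *
 *      cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *      ret = cgroup_migrate_prepare_dst(&mgctx);
 *      if (!ret)
 *              ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *      cgroup_migrate_finish(&mgctx);
 *
 * Preloading the csets first makes the commit step all-or-nothing.
 */
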
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)                                             \
        list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)                                       \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&                \
             (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
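
/*
 * Usage sketch (illustrative):
 *
 *      struct cgroup_subsys *ss;
 *      int ssid;
 *
 *      for_each_subsys(ss, ssid)
 *              pr_info("subsys %d: %s\n", ssid, ss->name);
 *
 * The "|| true" keeps the loop condition true for every valid index,
 * so termination is governed solely by the CGROUP_SUBSYS_COUNT bound
 * and the assignment to @ss is evaluated purely for its side effect.
 */
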
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
        return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
        unsigned long flags;

        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it. Similar to atomic_dec_and_lock(), but for an
         * rwlock
         */
        if (refcount_dec_not_one(&cset->refcount))
                return;

        spin_lock_irqsave(&css_set_lock, flags);
        put_css_set_locked(cset);
        spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
        refcount_inc(&cset->refcount);
}

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);
bool cgroup_is_thread_root(struct cgroup *cgrp);
bool cgroup_is_threaded(struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
                          struct cgroup_namespace *ns);

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_fs_context *ctx);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
int cgroup_do_get_tree(struct fs_context *fc);

int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
                            struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
                   struct cgroup_mgctx *mgctx);

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
                       bool threadgroup);
struct task_struct *cgroup_procs_write_start(char *buf, bool threadgroup)
        __acquires(&cgroup_threadgroup_rwsem);
void cgroup_procs_write_finish(struct task_struct *task)
        __releases(&cgroup_threadgroup_rwsem);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
                     struct kernfs_root *kf_root);

int __cgroup_task_count(const struct cgroup *cgrp);
int cgroup_task_count(const struct cgroup *cgrp);

/*
 * rstat.c
 */
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
void cgroup_rstat_boot(void);
void cgroup_base_stat_cputime_show(struct seq_file *seq);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;
extern const struct fs_parameter_description cgroup1_fs_parameters;

int proc_cgroupstats_show(struct seq_file *m, void *v);
bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param);
int cgroup1_get_tree(struct fs_context *fc);
int cgroup1_reconfigure(struct fs_context *ctx);

#endif /* __CGROUP_INTERNAL_H */