/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};
struct blkcg {
	struct cgroup_subsys_state	css;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};
struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};
struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;
	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;
	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];
	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;

	struct rcu_head			rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);
struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
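
/*
 * Illustrative sketch only (not part of this interface): a hypothetical
 * "foo" policy typically supplies a subset of the callbacks above plus its
 * cgroup files in a blkcg_policy instance; plid is filled in by
 * blkcg_policy_register().  All foo_* names below are made up.
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_offline_fn	= foo_pd_offline,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 */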
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
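
/*
 * Illustrative sketch only: a policy is usually registered once at module
 * init and activated per request_queue before its per-blkg data is used.
 * blkcg_policy_foo and the foo_* functions are hypothetical names.
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static int foo_init_queue(struct request_queue *q)
 *	{
 *		return blkcg_activate_policy(q, &blkcg_policy_foo);
 *	}
 */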
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
};
struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
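
/*
 * Illustrative sketch only: a policy's cgroup file write handler typically
 * resolves a "MAJ:MIN ..." style input with blkg_conf_prep(), updates its
 * per-blkg state through ctx.blkg, and releases the queue with
 * blkg_conf_finish().  The foo_* names below are hypothetical.
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		// update foo's per-blkg state through ctx.blkg here
 *
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */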
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}
/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 * @bio: bio of interest
 *
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio.  This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it.  However, the latter case
 * potentially gets it from task_css().  This can race against task migration
 * and the cgroup dying.  It is also semantically different as it must be
 * called rcu protected and is susceptible to failure when trying to get a
 * reference to it.  Therefore, it is not ok to assume that *_get() will
 * always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}
/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: bio of interest
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}
static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg() to look up the actual context
 * for the bio and attach the appropriate blkg to the bio.  Then we call this
 * helper and if it is true, run with the root blkg for that queue and then
 * do any backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
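
/*
 * Illustrative sketch only: an issue path honouring the helper above might
 * look roughly like this, with the backcharge step left to the policy.
 *
 *	struct blkcg_gq *blkg = bio->bi_blkg;
 *
 *	if (bio_issue_as_root_blkg(bio))
 *		blkg = blk_queue_root_blkg(bio->bi_disk->queue);
 *	// issue against blkg; backcharge bio_blkcg(bio) on completion
 */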
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
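
/*
 * Illustrative sketch only: callers typically wrap the lookup in an RCU
 * read-side section and take a proper reference before leaving it.
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg && !blkg_tryget(blkg))
 *		blkg = NULL;
 *	rcu_read_unlock();
 */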
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
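
/*
 * Illustrative sketch only: a policy usually wraps blkg_to_pd()/pd_to_blkg()
 * with converters for its own structure, which embeds blkg_policy_data.
 * struct foo_grp and the foo converters below are hypothetical.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;
 *		u64			bytes_limit;
 *	};
 *
 *	static struct foo_grp *blkg_to_foo(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_foo);
 *
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */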
extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK
/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}
/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}
#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}
/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected.  As the failure mode here is to walk
 * up the blkg tree, this ensures that the blkg->parent pointers are always
 * valid.  This returns the blkg that it ended up taking a reference on or
 * %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
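
/*
 * Illustrative sketch only: resetting some per-blkg state across a subtree
 * could use the pre-order iterator under RCU; foo_reset() is a hypothetical
 * helper.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *pos;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		foo_reset(pos);
 *	rcu_read_unlock();
 */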
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
bool __blkcg_punt_bio_submit(struct bio *bio);
static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		struct blkg_iostat_set *bis;
		int rwd, cpu;

		if (op_is_discard(bio->bi_opf))
			rwd = BLKG_IOSTAT_DISCARD;
		else if (op_is_write(bio->bi_opf))
			rwd = BLKG_IOSTAT_WRITE;
		else
			rwd = BLKG_IOSTAT_READ;

		cpu = get_cpu();
		bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
		u64_stats_update_begin(&bis->sync);

		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
		bis->cur.ios[rwd]++;

		u64_stats_update_end(&bis->sync);
		if (cgroup_subsys_on_dfl(io_cgrp_subsys))
			cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
		put_cpu();
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;

	/*
	 * We only want 1 person clearing the congestion count for this blkg.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			if (old)
				atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
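
/*
 * Illustrative sketch only: a policy that detects a cgroup exceeding its
 * limit might mark the blkg delayed, accumulate the debt and ask the issuing
 * task to throttle itself on its way back to userspace.  foo_over_limit()
 * and the delta calculation are hypothetical.
 *
 *	if (foo_over_limit(blkg)) {
 *		blkcg_use_delay(blkg);
 *		blkcg_add_delay(blkg, ktime_get_ns(), delta);
 *		blkcg_schedule_throttle(blkg->q, true);
 *	}
 */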
#else	/* CONFIG_BLK_CGROUP */
struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */