1 // SPDX-License-Identifier: GPL-2.0
3 * Common Block IO controller cgroup interface
5 * Based on ideas and code from CFQ, CFS and BFQ:
6 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
9 * Paolo Valente <paolo.valente@unimore.it>
11 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
12 * Nauman Rafique <nauman@google.com>
14 * For policy-specific per-blkcg data:
15 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
16 * Arianna Avanzini <avanzini.arianna@gmail.com>
18 #include <linux/ioprio.h>
19 #include <linux/kdev_t.h>
20 #include <linux/module.h>
21 #include <linux/sched/signal.h>
22 #include <linux/err.h>
23 #include <linux/blkdev.h>
24 #include <linux/backing-dev.h>
25 #include <linux/slab.h>
26 #include <linux/genhd.h>
27 #include <linux/delay.h>
28 #include <linux/atomic.h>
29 #include <linux/ctype.h>
30 #include <linux/blk-cgroup.h>
31 #include <linux/tracehook.h>
32 #include <linux/psi.h>
35 #define MAX_KEY_LEN 100
38 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
39 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
40 * policy [un]register operations including cgroup file additions /
41 * removals. Putting cgroup file registration outside blkcg_pol_mutex
42 * allows grabbing it from cgroup callbacks.
44 static DEFINE_MUTEX(blkcg_pol_register_mutex);
45 static DEFINE_MUTEX(blkcg_pol_mutex);
47 struct blkcg blkcg_root;
48 EXPORT_SYMBOL_GPL(blkcg_root);
50 struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
51 EXPORT_SYMBOL_GPL(blkcg_root_css);
53 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
55 static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
57 static bool blkcg_debug_stats = false;
59 static bool blkcg_policy_enabled(struct request_queue *q,
60 const struct blkcg_policy *pol)
62 return pol && test_bit(pol->plid, q->blkcg_pols);
66 * blkg_free - free a blkg
69 * Free @blkg which may be partially allocated.
71 static void blkg_free(struct blkcg_gq *blkg)
78 for (i = 0; i < BLKCG_MAX_POLS; i++)
80 blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
82 blkg_rwstat_exit(&blkg->stat_ios);
83 blkg_rwstat_exit(&blkg->stat_bytes);
84 percpu_ref_exit(&blkg->refcnt);
88 static void __blkg_release(struct rcu_head *rcu)
90 struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
92 /* release the blkcg and parent blkg refs this blkg has been holding */
93 css_put(&blkg->blkcg->css);
95 blkg_put(blkg->parent);
97 wb_congested_put(blkg->wb_congested);
103 * A group is RCU protected, but having an rcu lock does not mean that one
104 * can access all the fields of blkg and assume these are valid. For
105 * example, don't try to follow throtl_data and request queue links.
107 * Holding a reference to a blkg under RCU only allows access to values
108 * local to the group, such as group stats and group rate limits.
110 static void blkg_release(struct percpu_ref *ref)
112 struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
114 call_rcu(&blkg->rcu_head, __blkg_release);
118 * blkg_alloc - allocate a blkg
119 * @blkcg: block cgroup the new blkg is associated with
120 * @q: request_queue the new blkg is associated with
121 * @gfp_mask: allocation mask to use
123 * Allocate a new blkg associating @blkcg and @q.
125 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
128 struct blkcg_gq *blkg;
131 /* alloc and init base part */
132 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
136 if (percpu_ref_init(&blkg->refcnt, blkg_release, 0, gfp_mask))
139 if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
140 blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
144 INIT_LIST_HEAD(&blkg->q_node);
147 for (i = 0; i < BLKCG_MAX_POLS; i++) {
148 struct blkcg_policy *pol = blkcg_policy[i];
149 struct blkg_policy_data *pd;
151 if (!blkcg_policy_enabled(q, pol))
154 /* alloc per-policy data and attach it to blkg */
155 pd = pol->pd_alloc_fn(gfp_mask, q->node);
171 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
172 struct request_queue *q, bool update_hint)
174 struct blkcg_gq *blkg;
177 * Hint didn't match. Look up from the radix tree. Note that the
178 * hint can only be updated under queue_lock as otherwise @blkg
179 * could have already been removed from blkg_tree. The caller is
180 * responsible for grabbing queue_lock if @update_hint.
182 blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
183 if (blkg && blkg->q == q) {
185 lockdep_assert_held(&q->queue_lock);
186 rcu_assign_pointer(blkcg->blkg_hint, blkg);
193 EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
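/*
 * Illustrative sketch, not part of the original file: the lookup fast path
 * lives in the blk-cgroup header and only falls back to
 * blkg_lookup_slowpath() when the per-blkcg lookup hint misses.  Roughly
 * (the example_ name is hypothetical):
 */
#if 0	/* example only */
static inline struct blkcg_gq *example_blkg_lookup(struct blkcg *blkcg,
						   struct request_queue *q)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	/* try the hint first; it is only updated under queue_lock */
	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/* hint miss - radix tree lookup, don't update the hint (no lock) */
	return blkg_lookup_slowpath(blkcg, q, false);
}
#endif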
196 * If @new_blkg is %NULL, this function tries to allocate a new one as
197 * necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
199 static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
200 struct request_queue *q,
201 struct blkcg_gq *new_blkg)
203 struct blkcg_gq *blkg;
204 struct bdi_writeback_congested *wb_congested;
207 WARN_ON_ONCE(!rcu_read_lock_held());
208 lockdep_assert_held(&q->queue_lock);
210 /* request_queue is dying, do not create/recreate a blkg */
211 if (blk_queue_dying(q)) {
216 /* blkg holds a reference to blkcg */
217 if (!css_tryget_online(&blkcg->css)) {
222 wb_congested = wb_congested_get_create(q->backing_dev_info,
224 GFP_NOWAIT | __GFP_NOWARN);
232 new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
233 if (unlikely(!new_blkg)) {
235 goto err_put_congested;
239 blkg->wb_congested = wb_congested;
242 if (blkcg_parent(blkcg)) {
243 blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
244 if (WARN_ON_ONCE(!blkg->parent)) {
246 goto err_put_congested;
248 blkg_get(blkg->parent);
251 /* invoke per-policy init */
252 for (i = 0; i < BLKCG_MAX_POLS; i++) {
253 struct blkcg_policy *pol = blkcg_policy[i];
255 if (blkg->pd[i] && pol->pd_init_fn)
256 pol->pd_init_fn(blkg->pd[i]);
260 spin_lock(&blkcg->lock);
261 ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
263 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
264 list_add(&blkg->q_node, &q->blkg_list);
266 for (i = 0; i < BLKCG_MAX_POLS; i++) {
267 struct blkcg_policy *pol = blkcg_policy[i];
269 if (blkg->pd[i] && pol->pd_online_fn)
270 pol->pd_online_fn(blkg->pd[i]);
274 spin_unlock(&blkcg->lock);
279 /* @blkg failed to be fully initialized, use the usual release path */
284 wb_congested_put(wb_congested);
286 css_put(&blkcg->css);
293 * __blkg_lookup_create - lookup blkg, try to create one if not there
294 * @blkcg: blkcg of interest
295 * @q: request_queue of interest
297 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
298 * create one. blkg creation is performed recursively from blkcg_root such
299 * that all non-root blkgs have access to the parent blkg. This function
300 * should be called under RCU read lock and @q->queue_lock.
302 * Returns the blkg or the closest blkg if blkg_create() fails as it walks down from root.
305 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
306 struct request_queue *q)
308 struct blkcg_gq *blkg;
310 WARN_ON_ONCE(!rcu_read_lock_held());
311 lockdep_assert_held(&q->queue_lock);
313 blkg = __blkg_lookup(blkcg, q, true);
318 * Create blkgs walking down from blkcg_root to @blkcg, so that all
319 * non-root blkgs have access to their parents. Returns the closest
320 * blkg to the intended blkg should blkg_create() fail.
323 struct blkcg *pos = blkcg;
324 struct blkcg *parent = blkcg_parent(blkcg);
325 struct blkcg_gq *ret_blkg = q->root_blkg;
328 blkg = __blkg_lookup(parent, q, false);
330 /* remember closest blkg */
335 parent = blkcg_parent(parent);
338 blkg = blkg_create(pos, q, NULL);
347 * blkg_lookup_create - find or create a blkg
348 * @blkcg: target block cgroup
349 * @q: target request_queue
351 * This looks up or creates the blkg representing the unique pair
352 * of the blkcg and the request_queue.
354 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
355 struct request_queue *q)
357 struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
359 if (unlikely(!blkg)) {
362 spin_lock_irqsave(&q->queue_lock, flags);
363 blkg = __blkg_lookup_create(blkcg, q);
364 spin_unlock_irqrestore(&q->queue_lock, flags);
370 static void blkg_destroy(struct blkcg_gq *blkg)
372 struct blkcg *blkcg = blkg->blkcg;
373 struct blkcg_gq *parent = blkg->parent;
376 lockdep_assert_held(&blkg->q->queue_lock);
377 lockdep_assert_held(&blkcg->lock);
379 /* Something wrong if we are trying to remove same group twice */
380 WARN_ON_ONCE(list_empty(&blkg->q_node));
381 WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
383 for (i = 0; i < BLKCG_MAX_POLS; i++) {
384 struct blkcg_policy *pol = blkcg_policy[i];
386 if (blkg->pd[i] && pol->pd_offline_fn)
387 pol->pd_offline_fn(blkg->pd[i]);
391 blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
392 blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
395 blkg->online = false;
397 radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
398 list_del_init(&blkg->q_node);
399 hlist_del_init_rcu(&blkg->blkcg_node);
402 * Both setting lookup hint to and clearing it from @blkg are done
403 * under queue_lock. If it's not pointing to @blkg now, it never
404 * will. Hint assignment itself can race safely.
406 if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
407 rcu_assign_pointer(blkcg->blkg_hint, NULL);
410 * Put the reference taken at the time of creation so that when all
411 * queues are gone, group can be destroyed.
413 percpu_ref_kill(&blkg->refcnt);
417 * blkg_destroy_all - destroy all blkgs associated with a request_queue
418 * @q: request_queue of interest
420 * Destroy all blkgs associated with @q.
422 static void blkg_destroy_all(struct request_queue *q)
424 struct blkcg_gq *blkg, *n;
426 spin_lock_irq(&q->queue_lock);
427 list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
428 struct blkcg *blkcg = blkg->blkcg;
430 spin_lock(&blkcg->lock);
432 spin_unlock(&blkcg->lock);
436 spin_unlock_irq(&q->queue_lock);
439 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
440 struct cftype *cftype, u64 val)
442 struct blkcg *blkcg = css_to_blkcg(css);
443 struct blkcg_gq *blkg;
446 mutex_lock(&blkcg_pol_mutex);
447 spin_lock_irq(&blkcg->lock);
450 * Note that stat reset is racy - it doesn't synchronize against
451 * stat updates. This is a debug feature which shouldn't exist
452 * anyway. If you get hit by a race, retry.
454 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
455 blkg_rwstat_reset(&blkg->stat_bytes);
456 blkg_rwstat_reset(&blkg->stat_ios);
458 for (i = 0; i < BLKCG_MAX_POLS; i++) {
459 struct blkcg_policy *pol = blkcg_policy[i];
461 if (blkg->pd[i] && pol->pd_reset_stats_fn)
462 pol->pd_reset_stats_fn(blkg->pd[i]);
466 spin_unlock_irq(&blkcg->lock);
467 mutex_unlock(&blkcg_pol_mutex);
471 const char *blkg_dev_name(struct blkcg_gq *blkg)
473 /* some drivers (floppy) instantiate a queue w/o disk registered */
474 if (blkg->q->backing_dev_info->dev)
475 return dev_name(blkg->q->backing_dev_info->dev);
480 * blkcg_print_blkgs - helper for printing per-blkg data
481 * @sf: seq_file to print to
482 * @blkcg: blkcg of interest
483 * @prfill: fill function to print out a blkg
484 * @pol: policy in question
485 * @data: data to be passed to @prfill
486 * @show_total: to print out sum of prfill return values or not
488 * This function invokes @prfill on each blkg of @blkcg if pd for the
489 * policy specified by @pol exists. @prfill is invoked with @sf, the
490 * policy data and @data and the matching queue lock held. If @show_total
491 * is %true, the sum of the return values from @prfill is printed with
492 * "Total" label at the end.
494 * This is to be used to construct print functions for
495 * cftype->read_seq_string method.
497 void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
498 u64 (*prfill)(struct seq_file *,
499 struct blkg_policy_data *, int),
500 const struct blkcg_policy *pol, int data,
503 struct blkcg_gq *blkg;
507 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
508 spin_lock_irq(&blkg->q->queue_lock);
509 if (blkcg_policy_enabled(blkg->q, pol))
510 total += prfill(sf, blkg->pd[pol->plid], data);
511 spin_unlock_irq(&blkg->q->queue_lock);
516 seq_printf(sf, "Total %llu\n", (unsigned long long)total);
518 EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
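/*
 * Illustrative sketch, not part of the original file: a policy typically
 * pairs blkcg_print_blkgs() with a small prfill callback.  The names
 * blkcg_policy_example, example_prfill_weight and example_weight_show are
 * hypothetical.
 */
#if 0	/* example only */
static u64 example_prfill_weight(struct seq_file *sf,
				 struct blkg_policy_data *pd, int off)
{
	/* emit one "MAJ:MIN value" line per blkg with this policy enabled */
	return __blkg_prfill_u64(sf, pd, 100 /* per-device value goes here */);
}

static int example_weight_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), example_prfill_weight,
			  &blkcg_policy_example, 0, false);
	return 0;
}
#endif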
521 * __blkg_prfill_u64 - prfill helper for a single u64 value
522 * @sf: seq_file to print to
523 * @pd: policy private data of interest
526 * Print @v to @sf for the device associated with @pd.
528 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
530 const char *dname = blkg_dev_name(pd->blkg);
535 seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
538 EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
541 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
542 * @sf: seq_file to print to
543 * @pd: policy private data of interest
544 * @rwstat: rwstat to print
546 * Print @rwstat to @sf for the device associated with @pd.
548 u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
549 const struct blkg_rwstat_sample *rwstat)
551 static const char *rwstr[] = {
552 [BLKG_RWSTAT_READ] = "Read",
553 [BLKG_RWSTAT_WRITE] = "Write",
554 [BLKG_RWSTAT_SYNC] = "Sync",
555 [BLKG_RWSTAT_ASYNC] = "Async",
556 [BLKG_RWSTAT_DISCARD] = "Discard",
558 const char *dname = blkg_dev_name(pd->blkg);
565 for (i = 0; i < BLKG_RWSTAT_NR; i++)
566 seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
569 v = rwstat->cnt[BLKG_RWSTAT_READ] +
570 rwstat->cnt[BLKG_RWSTAT_WRITE] +
571 rwstat->cnt[BLKG_RWSTAT_DISCARD];
572 seq_printf(sf, "%s Total %llu\n", dname, v);
575 EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
578 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
579 * @sf: seq_file to print to
580 * @pd: policy private data of interest
581 * @off: offset to the blkg_rwstat in @pd
583 * prfill callback for printing a blkg_rwstat.
585 u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
588 struct blkg_rwstat_sample rwstat = { };
590 blkg_rwstat_read((void *)pd + off, &rwstat);
591 return __blkg_prfill_rwstat(sf, pd, &rwstat);
593 EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
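/*
 * Illustrative sketch, not part of the original file: blkg_prfill_rwstat()
 * expects @off to be the offset of a struct blkg_rwstat inside the policy's
 * own per-blkg data.  struct example_pd, blkcg_policy_example and
 * example_served_show are hypothetical.
 */
#if 0	/* example only */
struct example_pd {
	struct blkg_policy_data pd;	/* kept first so offsetof() below is
					 * also the offset from the pd base */
	struct blkg_rwstat served;
};

static int example_served_show(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_example,
			  offsetof(struct example_pd, served), true);
	return 0;
}
#endif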
595 static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
596 struct blkg_policy_data *pd, int off)
598 struct blkg_rwstat_sample rwstat = { };
600 blkg_rwstat_read((void *)pd->blkg + off, &rwstat);
601 return __blkg_prfill_rwstat(sf, pd, &rwstat);
605 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
606 * @sf: seq_file to print to
609 * To be used as cftype->seq_show to print blkg->stat_bytes.
610 * cftype->private must be set to the blkcg_policy.
612 int blkg_print_stat_bytes(struct seq_file *sf, void *v)
614 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
615 blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
616 offsetof(struct blkcg_gq, stat_bytes), true);
619 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
622 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
623 * @sf: seq_file to print to
626 * To be used as cftype->seq_show to print blkg->stat_ios. cftype->private
627 * must be set to the blkcg_policy.
629 int blkg_print_stat_ios(struct seq_file *sf, void *v)
631 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
632 blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
633 offsetof(struct blkcg_gq, stat_ios), true);
636 EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
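/*
 * Illustrative sketch, not part of the original file: these helpers are
 * meant to be plugged directly into a policy's cftype table, with ->private
 * carrying the policy.  blkcg_policy_example and the file names are
 * hypothetical.
 */
#if 0	/* example only */
static struct cftype example_legacy_files[] = {
	{
		.name		= "example.io_service_bytes",
		.private	= (unsigned long)&blkcg_policy_example,
		.seq_show	= blkg_print_stat_bytes,
	},
	{
		.name		= "example.io_serviced",
		.private	= (unsigned long)&blkcg_policy_example,
		.seq_show	= blkg_print_stat_ios,
	},
	{ }	/* terminate */
};
#endif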
638 static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
639 struct blkg_policy_data *pd,
642 struct blkg_rwstat_sample rwstat;
644 blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &rwstat);
645 return __blkg_prfill_rwstat(sf, pd, &rwstat);
649 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
650 * @sf: seq_file to print to
653 int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
655 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
656 blkg_prfill_rwstat_field_recursive,
657 (void *)seq_cft(sf)->private,
658 offsetof(struct blkcg_gq, stat_bytes), true);
661 EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
664 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
665 * @sf: seq_file to print to
668 int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
670 blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
671 blkg_prfill_rwstat_field_recursive,
672 (void *)seq_cft(sf)->private,
673 offsetof(struct blkcg_gq, stat_ios), true);
676 EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
679 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
680 * @blkg: blkg of interest
681 * @pol: blkcg_policy which contains the blkg_rwstat
682 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
683 * @sum: blkg_rwstat_sample structure containing the results
685 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
686 * online descendants and their aux counts. The caller must be holding the
687 * queue lock for online tests.
689 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
690 * is at @off bytes into @blkg's blkg_policy_data of the policy.
692 void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
693 int off, struct blkg_rwstat_sample *sum)
695 struct blkcg_gq *pos_blkg;
696 struct cgroup_subsys_state *pos_css;
699 lockdep_assert_held(&blkg->q->queue_lock);
702 blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
703 struct blkg_rwstat *rwstat;
705 if (!pos_blkg->online)
709 rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
711 rwstat = (void *)pos_blkg + off;
713 for (i = 0; i < BLKG_RWSTAT_NR; i++)
714 sum->cnt[i] = blkg_rwstat_read_counter(rwstat, i);
718 EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
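/*
 * Illustrative sketch, not part of the original file: a policy can also sum
 * one of its own per-blkg rwstats over the whole subtree.  Reuses the
 * hypothetical struct example_pd / blkcg_policy_example names from the
 * earlier sketches.
 */
#if 0	/* example only */
static u64 example_prfill_served_recursive(struct seq_file *sf,
					   struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	/* blkcg_print_blkgs() already holds pd->blkg->q->queue_lock here */
	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_example, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}
#endif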
720 /* Performs queue bypass and policy enabled checks then looks up blkg. */
721 static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
722 const struct blkcg_policy *pol,
723 struct request_queue *q)
725 WARN_ON_ONCE(!rcu_read_lock_held());
726 lockdep_assert_held(&q->queue_lock);
728 if (!blkcg_policy_enabled(q, pol))
729 return ERR_PTR(-EOPNOTSUPP);
730 return __blkg_lookup(blkcg, q, true /* update_hint */);
734 * blkg_conf_prep - parse and prepare for per-blkg config update
735 * @blkcg: target block cgroup
736 * @pol: target policy
737 * @input: input string
738 * @ctx: blkg_conf_ctx to be filled
740 * Parse per-blkg config update from @input and initialize @ctx with the
741 * result. @ctx->blkg points to the blkg to be updated and @ctx->body the
742 * part of @input following MAJ:MIN. This function returns with RCU read
743 * lock and queue lock held and must be paired with blkg_conf_finish().
745 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
746 char *input, struct blkg_conf_ctx *ctx)
747 __acquires(rcu) __acquires(&disk->queue->queue_lock)
749 struct gendisk *disk;
750 struct request_queue *q;
751 struct blkcg_gq *blkg;
752 unsigned int major, minor;
753 int key_len, part, ret;
756 if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
759 body = input + key_len;
762 body = skip_spaces(body);
764 disk = get_gendisk(MKDEV(major, minor), &part);
775 spin_lock_irq(&q->queue_lock);
777 blkg = blkg_lookup_check(blkcg, pol, q);
787 * Create blkgs walking down from blkcg_root to @blkcg, so that all
788 * non-root blkgs have access to their parents.
791 struct blkcg *pos = blkcg;
792 struct blkcg *parent;
793 struct blkcg_gq *new_blkg;
795 parent = blkcg_parent(blkcg);
796 while (parent && !__blkg_lookup(parent, q, false)) {
798 parent = blkcg_parent(parent);
801 /* Drop locks to do new blkg allocation with GFP_KERNEL. */
802 spin_unlock_irq(&q->queue_lock);
805 new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
806 if (unlikely(!new_blkg)) {
812 spin_lock_irq(&q->queue_lock);
814 blkg = blkg_lookup_check(pos, pol, q);
823 blkg = blkg_create(pos, q, new_blkg);
840 spin_unlock_irq(&q->queue_lock);
843 put_disk_and_module(disk);
845 * If queue was bypassing, we should retry. Do so after a
846 * short msleep(). It isn't strictly necessary but queue
847 * can be bypassing for some time and it's always nice to
848 * avoid busy looping.
852 ret = restart_syscall();
858 * blkg_conf_finish - finish up per-blkg config update
859 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
861 * Finish up after per-blkg config update. This function must be paired
862 * with blkg_conf_prep().
864 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
865 __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
867 spin_unlock_irq(&ctx->disk->queue->queue_lock);
869 put_disk_and_module(ctx->disk);
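/*
 * Illustrative sketch, not part of the original file: the usual shape of a
 * policy's per-device config write handler built on blkg_conf_prep() /
 * blkg_conf_finish().  The example_* names and blkcg_policy_example are
 * hypothetical.
 */
#if 0	/* example only */
static ssize_t example_set_limit(struct kernfs_open_file *of, char *buf,
				 size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	u64 limit;
	int ret;

	/* returns with RCU read lock and queue_lock held on success */
	ret = blkg_conf_prep(blkcg, &blkcg_policy_example, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	if (sscanf(ctx.body, "%llu", &limit) != 1)
		goto out_finish;

	/* ctx.blkg is the blkg for the MAJ:MIN device parsed from @buf */
	example_update_limit(blkg_to_pd(ctx.blkg, &blkcg_policy_example), limit);
	ret = 0;
out_finish:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}
#endif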
872 static int blkcg_print_stat(struct seq_file *sf, void *v)
874 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
875 struct blkcg_gq *blkg;
879 hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
882 struct blkg_rwstat_sample rwstat;
883 u64 rbytes, wbytes, rios, wios, dbytes, dios;
884 size_t size = seq_get_buf(sf, &buf), off = 0;
886 bool has_stats = false;
888 dname = blkg_dev_name(blkg);
893 * Hooray string manipulation, count is the size written NOT
894 * INCLUDING THE \0, so size is now count+1 less than what we
895 * had before, but we want to start writing the next bit from
896 * the \0 so we only add count to buf.
898 off += scnprintf(buf+off, size-off, "%s ", dname);
900 spin_lock_irq(&blkg->q->queue_lock);
902 blkg_rwstat_recursive_sum(blkg, NULL,
903 offsetof(struct blkcg_gq, stat_bytes), &rwstat);
904 rbytes = rwstat.cnt[BLKG_RWSTAT_READ];
905 wbytes = rwstat.cnt[BLKG_RWSTAT_WRITE];
906 dbytes = rwstat.cnt[BLKG_RWSTAT_DISCARD];
908 blkg_rwstat_recursive_sum(blkg, NULL,
909 offsetof(struct blkcg_gq, stat_ios), &rwstat);
910 rios = rwstat.cnt[BLKG_RWSTAT_READ];
911 wios = rwstat.cnt[BLKG_RWSTAT_WRITE];
912 dios = rwstat.cnt[BLKG_RWSTAT_DISCARD];
914 spin_unlock_irq(&blkg->q->queue_lock);
916 if (rbytes || wbytes || rios || wios) {
918 off += scnprintf(buf+off, size-off,
919 "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
920 rbytes, wbytes, rios, wios,
924 if (!blkcg_debug_stats)
927 if (atomic_read(&blkg->use_delay)) {
929 off += scnprintf(buf+off, size-off,
930 " use_delay=%d delay_nsec=%llu",
931 atomic_read(&blkg->use_delay),
932 (unsigned long long)atomic64_read(&blkg->delay_nsec));
935 for (i = 0; i < BLKCG_MAX_POLS; i++) {
936 struct blkcg_policy *pol = blkcg_policy[i];
939 if (!blkg->pd[i] || !pol->pd_stat_fn)
942 written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
949 if (off < size - 1) {
950 off += scnprintf(buf+off, size-off, "\n");
962 static struct cftype blkcg_files[] = {
965 .flags = CFTYPE_NOT_ON_ROOT,
966 .seq_show = blkcg_print_stat,
971 static struct cftype blkcg_legacy_files[] = {
973 .name = "reset_stats",
974 .write_u64 = blkcg_reset_stats,
980 * blkcg destruction is a three-stage process.
982 * 1. Destruction starts. The blkcg_css_offline() callback is invoked
983 * which offlines writeback. Here we tie the next stage of blkg destruction
984 * to the completion of writeback associated with the blkcg. This lets us
985 * avoid punting potentially large amounts of outstanding writeback to root
986 * while maintaining any ongoing policies. The next stage is triggered when
987 * the nr_cgwbs count goes to zero.
989 * 2. When the nr_cgwbs count goes to zero, blkcg_destroy_blkgs() is called
990 * and handles the destruction of blkgs. Here the css reference held by
991 * the blkg is put back eventually allowing blkcg_css_free() to be called.
992 * This work may occur in cgwb_release_workfn() on the cgwb_release
993 * workqueue. Any submitted ios that fail to get the blkg ref will be
994 * punted to the root_blkg.
996 * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
997 * This finally frees the blkcg.
1001 * blkcg_css_offline - cgroup css_offline callback
1002 * @css: css of interest
1004 * This function is called when @css is about to go away. Here the cgwbs are
1005 * offlined first and only once writeback associated with the blkcg has
1006 * finished do we start step 2 (see above).
1008 static void blkcg_css_offline(struct cgroup_subsys_state *css)
1010 struct blkcg *blkcg = css_to_blkcg(css);
1012 /* this prevents anyone from attaching or migrating to this blkcg */
1013 wb_blkcg_offline(blkcg);
1015 /* put the base cgwb reference allowing step 2 to be triggered */
1016 blkcg_cgwb_put(blkcg);
1020 * blkcg_destroy_blkgs - responsible for shooting down blkgs
1021 * @blkcg: blkcg of interest
1023 * blkgs should be removed while holding both q and blkcg locks. As blkcg lock
1024 * is nested inside q lock, this function performs reverse double lock dancing.
1025 * Destroying the blkgs releases the reference held on the blkcg's css allowing
1026 * blkcg_css_free to eventually be called.
1028 * This is the blkcg counterpart of ioc_release_fn().
1030 void blkcg_destroy_blkgs(struct blkcg *blkcg)
1032 spin_lock_irq(&blkcg->lock);
1034 while (!hlist_empty(&blkcg->blkg_list)) {
1035 struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
1036 struct blkcg_gq, blkcg_node);
1037 struct request_queue *q = blkg->q;
1039 if (spin_trylock(&q->queue_lock)) {
1041 spin_unlock(&q->queue_lock);
1043 spin_unlock_irq(&blkcg->lock);
1045 spin_lock_irq(&blkcg->lock);
1049 spin_unlock_irq(&blkcg->lock);
1052 static void blkcg_css_free(struct cgroup_subsys_state *css)
1054 struct blkcg *blkcg = css_to_blkcg(css);
1057 mutex_lock(&blkcg_pol_mutex);
1059 list_del(&blkcg->all_blkcgs_node);
1061 for (i = 0; i < BLKCG_MAX_POLS; i++)
1063 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1065 mutex_unlock(&blkcg_pol_mutex);
1070 static struct cgroup_subsys_state *
1071 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
1073 struct blkcg *blkcg;
1074 struct cgroup_subsys_state *ret;
1077 mutex_lock(&blkcg_pol_mutex);
1080 blkcg = &blkcg_root;
1082 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1084 ret = ERR_PTR(-ENOMEM);
1089 for (i = 0; i < BLKCG_MAX_POLS ; i++) {
1090 struct blkcg_policy *pol = blkcg_policy[i];
1091 struct blkcg_policy_data *cpd;
1094 * If the policy hasn't been attached yet, wait for it
1095 * to be attached before doing anything else. Otherwise,
1096 * check if the policy requires any specific per-cgroup
1097 * data: if it does, allocate and initialize it.
1099 if (!pol || !pol->cpd_alloc_fn)
1102 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1104 ret = ERR_PTR(-ENOMEM);
1107 blkcg->cpd[i] = cpd;
1110 if (pol->cpd_init_fn)
1111 pol->cpd_init_fn(cpd);
1114 spin_lock_init(&blkcg->lock);
1115 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
1116 INIT_HLIST_HEAD(&blkcg->blkg_list);
1117 #ifdef CONFIG_CGROUP_WRITEBACK
1118 INIT_LIST_HEAD(&blkcg->cgwb_list);
1119 refcount_set(&blkcg->cgwb_refcnt, 1);
1121 list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
1123 mutex_unlock(&blkcg_pol_mutex);
1127 for (i--; i >= 0; i--)
1129 blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
1131 if (blkcg != &blkcg_root)
1134 mutex_unlock(&blkcg_pol_mutex);
1139 * blkcg_init_queue - initialize blkcg part of request queue
1140 * @q: request_queue to initialize
1142 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1143 * part of new request_queue @q.
1146 * 0 on success, -errno on failure.
1148 int blkcg_init_queue(struct request_queue *q)
1150 struct blkcg_gq *new_blkg, *blkg;
1154 new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
1158 preloaded = !radix_tree_preload(GFP_KERNEL);
1160 /* Make sure the root blkg exists. */
1162 spin_lock_irq(&q->queue_lock);
1163 blkg = blkg_create(&blkcg_root, q, new_blkg);
1166 q->root_blkg = blkg;
1167 spin_unlock_irq(&q->queue_lock);
1171 radix_tree_preload_end();
1173 ret = blk_iolatency_init(q);
1175 goto err_destroy_all;
1177 ret = blk_throtl_init(q);
1179 goto err_destroy_all;
1183 blkg_destroy_all(q);
1186 spin_unlock_irq(&q->queue_lock);
1189 radix_tree_preload_end();
1190 return PTR_ERR(blkg);
1194 * blkcg_drain_queue - drain blkcg part of request_queue
1195 * @q: request_queue to drain
1197 * Called from blk_drain_queue(). Responsible for draining blkcg part.
1199 void blkcg_drain_queue(struct request_queue *q)
1201 lockdep_assert_held(&q->queue_lock);
1204 * @q could be exiting and already have destroyed all blkgs as
1205 * indicated by NULL root_blkg. If so, don't confuse policies.
1210 blk_throtl_drain(q);
1214 * blkcg_exit_queue - exit and release blkcg part of request_queue
1215 * @q: request_queue being released
1217 * Called from blk_exit_queue(). Responsible for exiting blkcg part.
1219 void blkcg_exit_queue(struct request_queue *q)
1221 blkg_destroy_all(q);
1226 * We cannot support shared io contexts, as we have no means to support
1227 * two tasks with the same ioc in two different groups without major rework
1228 * of the main cic data structures. For now we allow a task to change
1229 * its cgroup only if it's the only owner of its ioc.
1231 static int blkcg_can_attach(struct cgroup_taskset *tset)
1233 struct task_struct *task;
1234 struct cgroup_subsys_state *dst_css;
1235 struct io_context *ioc;
1238 /* task_lock() is needed to avoid races with exit_io_context() */
1239 cgroup_taskset_for_each(task, dst_css, tset) {
1241 ioc = task->io_context;
1242 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1251 static void blkcg_bind(struct cgroup_subsys_state *root_css)
1255 mutex_lock(&blkcg_pol_mutex);
1257 for (i = 0; i < BLKCG_MAX_POLS; i++) {
1258 struct blkcg_policy *pol = blkcg_policy[i];
1259 struct blkcg *blkcg;
1261 if (!pol || !pol->cpd_bind_fn)
1264 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
1265 if (blkcg->cpd[pol->plid])
1266 pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
1268 mutex_unlock(&blkcg_pol_mutex);
1271 static void blkcg_exit(struct task_struct *tsk)
1273 if (tsk->throttle_queue)
1274 blk_put_queue(tsk->throttle_queue);
1275 tsk->throttle_queue = NULL;
1278 struct cgroup_subsys io_cgrp_subsys = {
1279 .css_alloc = blkcg_css_alloc,
1280 .css_offline = blkcg_css_offline,
1281 .css_free = blkcg_css_free,
1282 .can_attach = blkcg_can_attach,
1284 .dfl_cftypes = blkcg_files,
1285 .legacy_cftypes = blkcg_legacy_files,
1286 .legacy_name = "blkio",
1290 * This ensures that, if available, memcg is automatically enabled
1291 * together on the default hierarchy so that the owner cgroup can
1292 * be retrieved from writeback pages.
1294 .depends_on = 1 << memory_cgrp_id,
1297 EXPORT_SYMBOL_GPL(io_cgrp_subsys);
1300 * blkcg_activate_policy - activate a blkcg policy on a request_queue
1301 * @q: request_queue of interest
1302 * @pol: blkcg policy to activate
1304 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
1305 * bypass mode to populate its blkgs with policy_data for @pol.
1307 * Activation happens with @q bypassed, so nobody would be accessing blkgs
1308 * from IO path. Update of each blkg is protected by both queue and blkcg
1309 * locks so that holding either lock and testing blkcg_policy_enabled() is
1310 * always enough for dereferencing policy data.
1312 * The caller is responsible for synchronizing [de]activations and policy
1313 * [un]registrations. Returns 0 on success, -errno on failure.
1315 int blkcg_activate_policy(struct request_queue *q,
1316 const struct blkcg_policy *pol)
1318 struct blkg_policy_data *pd_prealloc = NULL;
1319 struct blkcg_gq *blkg;
1322 if (blkcg_policy_enabled(q, pol))
1326 blk_mq_freeze_queue(q);
1329 pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
1332 goto out_bypass_end;
1336 spin_lock_irq(&q->queue_lock);
1338 /* blkg_list is pushed at the head, reverse walk to init parents first */
1339 list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
1340 struct blkg_policy_data *pd;
1342 if (blkg->pd[pol->plid])
1345 pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
1347 swap(pd, pd_prealloc);
1349 spin_unlock_irq(&q->queue_lock);
1353 blkg->pd[pol->plid] = pd;
1355 pd->plid = pol->plid;
1356 if (pol->pd_init_fn)
1357 pol->pd_init_fn(pd);
1360 __set_bit(pol->plid, q->blkcg_pols);
1363 spin_unlock_irq(&q->queue_lock);
1366 blk_mq_unfreeze_queue(q);
1368 pol->pd_free_fn(pd_prealloc);
1371 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
1374 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
1375 * @q: request_queue of interest
1376 * @pol: blkcg policy to deactivate
1378 * Deactivate @pol on @q. Follows the same synchronization rules as
1379 * blkcg_activate_policy().
1381 void blkcg_deactivate_policy(struct request_queue *q,
1382 const struct blkcg_policy *pol)
1384 struct blkcg_gq *blkg;
1386 if (!blkcg_policy_enabled(q, pol))
1390 blk_mq_freeze_queue(q);
1392 spin_lock_irq(&q->queue_lock);
1394 __clear_bit(pol->plid, q->blkcg_pols);
1396 list_for_each_entry(blkg, &q->blkg_list, q_node) {
1397 if (blkg->pd[pol->plid]) {
1398 if (pol->pd_offline_fn)
1399 pol->pd_offline_fn(blkg->pd[pol->plid]);
1400 pol->pd_free_fn(blkg->pd[pol->plid]);
1401 blkg->pd[pol->plid] = NULL;
1405 spin_unlock_irq(&q->queue_lock);
1408 blk_mq_unfreeze_queue(q);
1410 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
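/*
 * Illustrative sketch, not part of the original file: a policy enables
 * itself on a queue from its init path and tears down from its exit path.
 * The example_* names and blkcg_policy_example are hypothetical.
 */
#if 0	/* example only */
static int example_init_queue(struct request_queue *q)
{
	/* ... allocate any per-queue state first ... */
	return blkcg_activate_policy(q, &blkcg_policy_example);
}

static void example_exit_queue(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &blkcg_policy_example);
	/* ... then free per-queue state ... */
}
#endif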
1413 * blkcg_policy_register - register a blkcg policy
1414 * @pol: blkcg policy to register
1416 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1417 * successful registration. Returns 0 on success and -errno on failure.
1419 int blkcg_policy_register(struct blkcg_policy *pol)
1421 struct blkcg *blkcg;
1424 mutex_lock(&blkcg_pol_register_mutex);
1425 mutex_lock(&blkcg_pol_mutex);
1427 /* find an empty slot */
1429 for (i = 0; i < BLKCG_MAX_POLS; i++)
1430 if (!blkcg_policy[i])
1432 if (i >= BLKCG_MAX_POLS) {
1433 pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
1437 /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn come in pairs */
1438 if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
1439 (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
1444 blkcg_policy[pol->plid] = pol;
1446 /* allocate and install cpd's */
1447 if (pol->cpd_alloc_fn) {
1448 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1449 struct blkcg_policy_data *cpd;
1451 cpd = pol->cpd_alloc_fn(GFP_KERNEL);
1455 blkcg->cpd[pol->plid] = cpd;
1457 cpd->plid = pol->plid;
1458 pol->cpd_init_fn(cpd);
1462 mutex_unlock(&blkcg_pol_mutex);
1464 /* everything is in place, add intf files for the new policy */
1465 if (pol->dfl_cftypes)
1466 WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
1468 if (pol->legacy_cftypes)
1469 WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
1470 pol->legacy_cftypes));
1471 mutex_unlock(&blkcg_pol_register_mutex);
1475 if (pol->cpd_free_fn) {
1476 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1477 if (blkcg->cpd[pol->plid]) {
1478 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1479 blkcg->cpd[pol->plid] = NULL;
1483 blkcg_policy[pol->plid] = NULL;
1485 mutex_unlock(&blkcg_pol_mutex);
1486 mutex_unlock(&blkcg_pol_register_mutex);
1489 EXPORT_SYMBOL_GPL(blkcg_policy_register);
1492 * blkcg_policy_unregister - unregister a blkcg policy
1493 * @pol: blkcg policy to unregister
1495 * Undo blkcg_policy_register(@pol). Might sleep.
1497 void blkcg_policy_unregister(struct blkcg_policy *pol)
1499 struct blkcg *blkcg;
1501 mutex_lock(&blkcg_pol_register_mutex);
1503 if (WARN_ON(blkcg_policy[pol->plid] != pol))
1506 /* kill the intf files first */
1507 if (pol->dfl_cftypes)
1508 cgroup_rm_cftypes(pol->dfl_cftypes);
1509 if (pol->legacy_cftypes)
1510 cgroup_rm_cftypes(pol->legacy_cftypes);
1512 /* remove cpds and unregister */
1513 mutex_lock(&blkcg_pol_mutex);
1515 if (pol->cpd_free_fn) {
1516 list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
1517 if (blkcg->cpd[pol->plid]) {
1518 pol->cpd_free_fn(blkcg->cpd[pol->plid]);
1519 blkcg->cpd[pol->plid] = NULL;
1523 blkcg_policy[pol->plid] = NULL;
1525 mutex_unlock(&blkcg_pol_mutex);
1527 mutex_unlock(&blkcg_pol_register_mutex);
1529 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
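/*
 * Illustrative sketch, not part of the original file: a minimal policy
 * definition and module hookup.  All example_* callbacks, the file tables
 * and blkcg_policy_example itself are hypothetical; ->plid is assigned by
 * blkcg_policy_register().
 */
#if 0	/* example only */
static struct blkcg_policy blkcg_policy_example = {
	.dfl_cftypes	= example_files,
	.legacy_cftypes	= example_legacy_files,
	.pd_alloc_fn	= example_pd_alloc,
	.pd_init_fn	= example_pd_init,
	.pd_free_fn	= example_pd_free,
};

static int __init example_init(void)
{
	return blkcg_policy_register(&blkcg_policy_example);
}

static void __exit example_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_example);
}

module_init(example_init);
module_exit(example_exit);
#endif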
1532 * Scale the accumulated delay based on how long it has been since we updated
1533 * the delay. We only call this when we are adding delay, in case it's been a
1534 * while since we added delay, and when we are checking to see if we need to
1535 * delay a task, to account for any delays that may have occurred.
1537 static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
1539 u64 old = atomic64_read(&blkg->delay_start);
1542 * We only want to scale down every second. The idea here is that we
1543 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
1544 * time window. We only want to throttle tasks for recent delay that
1545 * has occurred, in 1 second time windows since that's the maximum
1546 * things can be throttled. We save the current delay window in
1547 * blkg->last_delay so we know what amount is still left to be charged
1548 * to the blkg from this point onward. blkg->last_use keeps track of
1549 * the use_delay counter. The idea is if we're unthrottling the blkg we
1550 * are ok with whatever is happening now, and we can take away more of
1551 * the accumulated delay as we've already throttled enough that
1552 * everybody is happy with their IO latencies.
1554 if (time_before64(old + NSEC_PER_SEC, now) &&
1555 atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
1556 u64 cur = atomic64_read(&blkg->delay_nsec);
1557 u64 sub = min_t(u64, blkg->last_delay, now - old);
1558 int cur_use = atomic_read(&blkg->use_delay);
1561 * We've been unthrottled, subtract a larger chunk of our
1562 * accumulated delay.
1564 if (cur_use < blkg->last_use)
1565 sub = max_t(u64, sub, blkg->last_delay >> 1);
1568 * This shouldn't happen, but handle it anyway. Our delay_nsec
1569 * should only ever be growing except here where we subtract out
1570 * min(last_delay, 1 second), but lord knows bugs happen and I'd
1571 * rather not end up with negative numbers.
1573 if (unlikely(cur < sub)) {
1574 atomic64_set(&blkg->delay_nsec, 0);
1575 blkg->last_delay = 0;
1577 atomic64_sub(sub, &blkg->delay_nsec);
1578 blkg->last_delay = cur - sub;
1580 blkg->last_use = cur_use;
1585 * This is called when we want to actually walk up the hierarchy and check to
1586 * see if we need to throttle, and then actually throttle if there is some
1587 * accumulated delay. This should only be called upon return to user space so
1588 * we're not holding some lock that would induce a priority inversion.
1590 static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
1592 unsigned long pflags;
1593 u64 now = ktime_to_ns(ktime_get());
1598 while (blkg->parent) {
1599 if (atomic_read(&blkg->use_delay)) {
1600 blkcg_scale_delay(blkg, now);
1601 delay_nsec = max_t(u64, delay_nsec,
1602 atomic64_read(&blkg->delay_nsec));
1604 blkg = blkg->parent;
1611 * Let's not sleep for all eternity if we've amassed a huge delay.
1612 * Swapping or metadata IO can accumulate tens of seconds worth of
1613 * delay, and we want userspace to be able to do _something_ so cap the
1614 * delays at 0.25s to match the cap below. If there are tens of seconds
1615 * worth of delay then the tasks will be delayed for 0.25s per syscall.
1617 delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);
1620 psi_memstall_enter(&pflags);
1622 exp = ktime_add_ns(now, delay_nsec);
1623 tok = io_schedule_prepare();
1625 __set_current_state(TASK_KILLABLE);
1626 if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
1628 } while (!fatal_signal_pending(current));
1629 io_schedule_finish(tok);
1632 psi_memstall_leave(&pflags);
1636 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
1638 * This is only called if we've been marked with set_notify_resume(). Obviously
1639 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
1640 * check to see if current->throttle_queue is set and if not this doesn't do
1641 * anything. This should only ever be called by the resume code, it's not meant
1642 * to be called by people willy-nilly as it will actually do the work to
1643 * throttle the task if it is set up for throttling.
1645 void blkcg_maybe_throttle_current(void)
1647 struct request_queue *q = current->throttle_queue;
1648 struct cgroup_subsys_state *css;
1649 struct blkcg *blkcg;
1650 struct blkcg_gq *blkg;
1651 bool use_memdelay = current->use_memdelay;
1656 current->throttle_queue = NULL;
1657 current->use_memdelay = false;
1660 css = kthread_blkcg();
1662 blkcg = css_to_blkcg(css);
1664 blkcg = css_to_blkcg(task_css(current, io_cgrp_id));
1668 blkg = blkg_lookup(blkcg, q);
1671 if (!blkg_tryget(blkg))
1675 blkcg_maybe_throttle_blkg(blkg, use_memdelay);
1685 * blkcg_schedule_throttle - this task needs to check for throttling
1686 * @q: the request queue IO was submitted on
1687 * @use_memdelay: do we charge this to memory delay for PSI
1689 * This is called by the IO controller when we know there's delay accumulated
1690 * for the blkg for this task. We do not pass the blkg because there are places
1691 * we call this that may not have that information; the swapping code, for
1692 * instance, will only have a request_queue at that point. This sets the
1693 * notify_resume for the task to check and see if it requires throttling before
1694 * returning to user space.
1696 * We will only schedule once per syscall. You can call this over and over
1697 * again and it will only do the check once upon return to user space, and only
1698 * throttle once. If the task needs to be throttled again it'll need to be
1699 * re-set at the next time we see the task.
1701 void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
1703 if (unlikely(current->flags & PF_KTHREAD))
1706 if (!blk_get_queue(q))
1709 if (current->throttle_queue)
1710 blk_put_queue(current->throttle_queue);
1711 current->throttle_queue = q;
1713 current->use_memdelay = use_memdelay;
1714 set_notify_resume(current);
1718 * blkcg_add_delay - add delay to this blkg
1719 * @blkg: blkg of interest
1720 * @now: the current time in nanoseconds
1721 * @delta: how many nanoseconds of delay to add
1723 * Charge @delta to the blkg's current delay accumulation. This is used to
1724 * throttle tasks if an IO controller thinks we need more throttling.
1726 void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
1728 blkcg_scale_delay(blkg, now);
1729 atomic64_add(delta, &blkg->delay_nsec);
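/*
 * Illustrative sketch, not part of the original file: how an IO controller
 * charges delay it has measured and arranges for the issuing task to be
 * throttled on its way back to user space.  example_charge_overage() is
 * hypothetical.
 */
#if 0	/* example only */
static void example_charge_overage(struct blkcg_gq *blkg,
				   struct request_queue *q, u64 overage_ns)
{
	u64 now = ktime_to_ns(ktime_get());

	/* accumulate the debt on the blkg ... */
	blkcg_add_delay(blkg, now, overage_ns);
	/* ... and have current check it on return to user space */
	blkcg_schedule_throttle(q, true /* also account as a PSI memstall */);
}
#endif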
1732 module_param(blkcg_debug_stats, bool, 0644);
1733 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");