/*
 * blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-wbt.h"
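/*
 * Walk every hardware queue on @q, invoke the optional @exit hook so the
 * elevator can tear down whatever it attached to hctx->sched_data, then
 * free and clear the per-hctx scheduler data.
 */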
void blk_mq_sched_free_hctx_data(struct request_queue *q,
				 void (*exit)(struct blk_mq_hw_ctx *))
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (exit && hctx->sched_data)
			exit(hctx);
		kfree(hctx->sched_data);
		hctx->sched_data = NULL;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_free_hctx_data);
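/*
 * Look up, or create, the io_cq linking the submitting io_context to this
 * queue and attach it to @rq, pinning the io_context with a reference so
 * elevators that keep per-icq state can use it.
 */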
void blk_mq_sched_assign_ioc(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct io_context *ioc = rq_ioc(bio);
	struct io_cq *icq;

	spin_lock_irq(q->queue_lock);
	icq = ioc_lookup_icq(ioc, q);
	spin_unlock_irq(q->queue_lock);

	if (!icq) {
		icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
		if (!icq)
			return;
	}

	get_io_context(icq->ioc);
	rq->elv.icq = icq;
}
/*
 * Mark a hardware queue as needing a restart. For shared queues, maintain
 * a count of how many hardware queues are marked for restart.
 */
static void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
{
	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
		struct request_queue *q = hctx->queue;

		if (!test_and_set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			atomic_inc(&q->shared_hctx_restart);
	} else
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
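/*
 * Counterpart to blk_mq_sched_mark_restart_hctx(): if the hardware queue
 * was marked for restart, clear the flag and re-run the queue when it
 * still has pending work.
 */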
void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
		return;

	clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

	if (blk_mq_hctx_has_pending(hctx))
		blk_mq_run_hw_queue(hctx, true);
}
/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_sched(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	LIST_HEAD(rq_list);

	do {
		struct request *rq;
		blk_status_t ret;

		if (e->type->ops.mq.has_work &&
		    !e->type->ops.mq.has_work(hctx))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			break;

		rq = e->type->ops.mq.dispatch_request(hctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));
}
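/*
 * Return the software queue that follows @ctx on @hctx, wrapping back to
 * the first one; used to round-robin dispatch across software queues.
 */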
static struct blk_mq_ctx *blk_mq_next_ctx(struct blk_mq_hw_ctx *hctx,
					  struct blk_mq_ctx *ctx)
{
	unsigned idx = ctx->index_hw;

	if (++idx == hctx->nr_ctx)
		idx = 0;

	return hctx->ctxs[idx];
}
/*
 * Only SCSI implements .get_budget and .put_budget, and SCSI restarts
 * its queue by itself in its completion handler, so we don't need to
 * restart queue if .get_budget() returns BLK_STS_NO_RESOURCE.
 */
static void blk_mq_do_dispatch_ctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	LIST_HEAD(rq_list);
	struct blk_mq_ctx *ctx = READ_ONCE(hctx->dispatch_from);

	do {
		struct request *rq;
		blk_status_t ret;

		if (!sbitmap_any_bit_set(&hctx->ctx_map))
			break;

		ret = blk_mq_get_dispatch_budget(hctx);
		if (ret == BLK_STS_RESOURCE)
			break;

		rq = blk_mq_dequeue_from_ctx(hctx, ctx);
		if (!rq) {
			blk_mq_put_dispatch_budget(hctx);
			break;
		} else if (ret != BLK_STS_OK) {
			blk_mq_end_request(rq, ret);
			continue;
		}

		/*
		 * Now this rq owns the budget which has to be released
		 * if this rq won't be queued to driver via .queue_rq()
		 * in blk_mq_dispatch_rq_list().
		 */
		list_add(&rq->queuelist, &rq_list);

		/* round robin for fair dispatch */
		ctx = blk_mq_next_ctx(hctx, rq->mq_ctx);
	} while (blk_mq_dispatch_rq_list(q, &rq_list, true));

	WRITE_ONCE(hctx->dispatch_from, ctx);
}
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct elevator_queue *e = q->elevator;
	const bool has_sched_dispatch = e && e->type->ops.mq.dispatch_request;
	LIST_HEAD(rq_list);

	/* RCU or SRCU read lock is needed before checking quiesced flag */
	if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)))
		return;

	hctx->run++;

	/*
	 * If we have previous entries on our dispatch list, grab them first for
	 * more fair dispatch.
	 */
	if (!list_empty_careful(&hctx->dispatch)) {
		spin_lock(&hctx->lock);
		if (!list_empty(&hctx->dispatch))
			list_splice_init(&hctx->dispatch, &rq_list);
		spin_unlock(&hctx->lock);
	}

	/*
	 * Only ask the scheduler for requests if we didn't have residual
	 * requests from the dispatch list. This is to avoid the case where
	 * we only ever dispatch a fraction of the requests available because
	 * of low device queue depth. Once we pull requests out of the IO
	 * scheduler, we can no longer merge or sort them. So it's best to
	 * leave them there for as long as we can. Mark the hw queue as
	 * needing a restart in that case.
	 *
	 * We want to dispatch from the scheduler if there was nothing
	 * on the dispatch list or we were able to dispatch from the
	 * dispatch list.
	 */
	if (!list_empty(&rq_list)) {
		blk_mq_sched_mark_restart_hctx(hctx);
		if (blk_mq_dispatch_rq_list(q, &rq_list, false)) {
			if (has_sched_dispatch)
				blk_mq_do_dispatch_sched(hctx);
			else
				blk_mq_do_dispatch_ctx(hctx);
		}
	} else if (has_sched_dispatch) {
		blk_mq_do_dispatch_sched(hctx);
	} else if (q->mq_ops->get_budget) {
		/*
		 * If we need to get budget before queuing request, we
		 * dequeue request one by one from sw queue for avoiding
		 * to mess up I/O merge when dispatch runs out of resource.
		 *
		 * TODO: get more budgets, and dequeue more requests in
		 * one time.
		 */
		blk_mq_do_dispatch_ctx(hctx);
	} else {
		blk_mq_flush_busy_ctxs(hctx, &rq_list);
		blk_mq_dispatch_rq_list(q, &rq_list, false);
	}
}
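/*
 * Let the elevator pick a request that @bio can be merged into. If the
 * bio merge succeeds, the grown request may then be merged with its
 * neighbour; a request freed up by that second merge is handed back via
 * @merged_request so the caller can release it.
 */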
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
			    struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_back_merge(q, rq, bio))
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (!bio_attempt_front_merge(q, rq, bio))
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);
/*
 * Reverse check our software queue for entries that we could potentially
 * merge with. Currently includes a hand-wavy stop count of 8, to not spend
 * too much time checking for merges.
 */
static bool blk_mq_attempt_merge(struct request_queue *q,
				 struct blk_mq_ctx *ctx, struct bio *bio)
{
	struct request *rq;
	int checked = 8;

	lockdep_assert_held(&ctx->lock);

	list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
		bool merged = false;

		if (!checked--)
			break;

		if (!blk_rq_merge_ok(rq, bio))
			continue;

		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_back_merge(q, rq, bio);
			break;
		case ELEVATOR_FRONT_MERGE:
			if (blk_mq_sched_allow_merge(q, rq, bio))
				merged = bio_attempt_front_merge(q, rq, bio);
			break;
		case ELEVATOR_DISCARD_MERGE:
			merged = bio_attempt_discard_merge(q, rq, bio);
			break;
		default:
			continue;
		}

		if (merged)
			ctx->rq_merged++;
		return merged;
	}

	return false;
}
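/*
 * Called at bio submission time: give the elevator's ->bio_merge() first
 * shot at the bio, otherwise fall back to scanning this CPU's software
 * queue when the driver allows merging.
 */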
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	bool ret = false;

	if (e && e->type->ops.mq.bio_merge) {
		blk_mq_put_ctx(ctx);
		return e->type->ops.mq.bio_merge(hctx, bio);
	}

	if (hctx->flags & BLK_MQ_F_SHOULD_MERGE) {
		/* default per sw-queue merge */
		spin_lock(&ctx->lock);
		ret = blk_mq_attempt_merge(q, ctx, bio);
		spin_unlock(&ctx->lock);
	}

	blk_mq_put_ctx(ctx);
	return ret;
}
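/*
 * Try to merge @rq with a request that is already queued in the elevator
 * instead of inserting it as a new request.
 */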
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq)
{
	return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);

void blk_mq_sched_request_inserted(struct request *rq)
{
	trace_block_rq_insert(rq->q, rq);
}
EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
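/*
 * Decide whether @rq may skip the elevator: a request without a driver
 * tag is marked RQF_SORTED and handed to the scheduler (return false),
 * while a request that already owns a tag goes straight onto the hctx
 * dispatch list (return true).
 */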
static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
				       struct request *rq)
{
	if (rq->tag == -1) {
		rq->rq_flags |= RQF_SORTED;
		return false;
	}

	/*
	 * If we already have a real request tag, send directly to
	 * the dispatch list.
	 */
	spin_lock(&hctx->lock);
	list_add(&rq->queuelist, &hctx->dispatch);
	spin_unlock(&hctx->lock);

	return true;
}
/*
 * Add flush/fua to the queue. If we fail getting a driver tag, then
 * punt to the requeue list. Requeue will re-invoke us from a context
 * that's safe to block from.
 */
static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
				      struct request *rq, bool can_block)
{
	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(hctx, true);
	} else
		blk_mq_add_to_requeue_list(rq, false, true);
}
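/*
 * Insert a single request. Flush/FUA requests and requests that already
 * hold a driver tag bypass the elevator; everything else is handed to the
 * elevator's ->insert_requests() hook, or to the software queue when no
 * elevator is attached.
 */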
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
				 bool run_queue, bool async, bool can_block)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;
	struct blk_mq_ctx *ctx = rq->mq_ctx;
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);

	if (rq->tag == -1 && op_is_flush(rq->cmd_flags)) {
		blk_mq_sched_insert_flush(hctx, rq, can_block);
		return;
	}

	if (e && blk_mq_sched_bypass_insert(hctx, rq))
		goto run;

	if (e && e->type->ops.mq.insert_requests) {
		LIST_HEAD(list);

		list_add(&rq->queuelist, &list);
		e->type->ops.mq.insert_requests(hctx, &list, at_head);
	} else {
		spin_lock(&ctx->lock);
		__blk_mq_insert_request(hctx, rq, at_head);
		spin_unlock(&ctx->lock);
	}

run:
	if (run_queue)
		blk_mq_run_hw_queue(hctx, async);
}
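/*
 * Insert a batch of requests that all belong to the same software queue,
 * typically when a plug list is flushed. The whole list is passed to the
 * elevator in one go when it implements ->insert_requests().
 */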
void blk_mq_sched_insert_requests(struct request_queue *q,
				  struct blk_mq_ctx *ctx,
				  struct list_head *list, bool run_queue_async)
{
	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
	struct elevator_queue *e = hctx->queue->elevator;

	if (e) {
		struct request *rq, *next;

		/*
		 * We bypass requests that already have a driver tag assigned,
		 * which should only be flushes. Flushes are only ever inserted
		 * as single requests, so we shouldn't ever hit the
		 * WARN_ON_ONCE() below (but let's handle it just in case).
		 */
		list_for_each_entry_safe(rq, next, list, queuelist) {
			if (WARN_ON_ONCE(rq->tag != -1)) {
				list_del_init(&rq->queuelist);
				blk_mq_sched_bypass_insert(hctx, rq);
			}
		}
	}

	if (e && e->type->ops.mq.insert_requests)
		e->type->ops.mq.insert_requests(hctx, list, false);
	else
		blk_mq_insert_requests(hctx, ctx, list);

	blk_mq_run_hw_queue(hctx, run_queue_async);
}
static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	if (hctx->sched_tags) {
		blk_mq_free_rqs(set, hctx->sched_tags, hctx_idx);
		blk_mq_free_rq_map(hctx->sched_tags);
		hctx->sched_tags = NULL;
	}
}
static int blk_mq_sched_alloc_tags(struct request_queue *q,
				   struct blk_mq_hw_ctx *hctx,
				   unsigned int hctx_idx)
{
	struct blk_mq_tag_set *set = q->tag_set;
	int ret;

	hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests,
					       set->reserved_tags);
	if (!hctx->sched_tags)
		return -ENOMEM;

	ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests);
	if (ret)
		blk_mq_sched_free_tags(set, hctx, hctx_idx);

	return ret;
}
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_sched_free_tags(set, hctx, i);
}
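/*
 * Set up scheduler state for a single hardware queue: allocate its
 * scheduler tag map and call the elevator's ->init_hctx() if one is
 * provided.
 */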
int blk_mq_sched_init_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			   unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;
	int ret;

	if (!e)
		return 0;

	ret = blk_mq_sched_alloc_tags(q, hctx, hctx_idx);
	if (ret)
		return ret;

	if (e->type->ops.mq.init_hctx) {
		ret = e->type->ops.mq.init_hctx(hctx, hctx_idx);
		if (ret) {
			blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
			return ret;
		}
	}

	blk_mq_debugfs_register_sched_hctx(q, hctx);

	return 0;
}
void blk_mq_sched_exit_hctx(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
			    unsigned int hctx_idx)
{
	struct elevator_queue *e = q->elevator;

	if (!e)
		return;

	blk_mq_debugfs_unregister_sched_hctx(hctx);

	if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
		e->type->ops.mq.exit_hctx(hctx, hctx_idx);
		hctx->sched_data = NULL;
	}

	blk_mq_sched_free_tags(q->tag_set, hctx, hctx_idx);
}
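/*
 * Attach elevator @e to @q: size and allocate the per-hctx scheduler tag
 * maps, run the elevator's ->init_sched() and ->init_hctx() hooks and
 * register the debugfs attributes. On failure, everything allocated here
 * is torn down again.
 */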
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct blk_mq_hw_ctx *hctx;
	struct elevator_queue *eq;
	unsigned int i;
	int ret;

	if (!e) {
		q->elevator = NULL;
		return 0;
	}

	/*
	 * Default to double of smaller one between hw queue_depth and 128,
	 * since we don't split into sync/async like the old code did.
	 * Additionally, this is a per-hw queue depth.
	 */
	q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
				   BLKDEV_MAX_RQ);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_sched_alloc_tags(q, hctx, i);
		if (ret)
			goto err;
	}

	ret = e->ops.mq.init_sched(q, e);
	if (ret)
		goto err;

	blk_mq_debugfs_register_sched(q);

	queue_for_each_hw_ctx(q, hctx, i) {
		if (e->ops.mq.init_hctx) {
			ret = e->ops.mq.init_hctx(hctx, i);
			if (ret) {
				eq = q->elevator;
				blk_mq_exit_sched(q, eq);
				kobject_put(&eq->kobj);
				return ret;
			}
		}
		blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	return 0;

err:
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
	return ret;
}
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		blk_mq_debugfs_unregister_sched_hctx(hctx);
		if (e->type->ops.mq.exit_hctx && hctx->sched_data) {
			e->type->ops.mq.exit_hctx(hctx, i);
			hctx->sched_data = NULL;
		}
	}
	blk_mq_debugfs_unregister_sched(q);
	if (e->type->ops.mq.exit_sched)
		e->type->ops.mq.exit_sched(e);
	blk_mq_sched_tags_teardown(q);
	q->elevator = NULL;
}
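/*
 * Pick and initialize the default elevator for a freshly created blk-mq
 * queue, serialized against sysfs changes by q->sysfs_lock.
 */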
int blk_mq_sched_init(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = elevator_init(q, NULL);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}