asedeno.scripts.mit.edu Git - linux.git/blobdiff - block/blk-mq.c
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
[linux.git] / block / blk-mq.c
index 20a49be536b512dcaf5092334f45fd0e1b691c83..ec791156e9ccd1b17cc9c07b98f55b08f2a705de 100644 (file)
@@ -30,6 +30,7 @@
 #include <trace/events/block.h>
 
 #include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-debugfs.h"
@@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
                 */
                rq->nr_phys_segments++;
        }
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+       if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+               q->integrity.profile->prepare_fn(rq);
+#endif
 }
 EXPORT_SYMBOL(blk_mq_start_request);
 
@@ -912,7 +918,10 @@ static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
         */
        if (blk_mq_req_expired(rq, next))
                blk_mq_rq_timed_out(rq, reserved);
-       if (refcount_dec_and_test(&rq->ref))
+
+       if (is_flush_rq(rq, hctx))
+               rq->end_io(rq, 0);
+       else if (refcount_dec_and_test(&rq->ref))
                __blk_mq_free_request(rq);
 
        return true;
@@ -1983,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                /* bypass scheduler for flush rq */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+                               !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
                 * we know the driver uses bd->last in a smart fashion.
+                *
+                * Use normal plugging if this disk is slow HDD, as sequential
+                * IO may benefit a lot from plug merging.
                 */
                unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
@@ -2003,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
 
                blk_add_rq_to_plug(plug, rq);
+       } else if (q->elevator) {
+               blk_mq_sched_insert_request(rq, false, true, true);
        } else if (plug && !blk_queue_nomerges(q)) {
                /*
                 * We do limited plugging. If the bio can be merged, do that.
@@ -2026,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
                }
-       } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
-                       !data.hctx->dispatch_busy)) {
+       } else if ((q->nr_hw_queues > 1 && is_sync) ||
+                       !data.hctx->dispatch_busy) {
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_sched_insert_request(rq, false, true, true);