blk-mq: insert passthrough request into hctx->dispatch directly
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a12b1763508d3194853b6d33a057a62da62ea0e0..5e1e4151cb51876bdbd585e29f14abe7a04623af 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
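
The core of the change is a wider signature: blk_mq_request_bypass_insert()
gains an at_head flag so callers can choose front-of-queue insertion into
hctx->dispatch. The matching declaration change in block/blk-mq.h is not part
of this blobdiff; the updated prototype would presumably read:

    void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
                                      bool run_queue);
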
@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
                 * merge.
                 */
                if (rq->rq_flags & RQF_DONTPREP)
-                       blk_mq_request_bypass_insert(rq, false);
+                       blk_mq_request_bypass_insert(rq, false, false);
                else
                        blk_mq_sched_insert_request(rq, true, false, false);
        }
@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
                        q->mq_ops->commit_rqs(hctx);
 
                spin_lock(&hctx->lock);
-               list_splice_init(list, &hctx->dispatch);
+               list_splice_tail_init(list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
 
                /*
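
Why splice at the tail: the requests being parked here could not be issued,
and hctx->dispatch may already hold older parked requests. list_splice_init()
would put the newcomers in front of those; list_splice_tail_init() keeps
arrival order, so the oldest requests are retried first. A standalone
userspace sketch of the two primitives (the names mirror the kernel's
include/linux/list.h, but this is an illustrative reimplementation, not
kernel code):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    /* Link the chain list->next .. list->prev between prev and next. */
    static void splice(const struct list_head *list,
                       struct list_head *prev, struct list_head *next)
    {
            struct list_head *first = list->next, *last = list->prev;

            first->prev = prev; prev->next = first;
            last->next = next;  next->prev = last;
    }

    /* Mirrors list_splice_init(): splice at the head of @head, empty @list. */
    static void list_splice_init(struct list_head *list, struct list_head *head)
    {
            if (list->next != list) {
                    splice(list, head, head->next);
                    INIT_LIST_HEAD(list);
            }
    }

    /* Mirrors list_splice_tail_init(): splice at the tail of @head instead. */
    static void list_splice_tail_init(struct list_head *list,
                                      struct list_head *head)
    {
            if (list->next != list) {
                    splice(list, head->prev, head);
                    INIT_LIST_HEAD(list);
            }
    }

    struct req { struct list_head node; const char *name; };

    static void enqueue(struct req *r, struct list_head *h)
    {
            r->node.prev = h->prev; r->node.next = h;
            h->prev->next = &r->node; h->prev = &r->node;
    }

    static void dump(const char *tag, struct list_head *h)
    {
            printf("%s:", tag);
            for (struct list_head *p = h->next; p != h; p = p->next)
                    printf(" %s", ((struct req *)p)->name); /* node is 1st member */
            printf("\n");
    }

    int main(void)
    {
            struct list_head dispatch, leftovers;
            struct req parked = { .name = "parked" };
            struct req new1 = { .name = "new1" }, new2 = { .name = "new2" };
            int pass;

            for (pass = 0; pass < 2; pass++) {
                    INIT_LIST_HEAD(&dispatch); INIT_LIST_HEAD(&leftovers);
                    enqueue(&parked, &dispatch);   /* already on hctx->dispatch */
                    enqueue(&new1, &leftovers);    /* un-issued remainder */
                    enqueue(&new2, &leftovers);
                    if (pass == 0) {
                            list_splice_init(&leftovers, &dispatch);
                            dump("head splice (old)", &dispatch); /* new1 new2 parked */
                    } else {
                            list_splice_tail_init(&leftovers, &dispatch);
                            dump("tail splice (new)", &dispatch); /* parked new1 new2 */
                    }
            }
            return 0;
    }
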
@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+                                 bool run_queue)
 {
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
 
        spin_lock(&hctx->lock);
-       list_add_tail(&rq->queuelist, &hctx->dispatch);
+       if (at_head)
+               list_add(&rq->queuelist, &hctx->dispatch);
+       else
+               list_add_tail(&rq->queuelist, &hctx->dispatch);
        spin_unlock(&hctx->lock);
 
        if (run_queue)
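
Here the new flag maps at_head=true to list_add(), i.e. the front of
hctx->dispatch, and at_head=false to list_add_tail(). Per the commit subject,
the expected front-insertion user is the scheduler insert path for passthrough
requests, which lives in blk-mq-sched.c and is outside this blobdiff. A hedged
sketch of what such a call site could look like (blk_rq_is_passthrough() is an
existing helper; the surrounding control flow here is an assumption, not the
verbatim companion change):

    /* Illustrative sketch only: route passthrough requests straight to
     * hctx->dispatch so no I/O scheduler can merge or reorder them. */
    if (blk_rq_is_passthrough(rq)) {
            blk_mq_request_bypass_insert(rq, at_head, run_queue);
            return;
    }
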
@@ -1849,7 +1853,7 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
        if (bypass_insert)
                return BLK_STS_RESOURCE;
 
-       blk_mq_request_bypass_insert(rq, run_queue);
+       blk_mq_request_bypass_insert(rq, false, run_queue);
        return BLK_STS_OK;
 }
 
@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-               blk_mq_request_bypass_insert(rq, true);
+               blk_mq_request_bypass_insert(rq, false, true);
        else if (ret != BLK_STS_OK)
                blk_mq_end_request(rq, ret);
 
@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                if (ret != BLK_STS_OK) {
                        if (ret == BLK_STS_RESOURCE ||
                                        ret == BLK_STS_DEV_RESOURCE) {
-                               blk_mq_request_bypass_insert(rq,
+                               blk_mq_request_bypass_insert(rq, false,
                                                        list_empty(list));
                                break;
                        }
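
Note the pattern across the three direct-issue fallbacks above: each passes
at_head=false, so a request that bounced with BLK_STS_RESOURCE or
BLK_STS_DEV_RESOURCE is retried behind anything already parked, consistent
with the tail-splice change earlier in the patch. In the list variant,
run_queue is list_empty(list): the queue is kicked only when the failed
request was the last one handed in, presumably because the caller otherwise
still holds requests and will insert them and run the queue itself.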