asedeno.scripts.mit.edu Git - linux.git/blobdiff - block/blk-mq-sched.c
tty: drop outdated comments about release_tty() locking
[linux.git] / block / blk-mq-sched.c
index ca22afd47b3dcce1ea72da7f4a6218c4ac9d85b5..856356b1619e83f05fc86fc37ed4a9086b413d86 100644 (file)
@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
                                       bool has_sched,
                                       struct request *rq)
 {
-       /* dispatch flush rq directly */
-       if (rq->rq_flags & RQF_FLUSH_SEQ) {
-               spin_lock(&hctx->lock);
-               list_add(&rq->queuelist, &hctx->dispatch);
-               spin_unlock(&hctx->lock);
+       /*
+        * dispatch flush and passthrough rq directly
+        *
+        * passthrough request has to be added to hctx->dispatch directly.
+        * For some reason, device may be in one situation which can't
+        * handle FS request, so STS_RESOURCE is always returned and the
+        * FS request will be added to hctx->dispatch. However passthrough
+        * request may be required at that time for fixing the problem. If
+        * passthrough request is added to scheduler queue, there isn't any
+        * chance to dispatch it given we prioritize requests in hctx->dispatch.
+        */
+       if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
                return true;
-       }
 
        if (has_sched)
                rq->rq_flags |= RQF_SORTED;
@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
 
        WARN_ON(e && (rq->tag != -1));
 
-       if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+       if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+               blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
+       }
 
        if (e && e->type->ops.insert_requests) {
                LIST_HEAD(list);