asedeno.scripts.mit.edu Git - linux.git/commitdiff
dm rq: fix a race condition in rq_completed()
author Bart Van Assche <bart.vanassche@sandisk.com>
Sat, 12 Nov 2016 01:05:27 +0000 (17:05 -0800)
committer Mike Snitzer <snitzer@redhat.com>
Mon, 14 Nov 2016 20:17:50 +0000 (15:17 -0500)
It is required to hold the queue lock when calling blk_run_queue_async()
to avoid triggering a race between blk_run_queue_async() and
blk_cleanup_queue().

Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-rq.c

index 1d0d2adc050a5539a4b430bd9e055f99e1abb5d7..31a89c8832c0ed48e7887930542cc4c8f39ecc10 100644 (file)
@@ -226,6 +226,9 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
  */
 static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
 {
+       struct request_queue *q = md->queue;
+       unsigned long flags;
+
        atomic_dec(&md->pending[rw]);
 
        /* nudge anyone waiting on suspend queue */
@@ -238,8 +241,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
-       if (!md->queue->mq_ops && run_queue)
-               blk_run_queue_async(md->queue);
+       if (!q->mq_ops && run_queue) {
+               spin_lock_irqsave(q->queue_lock, flags);
+               blk_run_queue_async(q);
+               spin_unlock_irqrestore(q->queue_lock, flags);
+       }
 
        /*
         * dm_put() must be at the end of this function. See the comment above