nvme-pci: consolidate code for polling non-dedicated queues
author     Christoph Hellwig <hch@lst.de>
           Sun, 2 Dec 2018 16:46:20 +0000 (17:46 +0100)
committer  Jens Axboe <axboe@kernel.dk>
           Tue, 4 Dec 2018 18:38:18 +0000 (11:38 -0700)
We have three places that can poll for I/O completions on a normal
interrupt-enabled queue.  All of them are in slow-path code, so
consolidate them into a single helper that uses spin_lock_irqsave and
removes the fast-path nvme_cqe_pending check.

Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
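
The locking change is the heart of the consolidation: spin_lock_irq()
force-enables interrupts on unlock, so it is only safe where IRQs are
known to be on, while spin_lock_irqsave() restores whatever state the
caller had and can therefore be shared by every context.  A minimal
sketch of the pattern, with a hypothetical poll_any_context() helper
standing in for the driver code:

    #include <linux/spinlock.h>

    /*
     * Illustrative sketch, not part of the patch: spin_lock_irqsave()
     * saves the caller's interrupt state in 'flags' and restores it on
     * unlock, so this helper is safe from process context, from timeout
     * handlers, and from sections that already run with IRQs disabled.
     * spin_lock_irq()/spin_unlock_irq() would instead force interrupts
     * back on, which is only correct when they were on to begin with.
     */
    static void poll_any_context(spinlock_t *cq_lock)
    {
            unsigned long flags;

            spin_lock_irqsave(cq_lock, flags);
            /* ... reap completion queue entries under the lock ... */
            spin_unlock_irqrestore(cq_lock, flags);
    }

Dropping the nvme_cqe_pending() short-circuit is acceptable here
because all three remaining callers are slow paths, where taking the
lock on an empty queue costs nothing that matters.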
drivers/nvme/host/pci.c

index d42bb76e5e7815dec7244df99f1288b6f2b797b1..10c26a2e355aec740fc78bef38604828b9115925 100644
@@ -1072,17 +1072,19 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
        return IRQ_NONE;
 }
 
-static int __nvme_poll(struct nvme_queue *nvmeq, unsigned int tag)
+/*
+ * Poll for completions on any queue, including those not dedicated to polling.
+ * Can be called from any context.
+ */
+static int nvme_poll_irqdisable(struct nvme_queue *nvmeq, unsigned int tag)
 {
+       unsigned long flags;
        u16 start, end;
        int found;
 
-       if (!nvme_cqe_pending(nvmeq))
-               return 0;
-
-       spin_lock_irq(&nvmeq->cq_lock);
+       spin_lock_irqsave(&nvmeq->cq_lock, flags);
        found = nvme_process_cq(nvmeq, &start, &end, tag);
-       spin_unlock_irq(&nvmeq->cq_lock);
+       spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
 
        nvme_complete_cqes(nvmeq, start, end);
        return found;
@@ -1279,7 +1281,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        /*
         * Did we miss an interrupt?
         */
-       if (__nvme_poll(nvmeq, req->tag)) {
+       if (nvme_poll_irqdisable(nvmeq, req->tag)) {
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, completion polled\n",
                         req->tag, nvmeq->qid);
@@ -1406,18 +1408,13 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
 {
        struct nvme_queue *nvmeq = &dev->queues[0];
-       u16 start, end;
 
        if (shutdown)
                nvme_shutdown_ctrl(&dev->ctrl);
        else
                nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap);
 
-       spin_lock_irq(&nvmeq->cq_lock);
-       nvme_process_cq(nvmeq, &start, &end, -1);
-       spin_unlock_irq(&nvmeq->cq_lock);
-
-       nvme_complete_cqes(nvmeq, start, end);
+       nvme_poll_irqdisable(nvmeq, -1);
 }
 
 static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
@@ -2217,17 +2214,9 @@ static void nvme_del_queue_end(struct request *req, blk_status_t error)
 static void nvme_del_cq_end(struct request *req, blk_status_t error)
 {
        struct nvme_queue *nvmeq = req->end_io_data;
-       u16 start, end;
 
-       if (!error) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&nvmeq->cq_lock, flags);
-               nvme_process_cq(nvmeq, &start, &end, -1);
-               spin_unlock_irqrestore(&nvmeq->cq_lock, flags);
-
-               nvme_complete_cqes(nvmeq, start, end);
-       }
+       if (!error)
+               nvme_poll_irqdisable(nvmeq, -1);
 
        nvme_del_queue_end(req, error);
 }