asedeno.scripts.mit.edu Git - linux.git/commitdiff
nvmet_fc: prevent new io rqsts in possible isr completions
authorJames Smart <jsmart2021@gmail.com>
Wed, 28 Feb 2018 22:49:11 +0000 (14:49 -0800)
committerJens Axboe <axboe@kernel.dk>
Mon, 26 Mar 2018 14:53:43 +0000 (08:53 -0600)
When a bio completion calls back into the transport for a
back-end io device, the request completion path can free
the transport io job structure allowing it to be reused for
other operations. The transport has a defer_rcv queue which
holds temporary cmd rcv ops while waiting for io job structures.
When the job frees, if there's a cmd waiting, it is picked up
and submitted for processing, which can call back out to the
bio path if it's a read.  Unfortunately, what is unknown is the
context of the original bio done call, and it may be in a state
(softirq) that is not compatible with submitting the new bio in
the same calling sequence. This is especially true when using
scsi back-end devices as scsi is in softirq when it makes the
done call.

Correct by scheduling the io to be started via workq rather
than calling the start new io path inline to the original bio
done path.

Signed-off-by: James Smart <james.smart@broadcom.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/nvme/target/fc.c

index 9b39a6cb1935ae21c35b087c83407a5dd0542ef1..9f80f98d81d2c8cde41f8551b1819c73d5c33229 100644 (file)
@@ -87,6 +87,7 @@ struct nvmet_fc_fcp_iod {
        struct nvmet_req                req;
        struct work_struct              work;
        struct work_struct              done_work;
+       struct work_struct              defer_work;
 
        struct nvmet_fc_tgtport         *tgtport;
        struct nvmet_fc_tgt_queue       *queue;
@@ -224,6 +225,7 @@ static DEFINE_IDA(nvmet_fc_tgtport_cnt);
 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
+static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
@@ -429,6 +431,7 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
        for (i = 0; i < queue->sqsize; fod++, i++) {
                INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
                INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
+               INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
                fod->tgtport = tgtport;
                fod->queue = queue;
                fod->active = false;
@@ -511,6 +514,17 @@ nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
                nvmet_fc_handle_fcp_rqst(tgtport, fod);
 }
 
+static void
+nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
+{
+       struct nvmet_fc_fcp_iod *fod =
+               container_of(work, struct nvmet_fc_fcp_iod, defer_work);
+
+       /* Submit deferred IO for processing */
+       nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
+
+}
+
 static void
 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
                        struct nvmet_fc_fcp_iod *fod)
@@ -568,13 +582,12 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
        /* inform LLDD IO is now being processed */
        tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
 
-       /* Submit deferred IO for processing */
-       nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
-
        /*
         * Leave the queue lookup get reference taken when
         * fod was originally allocated.
         */
+
+       queue_work(queue->work_q, &fod->defer_work);
 }
 
 static int