2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/parser.h>
20 #include <uapi/scsi/fc/fc_fs.h>
21 #include <uapi/scsi/fc/fc_els.h>
25 #include <linux/nvme-fc-driver.h>
26 #include <linux/nvme-fc.h>
29 /* *************************** Data Structures/Defines ****************** */
33 * We handle AEN commands ourselves and don't even let the
34 * block layer know about them.
36 #define NVME_FC_NR_AEN_COMMANDS 1
37 #define NVME_FC_AQ_BLKMQ_DEPTH \
38 (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
39 #define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
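/*
 * A quick worked example (a sketch, assuming the fabrics default
 * NVMF_AQ_DEPTH of 32): the admin blk-mq queue is sized to 31 tags, so
 * blk-mq-owned admin commands get rqno values below NVME_FC_AQ_BLKMQ_DEPTH,
 * while the single transport-private AEN op uses AEN_CMDID_BASE (32) and
 * can therefore be told apart from normal admin commands by rqno alone
 * (see nvme_fc_fcpio_done()).
 */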
41 enum nvme_fc_queue_flags {
42 NVME_FC_Q_CONNECTED = (1 << 0),
45 #define NVMEFC_QUEUE_DELAY 3 /* ms units */
47 struct nvme_fc_queue {
48 struct nvme_fc_ctrl *ctrl;
50 struct blk_mq_hw_ctx *hctx;
53 size_t cmnd_capsule_len;
62 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
64 struct nvmefc_ls_req_op {
65 struct nvmefc_ls_req ls_req;
67 struct nvme_fc_ctrl *ctrl;
68 struct nvme_fc_queue *queue;
72 struct completion ls_done;
73 struct list_head lsreq_list; /* ctrl->ls_req_list */
77 enum nvme_fcpop_state {
78 FCPOP_STATE_UNINIT = 0,
80 FCPOP_STATE_ACTIVE = 2,
81 FCPOP_STATE_ABORTED = 3,
84 struct nvme_fc_fcp_op {
85 struct nvme_request nreq; /*
88 * the 1st element in the
93 struct nvmefc_fcp_req fcp_req;
95 struct nvme_fc_ctrl *ctrl;
96 struct nvme_fc_queue *queue;
103 struct nvme_fc_cmd_iu cmd_iu;
104 struct nvme_fc_ersp_iu rsp_iu;
107 struct nvme_fc_lport {
108 struct nvme_fc_local_port localport;
111 struct list_head port_list; /* nvme_fc_port_list */
112 struct list_head endp_list;
113 struct device *dev; /* physical device for dma */
114 struct nvme_fc_port_template *ops;
116 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
118 struct nvme_fc_rport {
119 struct nvme_fc_remote_port remoteport;
121 struct list_head endp_list; /* for lport->endp_list */
122 struct list_head ctrl_list;
125 } __aligned(sizeof(u64)); /* alignment for other things alloc'd with */
127 enum nvme_fcctrl_state {
132 struct nvme_fc_ctrl {
134 struct nvme_fc_queue *queues;
138 struct nvme_fc_lport *lport;
139 struct nvme_fc_rport *rport;
146 struct list_head ctrl_list; /* rport->ctrl_list */
147 struct list_head ls_req_list;
149 struct blk_mq_tag_set admin_tag_set;
150 struct blk_mq_tag_set tag_set;
152 struct work_struct delete_work;
156 struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];
158 struct nvme_ctrl ctrl;
161 static inline struct nvme_fc_ctrl *
162 to_fc_ctrl(struct nvme_ctrl *ctrl)
164 return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
167 static inline struct nvme_fc_lport *
168 localport_to_lport(struct nvme_fc_local_port *portptr)
170 return container_of(portptr, struct nvme_fc_lport, localport);
173 static inline struct nvme_fc_rport *
174 remoteport_to_rport(struct nvme_fc_remote_port *portptr)
176 return container_of(portptr, struct nvme_fc_rport, remoteport);
179 static inline struct nvmefc_ls_req_op *
180 ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
182 return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
185 static inline struct nvme_fc_fcp_op *
186 fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
188 return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
193 /* *************************** Globals **************************** */
196 static DEFINE_SPINLOCK(nvme_fc_lock);
198 static LIST_HEAD(nvme_fc_lport_list);
199 static DEFINE_IDA(nvme_fc_local_port_cnt);
200 static DEFINE_IDA(nvme_fc_ctrl_cnt);
202 static struct workqueue_struct *nvme_fc_wq;
206 /* *********************** FC-NVME Port Management ************************ */
208 static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
209 static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
210 struct nvme_fc_queue *, unsigned int);
214 * nvme_fc_register_localport - transport entry point called by an
215 * LLDD to register the existence of a NVME host FC port.
217 * @pinfo: pointer to information about the port to be registered
218 * @template: LLDD entrypoints and operational parameters for the port
219 * @dev: physical hardware device node port corresponds to. Will be
220 * used for DMA mappings
221 * @lport_p: pointer to a local port pointer. Upon success, the routine
222 * will allocate a nvme_fc_local_port structure and place its
223 * address in the local port pointer. Upon failure, local port
224 * pointer will be set to 0.
227 * a completion status. Must be 0 upon success; a negative errno
228 * (ex: -ENXIO) upon failure.
231 nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
232 struct nvme_fc_port_template *template,
234 struct nvme_fc_local_port **portptr)
236 struct nvme_fc_lport *newrec;
240 if (!template->localport_delete || !template->remoteport_delete ||
241 !template->ls_req || !template->fcp_io ||
242 !template->ls_abort || !template->fcp_abort ||
243 !template->max_hw_queues || !template->max_sgl_segments ||
244 !template->max_dif_sgl_segments || !template->dma_boundary) {
246 goto out_reghost_failed;
249 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
253 goto out_reghost_failed;
256 idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
262 if (!get_device(dev) && dev) {
267 INIT_LIST_HEAD(&newrec->port_list);
268 INIT_LIST_HEAD(&newrec->endp_list);
269 kref_init(&newrec->ref);
270 newrec->ops = template;
272 ida_init(&newrec->endp_cnt);
273 newrec->localport.private = &newrec[1];
274 newrec->localport.node_name = pinfo->node_name;
275 newrec->localport.port_name = pinfo->port_name;
276 newrec->localport.port_role = pinfo->port_role;
277 newrec->localport.port_id = pinfo->port_id;
278 newrec->localport.port_state = FC_OBJSTATE_ONLINE;
279 newrec->localport.port_num = idx;
281 spin_lock_irqsave(&nvme_fc_lock, flags);
282 list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
283 spin_unlock_irqrestore(&nvme_fc_lock, flags);
286 dma_set_seg_boundary(dev, template->dma_boundary);
288 *portptr = &newrec->localport;
292 ida_simple_remove(&nvme_fc_local_port_cnt, idx);
300 EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
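/*
 * Usage sketch only (not called by this driver): an LLDD that has brought
 * up an FC port would register it roughly as below. The lldd_* names, the
 * pdev device and the role constant (taken as FC_PORT_ROLE_NVME_INITIATOR
 * from the LLDD API header) are illustrative assumptions; the template is
 * the LLDD's nvme_fc_port_template with the mandatory ops and sizing
 * fields validated above.
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = lldd_wwnn,
 *		.port_name = lldd_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *		.port_id   = lldd_port_id,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &lldd_fc_template,
 *					 &pdev->dev, &localport);
 *	if (ret)
 *		return ret;
 */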
303 nvme_fc_free_lport(struct kref *ref)
305 struct nvme_fc_lport *lport =
306 container_of(ref, struct nvme_fc_lport, ref);
309 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
310 WARN_ON(!list_empty(&lport->endp_list));
312 /* remove from transport list */
313 spin_lock_irqsave(&nvme_fc_lock, flags);
314 list_del(&lport->port_list);
315 spin_unlock_irqrestore(&nvme_fc_lock, flags);
317 /* let the LLDD know we've finished tearing it down */
318 lport->ops->localport_delete(&lport->localport);
320 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
321 ida_destroy(&lport->endp_cnt);
323 put_device(lport->dev);
329 nvme_fc_lport_put(struct nvme_fc_lport *lport)
331 kref_put(&lport->ref, nvme_fc_free_lport);
335 nvme_fc_lport_get(struct nvme_fc_lport *lport)
337 return kref_get_unless_zero(&lport->ref);
341 * nvme_fc_unregister_localport - transport entry point called by an
342 * LLDD to deregister/remove a previously
343 * registered NVME host FC port.
344 * @localport: pointer to the (registered) local port that is to be deregistered.
348 * a completion status. Must be 0 upon success; a negative errno
349 * (ex: -ENXIO) upon failure.
352 nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
354 struct nvme_fc_lport *lport = localport_to_lport(portptr);
360 spin_lock_irqsave(&nvme_fc_lock, flags);
362 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
363 spin_unlock_irqrestore(&nvme_fc_lock, flags);
366 portptr->port_state = FC_OBJSTATE_DELETED;
368 spin_unlock_irqrestore(&nvme_fc_lock, flags);
370 nvme_fc_lport_put(lport);
374 EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);
377 * nvme_fc_register_remoteport - transport entry point called by an
378 * LLDD to register the existence of a NVME
379 * subsystem FC port on its fabric.
380 * @localport: pointer to the (registered) local port that the remote
381 * subsystem port is connected to.
382 * @pinfo: pointer to information about the port to be registered
383 * @rport_p: pointer to a remote port pointer. Upon success, the routine
384 * will allocate a nvme_fc_remote_port structure and place its
385 * address in the remote port pointer. Upon failure, remote port
386 * pointer will be set to 0.
389 * a completion status. Must be 0 upon success; a negative errno
390 * (ex: -ENXIO) upon failure.
393 nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
394 struct nvme_fc_port_info *pinfo,
395 struct nvme_fc_remote_port **portptr)
397 struct nvme_fc_lport *lport = localport_to_lport(localport);
398 struct nvme_fc_rport *newrec;
402 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
406 goto out_reghost_failed;
409 if (!nvme_fc_lport_get(lport)) {
411 goto out_kfree_rport;
414 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
420 INIT_LIST_HEAD(&newrec->endp_list);
421 INIT_LIST_HEAD(&newrec->ctrl_list);
422 kref_init(&newrec->ref);
423 spin_lock_init(&newrec->lock);
424 newrec->remoteport.localport = &lport->localport;
425 newrec->remoteport.private = &newrec[1];
426 newrec->remoteport.port_role = pinfo->port_role;
427 newrec->remoteport.node_name = pinfo->node_name;
428 newrec->remoteport.port_name = pinfo->port_name;
429 newrec->remoteport.port_id = pinfo->port_id;
430 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
431 newrec->remoteport.port_num = idx;
433 spin_lock_irqsave(&nvme_fc_lock, flags);
434 list_add_tail(&newrec->endp_list, &lport->endp_list);
435 spin_unlock_irqrestore(&nvme_fc_lock, flags);
437 *portptr = &newrec->remoteport;
441 nvme_fc_lport_put(lport);
449 EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
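/*
 * Usage sketch only: after local port registration, the LLDD reports each
 * NVMe-capable target port it discovers in the same fashion. The target_*
 * values and FC_PORT_ROLE_NVME_TARGET are illustrative assumptions, and
 * "localport" is the pointer obtained from nvme_fc_register_localport().
 *
 *	struct nvme_fc_port_info rinfo = {
 *		.node_name = target_wwnn,
 *		.port_name = target_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_TARGET,
 *		.port_id   = target_port_id,
 *	};
 *	struct nvme_fc_remote_port *remoteport;
 *
 *	ret = nvme_fc_register_remoteport(localport, &rinfo, &remoteport);
 *
 * A successful registration takes a reference on the local port
 * (nvme_fc_lport_get() above), held until the remote port is freed.
 */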
452 nvme_fc_free_rport(struct kref *ref)
454 struct nvme_fc_rport *rport =
455 container_of(ref, struct nvme_fc_rport, ref);
456 struct nvme_fc_lport *lport =
457 localport_to_lport(rport->remoteport.localport);
460 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
461 WARN_ON(!list_empty(&rport->ctrl_list));
463 /* remove from lport list */
464 spin_lock_irqsave(&nvme_fc_lock, flags);
465 list_del(&rport->endp_list);
466 spin_unlock_irqrestore(&nvme_fc_lock, flags);
468 /* let the LLDD know we've finished tearing it down */
469 lport->ops->remoteport_delete(&rport->remoteport);
471 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
475 nvme_fc_lport_put(lport);
479 nvme_fc_rport_put(struct nvme_fc_rport *rport)
481 kref_put(&rport->ref, nvme_fc_free_rport);
485 nvme_fc_rport_get(struct nvme_fc_rport *rport)
487 return kref_get_unless_zero(&rport->ref);
491 * nvme_fc_unregister_remoteport - transport entry point called by an
492 * LLDD to deregister/remove a previously
493 * registered NVME subsystem FC port.
494 * @remoteport: pointer to the (registered) remote port that is to be deregistered.
498 * a completion status. Must be 0 upon success; a negative errno
499 * (ex: -ENXIO) upon failure.
502 nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
504 struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
505 struct nvme_fc_ctrl *ctrl;
511 spin_lock_irqsave(&rport->lock, flags);
513 if (portptr->port_state != FC_OBJSTATE_ONLINE) {
514 spin_unlock_irqrestore(&rport->lock, flags);
517 portptr->port_state = FC_OBJSTATE_DELETED;
519 /* tear down all associations to the remote port */
520 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
521 __nvme_fc_del_ctrl(ctrl);
523 spin_unlock_irqrestore(&rport->lock, flags);
525 nvme_fc_rport_put(rport);
528 EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
531 /* *********************** FC-NVME DMA Handling **************************** */
534 * The fcloop device passes in a NULL device pointer. Real LLDDs will
535 * pass in a valid device pointer. If NULL is passed to the dma mapping
536 * routines, depending on the platform, it may or may not succeed, and
540 * Wrap all the dma routines and check the dev pointer.
542 * For simple mappings (those that return just a dma address), we'll
543 * noop them, returning a dma address of 0.
545 * On more complex mappings (dma_map_sg), a pseudo routine fills
546 * in the scatter list, setting all dma addresses to 0.
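 *
 * For instance (illustrative, based on the wrappers below): with fcloop's
 * NULL dev, fc_dma_map_single() returns a dma address of 0 and
 * fc_dma_mapping_error() reports no error, so the request proceeds and the
 * pseudo LLDD works from the kernel virtual addresses it was handed rather
 * than real DMA addresses.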
549 static inline dma_addr_t
550 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
551 enum dma_data_direction dir)
553 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
557 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
559 return dev ? dma_mapping_error(dev, dma_addr) : 0;
563 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
564 enum dma_data_direction dir)
567 dma_unmap_single(dev, addr, size, dir);
571 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
572 enum dma_data_direction dir)
575 dma_sync_single_for_cpu(dev, addr, size, dir);
579 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
580 enum dma_data_direction dir)
583 dma_sync_single_for_device(dev, addr, size, dir);
586 /* pseudo dma_map_sg call */
588 fc_map_sg(struct scatterlist *sg, int nents)
590 struct scatterlist *s;
593 WARN_ON(nents == 0 || sg[0].length == 0);
595 for_each_sg(sg, s, nents, i) {
597 #ifdef CONFIG_NEED_SG_DMA_LENGTH
598 s->dma_length = s->length;
605 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
606 enum dma_data_direction dir)
608 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
612 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
613 enum dma_data_direction dir)
616 dma_unmap_sg(dev, sg, nents, dir);
620 /* *********************** FC-NVME LS Handling **************************** */
622 static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
623 static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
627 __nvme_fc_finish_ls_req(struct nvme_fc_ctrl *ctrl,
628 struct nvmefc_ls_req_op *lsop)
630 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
633 spin_lock_irqsave(&ctrl->lock, flags);
635 if (!lsop->req_queued) {
636 spin_unlock_irqrestore(&ctrl->lock, flags);
640 list_del(&lsop->lsreq_list);
642 lsop->req_queued = false;
644 spin_unlock_irqrestore(&ctrl->lock, flags);
646 fc_dma_unmap_single(ctrl->dev, lsreq->rqstdma,
647 (lsreq->rqstlen + lsreq->rsplen),
650 nvme_fc_ctrl_put(ctrl);
654 __nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl,
655 struct nvmefc_ls_req_op *lsop,
656 void (*done)(struct nvmefc_ls_req *req, int status))
658 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
662 if (!nvme_fc_ctrl_get(ctrl))
667 lsop->req_queued = false;
668 INIT_LIST_HEAD(&lsop->lsreq_list);
669 init_completion(&lsop->ls_done);
671 lsreq->rqstdma = fc_dma_map_single(ctrl->dev, lsreq->rqstaddr,
672 lsreq->rqstlen + lsreq->rsplen,
674 if (fc_dma_mapping_error(ctrl->dev, lsreq->rqstdma)) {
675 nvme_fc_ctrl_put(ctrl);
677 "els request command failed EFAULT.\n");
680 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
682 spin_lock_irqsave(&ctrl->lock, flags);
684 list_add_tail(&lsop->lsreq_list, &ctrl->ls_req_list);
686 lsop->req_queued = true;
688 spin_unlock_irqrestore(&ctrl->lock, flags);
690 ret = ctrl->lport->ops->ls_req(&ctrl->lport->localport,
691 &ctrl->rport->remoteport, lsreq);
693 lsop->ls_error = ret;
699 nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
701 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
703 lsop->ls_error = status;
704 complete(&lsop->ls_done);
708 nvme_fc_send_ls_req(struct nvme_fc_ctrl *ctrl, struct nvmefc_ls_req_op *lsop)
710 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
711 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
714 ret = __nvme_fc_send_ls_req(ctrl, lsop, nvme_fc_send_ls_req_done);
718 * No timeout/not interruptible: the struct must remain valid until
719 * the lldd calls us back, so mandate a wait until the driver calls
720 * back. The lldd is responsible for the timeout action.
723 wait_for_completion(&lsop->ls_done);
725 __nvme_fc_finish_ls_req(ctrl, lsop);
729 "ls request command failed (%d).\n", ret);
733 /* ACC or RJT payload ? */
734 if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
741 nvme_fc_send_ls_req_async(struct nvme_fc_ctrl *ctrl,
742 struct nvmefc_ls_req_op *lsop,
743 void (*done)(struct nvmefc_ls_req *req, int status))
747 ret = __nvme_fc_send_ls_req(ctrl, lsop, done);
749 /* don't wait for completion */
752 done(&lsop->ls_req, ret);
755 /* Validation Error indexes into the string table below */
759 VERR_LSDESC_RQST = 2,
760 VERR_LSDESC_RQST_LEN = 3,
762 VERR_ASSOC_ID_LEN = 5,
764 VERR_CONN_ID_LEN = 7,
766 VERR_CR_ASSOC_ACC_LEN = 9,
768 VERR_CR_CONN_ACC_LEN = 11,
770 VERR_DISCONN_ACC_LEN = 13,
773 static char *validation_errors[] = {
777 "Bad LSDESC_RQST Length",
778 "Not Association ID",
779 "Bad Association ID Length",
781 "Bad Connection ID Length",
783 "Bad CR_ASSOC ACC Length",
785 "Bad CR_CONN ACC Length",
786 "Not Disconnect Rqst",
787 "Bad Disconnect ACC Length",
791 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
792 struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
794 struct nvmefc_ls_req_op *lsop;
795 struct nvmefc_ls_req *lsreq;
796 struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
797 struct fcnvme_ls_cr_assoc_acc *assoc_acc;
800 lsop = kzalloc((sizeof(*lsop) +
801 ctrl->lport->ops->lsrqst_priv_sz +
802 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
807 lsreq = &lsop->ls_req;
809 lsreq->private = (void *)&lsop[1];
810 assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
811 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
812 assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];
814 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
815 assoc_rqst->desc_list_len =
816 cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
818 assoc_rqst->assoc_cmd.desc_tag =
819 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
820 assoc_rqst->assoc_cmd.desc_len =
822 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));
824 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
825 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
826 /* Linux supports only Dynamic controllers */
827 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
828 memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
829 min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
830 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
831 min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
832 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
833 min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));
836 lsreq->rqstaddr = assoc_rqst;
837 lsreq->rqstlen = sizeof(*assoc_rqst);
838 lsreq->rspaddr = assoc_acc;
839 lsreq->rsplen = sizeof(*assoc_acc);
840 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
842 ret = nvme_fc_send_ls_req(ctrl, lsop);
844 goto out_free_buffer;
846 /* process connect LS completion */
848 /* validate the ACC response */
849 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
851 if (assoc_acc->hdr.desc_list_len !=
853 sizeof(struct fcnvme_ls_cr_assoc_acc)))
854 fcret = VERR_CR_ASSOC_ACC_LEN;
855 if (assoc_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
856 fcret = VERR_LSDESC_RQST;
857 else if (assoc_acc->hdr.rqst.desc_len !=
858 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
859 fcret = VERR_LSDESC_RQST_LEN;
860 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
861 fcret = VERR_CR_ASSOC;
862 else if (assoc_acc->associd.desc_tag !=
863 cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
864 fcret = VERR_ASSOC_ID;
865 else if (assoc_acc->associd.desc_len !=
867 sizeof(struct fcnvme_lsdesc_assoc_id)))
868 fcret = VERR_ASSOC_ID_LEN;
869 else if (assoc_acc->connectid.desc_tag !=
870 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
871 fcret = VERR_CONN_ID;
872 else if (assoc_acc->connectid.desc_len !=
873 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
874 fcret = VERR_CONN_ID_LEN;
879 "q %d connect failed: %s\n",
880 queue->qnum, validation_errors[fcret]);
882 ctrl->association_id =
883 be64_to_cpu(assoc_acc->associd.association_id);
884 queue->connection_id =
885 be64_to_cpu(assoc_acc->connectid.connection_id);
886 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
894 "queue %d connect admin queue failed (%d).\n",
900 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
901 u16 qsize, u16 ersp_ratio)
903 struct nvmefc_ls_req_op *lsop;
904 struct nvmefc_ls_req *lsreq;
905 struct fcnvme_ls_cr_conn_rqst *conn_rqst;
906 struct fcnvme_ls_cr_conn_acc *conn_acc;
909 lsop = kzalloc((sizeof(*lsop) +
910 ctrl->lport->ops->lsrqst_priv_sz +
911 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
916 lsreq = &lsop->ls_req;
918 lsreq->private = (void *)&lsop[1];
919 conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
920 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
921 conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];
923 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
924 conn_rqst->desc_list_len = cpu_to_be32(
925 sizeof(struct fcnvme_lsdesc_assoc_id) +
926 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
928 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
929 conn_rqst->associd.desc_len =
931 sizeof(struct fcnvme_lsdesc_assoc_id));
932 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
933 conn_rqst->connect_cmd.desc_tag =
934 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
935 conn_rqst->connect_cmd.desc_len =
937 sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
938 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
939 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
940 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);
943 lsreq->rqstaddr = conn_rqst;
944 lsreq->rqstlen = sizeof(*conn_rqst);
945 lsreq->rspaddr = conn_acc;
946 lsreq->rsplen = sizeof(*conn_acc);
947 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
949 ret = nvme_fc_send_ls_req(ctrl, lsop);
951 goto out_free_buffer;
953 /* process connect LS completion */
955 /* validate the ACC response */
956 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
958 if (conn_acc->hdr.desc_list_len !=
959 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
960 fcret = VERR_CR_CONN_ACC_LEN;
961 if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
962 fcret = VERR_LSDESC_RQST;
963 else if (conn_acc->hdr.rqst.desc_len !=
964 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
965 fcret = VERR_LSDESC_RQST_LEN;
966 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
967 fcret = VERR_CR_CONN;
968 else if (conn_acc->connectid.desc_tag !=
969 cpu_to_be32(FCNVME_LSDESC_CONN_ID))
970 fcret = VERR_CONN_ID;
971 else if (conn_acc->connectid.desc_len !=
972 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
973 fcret = VERR_CONN_ID_LEN;
978 "q %d connect failed: %s\n",
979 queue->qnum, validation_errors[fcret]);
981 queue->connection_id =
982 be64_to_cpu(conn_acc->connectid.connection_id);
983 set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
991 "queue %d connect command failed (%d).\n",
997 nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
999 struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);
1000 struct nvme_fc_ctrl *ctrl = lsop->ctrl;
1002 __nvme_fc_finish_ls_req(ctrl, lsop);
1006 "disconnect assoc ls request command failed (%d).\n",
1009 /* fc-nvme initiator doesn't care about success or failure of cmd */
1015 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1016 * the FC-NVME Association. Terminating the association also
1017 * terminates the FC-NVME connections (per queue, both admin and io
1018 * queues) that are part of the association. That is, things are torn
1019 * down, and the related FC-NVME Association ID and Connection IDs
1022 * The behavior of the fc-nvme initiator is such that its
1023 * understanding of the association and connections will implicitly
1024 * be torn down. The action is implicit as it may be due to a loss of
1025 * connectivity with the fc-nvme target, so you may never get a
1026 * response even if you tried. As such, the action of this routine
1027 * is to asynchronously send the LS, ignore any results of the LS, and
1028 * continue on with terminating the association. If the fc-nvme target
1029 * is present and receives the LS, it too can tear down.
1032 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
1034 struct fcnvme_ls_disconnect_rqst *discon_rqst;
1035 struct fcnvme_ls_disconnect_acc *discon_acc;
1036 struct nvmefc_ls_req_op *lsop;
1037 struct nvmefc_ls_req *lsreq;
1039 lsop = kzalloc((sizeof(*lsop) +
1040 ctrl->lport->ops->lsrqst_priv_sz +
1041 sizeof(*discon_rqst) + sizeof(*discon_acc)),
1044 /* couldn't send it... too bad */
1047 lsreq = &lsop->ls_req;
1049 lsreq->private = (void *)&lsop[1];
1050 discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
1051 (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
1052 discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];
1054 discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
1055 discon_rqst->desc_list_len = cpu_to_be32(
1056 sizeof(struct fcnvme_lsdesc_assoc_id) +
1057 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1059 discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1060 discon_rqst->associd.desc_len =
1062 sizeof(struct fcnvme_lsdesc_assoc_id));
1064 discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
1066 discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
1067 FCNVME_LSDESC_DISCONN_CMD);
1068 discon_rqst->discon_cmd.desc_len =
1070 sizeof(struct fcnvme_lsdesc_disconn_cmd));
1071 discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
1072 discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);
1074 lsreq->rqstaddr = discon_rqst;
1075 lsreq->rqstlen = sizeof(*discon_rqst);
1076 lsreq->rspaddr = discon_acc;
1077 lsreq->rsplen = sizeof(*discon_acc);
1078 lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;
1080 nvme_fc_send_ls_req_async(ctrl, lsop, nvme_fc_disconnect_assoc_done);
1082 /* the only meaningful part of terminating the association */
1083 ctrl->association_id = 0;
1087 /* *********************** NVME Ctrl Routines **************************** */
1091 nvme_fc_reinit_request(void *data, struct request *rq)
1093 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1094 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1096 memset(cmdiu, 0, sizeof(*cmdiu));
1097 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1098 cmdiu->fc_id = NVME_CMD_FC_ID;
1099 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1100 memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));
1106 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
1107 struct nvme_fc_fcp_op *op)
1109 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1110 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1111 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1112 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1114 atomic_set(&op->state, FCPOP_STATE_UNINIT);
1118 nvme_fc_exit_request(void *data, struct request *rq,
1119 unsigned int hctx_idx, unsigned int rq_idx)
1121 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1123 return __nvme_fc_exit_request(data, op);
1127 nvme_fc_exit_aen_ops(struct nvme_fc_ctrl *ctrl)
1129 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
1132 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1133 if (atomic_read(&aen_op->state) == FCPOP_STATE_UNINIT)
1135 __nvme_fc_exit_request(ctrl, aen_op);
1136 nvme_fc_ctrl_put(ctrl);
1141 nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1143 struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
1144 struct request *rq = op->rq;
1145 struct nvmefc_fcp_req *freq = &op->fcp_req;
1146 struct nvme_fc_ctrl *ctrl = op->ctrl;
1147 struct nvme_fc_queue *queue = op->queue;
1148 struct nvme_completion *cqe = &op->rsp_iu.cqe;
1153 * The current linux implementation of an nvme controller
1154 * allocates a single tag set for all io queues and sizes
1155 * the io queues to fully hold all possible tags. Thus, the
1156 * implementation does not reference or care about the sqhd
1157 * value as it never needs to use the sqhd/sqtail pointers
1158 * for submission pacing.
1160 * This affects the FC-NVME implementation in two ways:
1161 * 1) As the value doesn't matter, we don't need to waste
1162 * cycles extracting it from ERSPs and stamping it in the
1163 * cases where the transport fabricates CQEs on successful
1165 * 2) The FC-NVME implementation requires that ERSP
1166 * completions be delivered to the nvme layer in order
1167 * relative to the rsn, such that the sqhd value will always
1168 * be "in order" for the nvme layer. As the nvme layer in
1169 * linux doesn't care about sqhd, there's no need to return
1173 * As the core nvme layer in linux currently does not look at
1174 * every field in the cqe - in cases where the FC transport must
1175 * fabricate a CQE, the following fields will not be set as they
1176 * are not referenced:
1177 * cqe.sqid, cqe.sqhd, cqe.command_id
1180 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
1181 sizeof(op->rsp_iu), DMA_FROM_DEVICE);
1183 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1184 status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
1186 status = freq->status;
1189 * For the linux implementation, if we have an unsuccessful
1190 * status, the blk-mq layer can typically be called with the
1191 * non-zero status and the content of the cqe isn't important.
1197 * command completed successfully relative to the wire
1198 * protocol. However, validate anything received and
1199 * extract the status and result from the cqe (create it
1203 switch (freq->rcv_rsplen) {
1206 case NVME_FC_SIZEOF_ZEROS_RSP:
1208 * No response payload or 12 bytes of payload (which
1209 * should all be zeros) are considered successful; the
1210 * transport places no payload in the CQE.
1212 if (freq->transferred_length !=
1213 be32_to_cpu(op->cmd_iu.data_len)) {
1217 op->nreq.result.u64 = 0;
1220 case sizeof(struct nvme_fc_ersp_iu):
1222 * The ERSP IU contains a full completion with CQE.
1223 * Validate ERSP IU and look at cqe.
1225 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
1226 (freq->rcv_rsplen / 4) ||
1227 be32_to_cpu(op->rsp_iu.xfrd_len) !=
1228 freq->transferred_length ||
1229 op->rqno != le16_to_cpu(cqe->command_id))) {
1233 op->nreq.result = cqe->result;
1234 status = le16_to_cpu(cqe->status) >> 1;
1243 if (!queue->qnum && op->rqno >= AEN_CMDID_BASE) {
1244 nvme_complete_async_event(&queue->ctrl->ctrl, status,
1246 nvme_fc_ctrl_put(ctrl);
1250 blk_mq_complete_request(rq, status);
1254 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
1255 struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
1256 struct request *rq, u32 rqno)
1258 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1261 memset(op, 0, sizeof(*op));
1262 op->fcp_req.cmdaddr = &op->cmd_iu;
1263 op->fcp_req.cmdlen = sizeof(op->cmd_iu);
1264 op->fcp_req.rspaddr = &op->rsp_iu;
1265 op->fcp_req.rsplen = sizeof(op->rsp_iu);
1266 op->fcp_req.done = nvme_fc_fcpio_done;
1267 op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
1268 op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
1274 cmdiu->scsi_id = NVME_CMD_SCSI_ID;
1275 cmdiu->fc_id = NVME_CMD_FC_ID;
1276 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
1278 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
1279 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
1280 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
1282 "FCP Op failed - cmdiu dma mapping failed.\n");
1287 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
1288 &op->rsp_iu, sizeof(op->rsp_iu),
1290 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
1292 "FCP Op failed - rspiu dma mapping failed.\n");
1296 atomic_set(&op->state, FCPOP_STATE_IDLE);
1302 nvme_fc_init_request(void *data, struct request *rq,
1303 unsigned int hctx_idx, unsigned int rq_idx,
1304 unsigned int numa_node)
1306 struct nvme_fc_ctrl *ctrl = data;
1307 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1308 struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];
1310 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1314 nvme_fc_init_admin_request(void *data, struct request *rq,
1315 unsigned int hctx_idx, unsigned int rq_idx,
1316 unsigned int numa_node)
1318 struct nvme_fc_ctrl *ctrl = data;
1319 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1320 struct nvme_fc_queue *queue = &ctrl->queues[0];
1322 return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
1326 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
1328 struct nvme_fc_fcp_op *aen_op;
1329 struct nvme_fc_cmd_iu *cmdiu;
1330 struct nvme_command *sqe;
1333 aen_op = ctrl->aen_ops;
1334 for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
1335 cmdiu = &aen_op->cmd_iu;
1337 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
1338 aen_op, (struct request *)NULL,
1339 (AEN_CMDID_BASE + i));
1343 memset(sqe, 0, sizeof(*sqe));
1344 sqe->common.opcode = nvme_admin_async_event;
1345 sqe->common.command_id = AEN_CMDID_BASE + i;
1352 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
1355 struct nvme_fc_queue *queue = &ctrl->queues[qidx];
1357 hctx->driver_data = queue;
1362 nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1363 unsigned int hctx_idx)
1365 struct nvme_fc_ctrl *ctrl = data;
1367 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);
1373 nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
1374 unsigned int hctx_idx)
1376 struct nvme_fc_ctrl *ctrl = data;
1378 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx);
1384 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
1386 struct nvme_fc_queue *queue;
1388 queue = &ctrl->queues[idx];
1389 memset(queue, 0, sizeof(*queue));
1392 atomic_set(&queue->csn, 1);
1393 queue->dev = ctrl->dev;
1396 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
1398 queue->cmnd_capsule_len = sizeof(struct nvme_command);
1400 queue->queue_size = queue_size;
1403 * Considered whether we should allocate buffers for all SQEs
1404 * and CQEs and dma map them - mapping their respective entries
1405 * into the request structures (kernel vm addr and dma address)
1406 * thus the driver could use the buffers/mappings directly.
1407 * It only makes sense if the LLDD would use them for its
1408 * messaging API. It's very unlikely most adapter APIs would use
1409 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
1410 * structures were used instead.
1415 * This routine terminates a queue at the transport level.
1416 * The transport has already ensured that all outstanding ios on
1417 * the queue have been terminated.
1418 * The transport will send a Disconnect LS request to terminate
1419 * the queue's connection. Termination of the admin queue will also
1420 * terminate the association at the target.
1423 nvme_fc_free_queue(struct nvme_fc_queue *queue)
1425 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
1429 * Current implementation never disconnects a single queue.
1430 * It always terminates a whole association. So there is never
1431 * a disconnect(queue) LS sent to the target.
1434 queue->connection_id = 0;
1435 clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
1439 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
1440 struct nvme_fc_queue *queue, unsigned int qidx)
1442 if (ctrl->lport->ops->delete_queue)
1443 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
1444 queue->lldd_handle);
1445 queue->lldd_handle = NULL;
1449 nvme_fc_destroy_admin_queue(struct nvme_fc_ctrl *ctrl)
1451 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
1452 blk_cleanup_queue(ctrl->ctrl.admin_q);
1453 blk_mq_free_tag_set(&ctrl->admin_tag_set);
1454 nvme_fc_free_queue(&ctrl->queues[0]);
1458 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
1462 for (i = 1; i < ctrl->queue_count; i++)
1463 nvme_fc_free_queue(&ctrl->queues[i]);
1467 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
1468 struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
1472 queue->lldd_handle = NULL;
1473 if (ctrl->lport->ops->create_queue)
1474 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
1475 qidx, qsize, &queue->lldd_handle);
1481 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
1483 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
1486 for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
1487 __nvme_fc_delete_hw_queue(ctrl, queue, i);
1491 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1493 struct nvme_fc_queue *queue = &ctrl->queues[1];
1496 for (i = 1; i < ctrl->queue_count; i++, queue++) {
1497 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
1506 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
1511 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
1515 for (i = 1; i < ctrl->queue_count; i++) {
1516 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
1520 ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
1529 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
1533 for (i = 1; i < ctrl->queue_count; i++)
1534 nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
1538 nvme_fc_ctrl_free(struct kref *ref)
1540 struct nvme_fc_ctrl *ctrl =
1541 container_of(ref, struct nvme_fc_ctrl, ref);
1542 unsigned long flags;
1544 if (ctrl->state != FCCTRL_INIT) {
1545 /* remove from rport list */
1546 spin_lock_irqsave(&ctrl->rport->lock, flags);
1547 list_del(&ctrl->ctrl_list);
1548 spin_unlock_irqrestore(&ctrl->rport->lock, flags);
1551 put_device(ctrl->dev);
1552 nvme_fc_rport_put(ctrl->rport);
1554 kfree(ctrl->queues);
1555 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
1556 nvmf_free_options(ctrl->ctrl.opts);
1561 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
1563 kref_put(&ctrl->ref, nvme_fc_ctrl_free);
1567 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
1569 return kref_get_unless_zero(&ctrl->ref);
1573 * All accesses from nvme core layer done - can now free the
1574 * controller. Called after last nvme_put_ctrl() call
1577 nvme_fc_free_nvme_ctrl(struct nvme_ctrl *nctrl)
1579 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
1581 WARN_ON(nctrl != &ctrl->ctrl);
1584 * Tear down the association, which will generate link
1585 * traffic to terminate connections
1588 if (ctrl->state != FCCTRL_INIT) {
1589 /* send a Disconnect(association) LS to fc-nvme target */
1590 nvme_fc_xmt_disconnect_assoc(ctrl);
1592 if (ctrl->ctrl.tagset) {
1593 blk_cleanup_queue(ctrl->ctrl.connect_q);
1594 blk_mq_free_tag_set(&ctrl->tag_set);
1595 nvme_fc_delete_hw_io_queues(ctrl);
1596 nvme_fc_free_io_queues(ctrl);
1599 nvme_fc_exit_aen_ops(ctrl);
1601 nvme_fc_destroy_admin_queue(ctrl);
1604 nvme_fc_ctrl_put(ctrl);
1609 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
1613 state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
1614 if (state != FCPOP_STATE_ACTIVE) {
1615 atomic_set(&op->state, state);
1616 return -ECANCELED; /* fail */
1619 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1620 &ctrl->rport->remoteport,
1621 op->queue->lldd_handle,
1627 enum blk_eh_timer_return
1628 nvme_fc_timeout(struct request *rq, bool reserved)
1630 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1631 struct nvme_fc_ctrl *ctrl = op->ctrl;
1635 return BLK_EH_RESET_TIMER;
1637 ret = __nvme_fc_abort_op(ctrl, op);
1639 /* io wasn't active to abort; consider it done */
1640 return BLK_EH_HANDLED;
1643 * TODO: force a controller reset
1644 * when that happens, queues will be torn down and outstanding
1645 * ios will be terminated, and the above abort, on a single io
1646 * will no longer be needed.
1649 return BLK_EH_HANDLED;
1653 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1654 struct nvme_fc_fcp_op *op)
1656 struct nvmefc_fcp_req *freq = &op->fcp_req;
1657 u32 map_len = nvme_map_len(rq);
1658 enum dma_data_direction dir;
1666 freq->sg_table.sgl = freq->first_sgl;
1667 ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
1668 freq->sg_table.sgl);
1672 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
1673 WARN_ON(op->nents > rq->nr_phys_segments);
1674 dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
1675 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
1677 if (unlikely(freq->sg_cnt <= 0)) {
1678 sg_free_table_chained(&freq->sg_table, true);
1684 * TODO: blk_integrity_rq(rq) for DIF
1690 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
1691 struct nvme_fc_fcp_op *op)
1693 struct nvmefc_fcp_req *freq = &op->fcp_req;
1698 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
1699 ((rq_data_dir(rq) == WRITE) ?
1700 DMA_TO_DEVICE : DMA_FROM_DEVICE));
1702 nvme_cleanup_cmd(rq);
1704 sg_free_table_chained(&freq->sg_table, true);
1710 * In FC, the queue is a logical thing. At transport connect, the target
1711 * creates its "queue" and returns a handle that is to be given to the
1712 * target whenever it posts something to the corresponding SQ. When an
1713 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
1714 * command contained within the SQE, an io, and assigns a FC exchange
1715 * to it. The SQE and the associated SQ handle are sent in the initial
1716 * CMD IU sent on the exchange. All transfers relative to the io occur
1717 * as part of the exchange. The CQE is the last thing for the io,
1718 * which is transferred (explicitly or implicitly) with the RSP IU
1719 * sent on the exchange. After the CQE is received, the FC exchange is
1720 * terminated and the exchange may be used on a different io.
1722 * The transport to LLDD api has the transport making a request for a
1723 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
1724 * resource and transfers the command. The LLDD will then process all
1725 * steps to complete the io. Upon completion, the transport done routine
1728 * So - while the operation is outstanding to the LLDD, there is a link
1729 * level FC exchange resource that is also outstanding. This must be
1730 * considered in all cleanup operations.
1733 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1734 struct nvme_fc_fcp_op *op, u32 data_len,
1735 enum nvmefc_fcp_datadir io_dir)
1737 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1738 struct nvme_command *sqe = &cmdiu->sqe;
1742 if (!nvme_fc_ctrl_get(ctrl))
1743 return BLK_MQ_RQ_QUEUE_ERROR;
1745 /* format the FC-NVME CMD IU and fcp_req */
1746 cmdiu->connection_id = cpu_to_be64(queue->connection_id);
1747 csn = atomic_inc_return(&queue->csn);
1748 cmdiu->csn = cpu_to_be32(csn);
1749 cmdiu->data_len = cpu_to_be32(data_len);
1751 case NVMEFC_FCP_WRITE:
1752 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
1754 case NVMEFC_FCP_READ:
1755 cmdiu->flags = FCNVME_CMD_FLAGS_READ;
1757 case NVMEFC_FCP_NODATA:
1761 op->fcp_req.payload_length = data_len;
1762 op->fcp_req.io_dir = io_dir;
1763 op->fcp_req.transferred_length = 0;
1764 op->fcp_req.rcv_rsplen = 0;
1765 op->fcp_req.status = 0;
1766 op->fcp_req.sqid = cpu_to_le16(queue->qnum);
1769 * validate per fabric rules, set fields mandated by fabric spec
1770 * as well as those by FC-NVME spec.
1772 WARN_ON_ONCE(sqe->common.metadata);
1773 WARN_ON_ONCE(sqe->common.dptr.prp1);
1774 WARN_ON_ONCE(sqe->common.dptr.prp2);
1775 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1778 * format SQE DPTR field per FC-NVME rules
1779 * type=data block descr; subtype=offset;
1780 * offset is currently 0.
1782 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
1783 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
1784 sqe->rw.dptr.sgl.addr = 0;
1786 /* odd that we set the command_id - should come from nvme-fabrics */
1787 WARN_ON_ONCE(sqe->common.command_id != cpu_to_le16(op->rqno));
1789 if (op->rq) { /* skipped on aens */
1790 ret = nvme_fc_map_data(ctrl, op->rq, op);
1792 dev_err(queue->ctrl->ctrl.device,
1793 "Failed to map data (%d)\n", ret);
1794 nvme_cleanup_cmd(op->rq);
1795 nvme_fc_ctrl_put(ctrl);
1796 return (ret == -ENOMEM || ret == -EAGAIN) ?
1797 BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
1801 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
1802 sizeof(op->cmd_iu), DMA_TO_DEVICE);
1804 atomic_set(&op->state, FCPOP_STATE_ACTIVE);
1807 blk_mq_start_request(op->rq);
1809 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
1810 &ctrl->rport->remoteport,
1811 queue->lldd_handle, &op->fcp_req);
1815 "Send nvme command failed - lldd returned %d.\n", ret);
1817 if (op->rq) { /* normal request */
1818 nvme_fc_unmap_data(ctrl, op->rq, op);
1819 nvme_cleanup_cmd(op->rq);
1821 /* else - aen. no cleanup needed */
1823 nvme_fc_ctrl_put(ctrl);
1826 return BLK_MQ_RQ_QUEUE_ERROR;
1829 blk_mq_stop_hw_queues(op->rq->q);
1830 blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
1832 return BLK_MQ_RQ_QUEUE_BUSY;
1835 return BLK_MQ_RQ_QUEUE_OK;
1839 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
1840 const struct blk_mq_queue_data *bd)
1842 struct nvme_ns *ns = hctx->queue->queuedata;
1843 struct nvme_fc_queue *queue = hctx->driver_data;
1844 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1845 struct request *rq = bd->rq;
1846 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1847 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
1848 struct nvme_command *sqe = &cmdiu->sqe;
1849 enum nvmefc_fcp_datadir io_dir;
1853 ret = nvme_setup_cmd(ns, rq, sqe);
1857 data_len = nvme_map_len(rq);
1859 io_dir = ((rq_data_dir(rq) == WRITE) ?
1860 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
1862 io_dir = NVMEFC_FCP_NODATA;
1864 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
1867 static struct blk_mq_tags *
1868 nvme_fc_tagset(struct nvme_fc_queue *queue)
1870 if (queue->qnum == 0)
1871 return queue->ctrl->admin_tag_set.tags[queue->qnum];
1873 return queue->ctrl->tag_set.tags[queue->qnum - 1];
1877 nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
1880 struct nvme_fc_queue *queue = hctx->driver_data;
1881 struct nvme_fc_ctrl *ctrl = queue->ctrl;
1882 struct request *req;
1883 struct nvme_fc_fcp_op *op;
1885 req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
1887 dev_err(queue->ctrl->ctrl.device,
1888 "tag 0x%x on QNum %#x not found\n",
1893 op = blk_mq_rq_to_pdu(req);
1895 if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
1896 (ctrl->lport->ops->poll_queue))
1897 ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
1898 queue->lldd_handle);
1900 return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
1904 nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
1906 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
1907 struct nvme_fc_fcp_op *aen_op;
1910 if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
1913 aen_op = &ctrl->aen_ops[aer_idx];
1915 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
1918 dev_err(ctrl->ctrl.device,
1919 "failed async event work [%d]\n", aer_idx);
1923 nvme_fc_complete_rq(struct request *rq)
1925 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
1926 struct nvme_fc_ctrl *ctrl = op->ctrl;
1927 int error = 0, state;
1929 state = atomic_xchg(&op->state, FCPOP_STATE_IDLE);
1931 nvme_cleanup_cmd(rq);
1933 nvme_fc_unmap_data(ctrl, rq, op);
1935 if (unlikely(rq->errors)) {
1936 if (nvme_req_needs_retry(rq, rq->errors)) {
1937 nvme_requeue_req(rq);
1941 if (rq->cmd_type == REQ_TYPE_DRV_PRIV)
1944 error = nvme_error_status(rq->errors);
1947 nvme_fc_ctrl_put(ctrl);
1949 blk_mq_end_request(rq, error);
1952 static struct blk_mq_ops nvme_fc_mq_ops = {
1953 .queue_rq = nvme_fc_queue_rq,
1954 .complete = nvme_fc_complete_rq,
1955 .init_request = nvme_fc_init_request,
1956 .exit_request = nvme_fc_exit_request,
1957 .reinit_request = nvme_fc_reinit_request,
1958 .init_hctx = nvme_fc_init_hctx,
1959 .poll = nvme_fc_poll,
1960 .timeout = nvme_fc_timeout,
1963 static struct blk_mq_ops nvme_fc_admin_mq_ops = {
1964 .queue_rq = nvme_fc_queue_rq,
1965 .complete = nvme_fc_complete_rq,
1966 .init_request = nvme_fc_init_admin_request,
1967 .exit_request = nvme_fc_exit_request,
1968 .reinit_request = nvme_fc_reinit_request,
1969 .init_hctx = nvme_fc_init_admin_hctx,
1970 .timeout = nvme_fc_timeout,
1974 nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
1979 nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);
1981 error = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
1982 NVME_FC_AQ_BLKMQ_DEPTH,
1983 (NVME_FC_AQ_BLKMQ_DEPTH / 4));
1987 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
1988 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
1989 ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
1990 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
1991 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
1992 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
1994 sizeof(struct scatterlist)) +
1995 ctrl->lport->ops->fcprqst_priv_sz;
1996 ctrl->admin_tag_set.driver_data = ctrl;
1997 ctrl->admin_tag_set.nr_hw_queues = 1;
1998 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
2000 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
2002 goto out_free_queue;
2004 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
2005 if (IS_ERR(ctrl->ctrl.admin_q)) {
2006 error = PTR_ERR(ctrl->ctrl.admin_q);
2007 goto out_free_tagset;
2010 error = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
2011 NVME_FC_AQ_BLKMQ_DEPTH);
2013 goto out_cleanup_queue;
2015 error = nvmf_connect_admin_queue(&ctrl->ctrl);
2017 goto out_delete_hw_queue;
2019 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
2021 dev_err(ctrl->ctrl.device,
2022 "prop_get NVME_REG_CAP failed\n");
2023 goto out_delete_hw_queue;
2027 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
2029 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
2031 goto out_delete_hw_queue;
2033 segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
2034 ctrl->lport->ops->max_sgl_segments);
2035 ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);
2037 error = nvme_init_identify(&ctrl->ctrl);
2039 goto out_delete_hw_queue;
2041 nvme_start_keep_alive(&ctrl->ctrl);
2045 out_delete_hw_queue:
2046 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
2048 blk_cleanup_queue(ctrl->ctrl.admin_q);
2050 blk_mq_free_tag_set(&ctrl->admin_tag_set);
2052 nvme_fc_free_queue(&ctrl->queues[0]);
2057 * This routine is used by the transport when it needs to find active
2058 * io on a queue that is to be terminated. The transport uses
2059 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
2060 * this routine to kill them one by one.
2062 * As FC allocates FC exchange for each io, the transport must contact
2063 * the LLDD to terminate the exchange, thus releasing the FC exchange.
2064 * After terminating the exchange the LLDD will call the transport's
2065 * normal io done path for the request, but it will have an aborted
2066 * status. The done path will return the io request back to the block
2067 * layer with an error status.
2070 nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
2072 struct nvme_ctrl *nctrl = data;
2073 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2074 struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
2077 if (!blk_mq_request_started(req))
2080 /* this performs an ABTS-LS on the FC exchange for the io */
2081 status = __nvme_fc_abort_op(ctrl, op);
2083 * if __nvme_fc_abort_op failed: the io wasn't active to abort, so
2084 * consider it done. Assume the completion path is already completing
2088 /* io wasn't active to abort; consider it done */
2089 /* assume completion path already completing in parallel */
2095 * This routine stops operation of the controller. Admin and IO queues
2096 * are stopped, outstanding ios on them terminated, and the nvme ctrl
2100 nvme_fc_shutdown_ctrl(struct nvme_fc_ctrl *ctrl)
2103 * If io queues are present, stop them and terminate all outstanding
2104 * ios on them. As FC allocates FC exchange for each io, the
2105 * transport must contact the LLDD to terminate the exchange,
2106 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
2107 * to tell us what io's are busy and invoke a transport routine
2108 * to kill them with the LLDD. After terminating the exchange
2109 * the LLDD will call the transport's normal io done path, but it
2110 * will have an aborted status. The done path will return the
2111 * io requests back to the block layer as part of normal completions
2112 * (but with error status).
2114 if (ctrl->queue_count > 1) {
2115 nvme_stop_queues(&ctrl->ctrl);
2116 blk_mq_tagset_busy_iter(&ctrl->tag_set,
2117 nvme_fc_terminate_exchange, &ctrl->ctrl);
2120 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
2121 nvme_shutdown_ctrl(&ctrl->ctrl);
2124 * now clean up the admin queue. Same thing as above.
2125 * use blk_mq_tagset_busy_iter() and the transport routine to
2126 * terminate the exchanges.
2128 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
2129 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
2130 nvme_fc_terminate_exchange, &ctrl->ctrl);
2134 * Called to teardown an association.
2135 * May be called with association fully in place or partially in place.
2138 __nvme_fc_remove_ctrl(struct nvme_fc_ctrl *ctrl)
2140 nvme_stop_keep_alive(&ctrl->ctrl);
2142 /* stop and terminate ios on admin and io queues */
2143 nvme_fc_shutdown_ctrl(ctrl);
2146 * tear down the controller
2147 * This will result in the last reference on the nvme ctrl to
2148 * expire, calling the transport nvme_fc_free_nvme_ctrl() callback.
2149 * From there, the transport will tear down its logical queues and
2152 nvme_uninit_ctrl(&ctrl->ctrl);
2154 nvme_put_ctrl(&ctrl->ctrl);
2158 nvme_fc_del_ctrl_work(struct work_struct *work)
2160 struct nvme_fc_ctrl *ctrl =
2161 container_of(work, struct nvme_fc_ctrl, delete_work);
2163 __nvme_fc_remove_ctrl(ctrl);
2167 __nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
2169 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
2172 if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
2179 * Request from nvme core layer to delete the controller
2182 nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
2184 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
2185 struct nvme_fc_rport *rport = ctrl->rport;
2186 unsigned long flags;
2189 spin_lock_irqsave(&rport->lock, flags);
2190 ret = __nvme_fc_del_ctrl(ctrl);
2191 spin_unlock_irqrestore(&rport->lock, flags);
2195 flush_work(&ctrl->delete_work);
2201 nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
2206 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
2208 .module = THIS_MODULE,
2210 .reg_read32 = nvmf_reg_read32,
2211 .reg_read64 = nvmf_reg_read64,
2212 .reg_write32 = nvmf_reg_write32,
2213 .reset_ctrl = nvme_fc_reset_nvme_ctrl,
2214 .free_ctrl = nvme_fc_free_nvme_ctrl,
2215 .submit_async_event = nvme_fc_submit_async_event,
2216 .delete_ctrl = nvme_fc_del_nvme_ctrl,
2217 .get_subsysnqn = nvmf_get_subsysnqn,
2218 .get_address = nvmf_get_address,
2222 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
2224 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2227 ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
2229 dev_info(ctrl->ctrl.device,
2230 "set_queue_count failed: %d\n", ret);
2234 ctrl->queue_count = opts->nr_io_queues + 1;
2235 if (!opts->nr_io_queues)
2238 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n",
2239 opts->nr_io_queues);
2241 nvme_fc_init_io_queues(ctrl);
2243 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
2244 ctrl->tag_set.ops = &nvme_fc_mq_ops;
2245 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
2246 ctrl->tag_set.reserved_tags = 1; /* fabric connect */
2247 ctrl->tag_set.numa_node = NUMA_NO_NODE;
2248 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
2249 ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
2251 sizeof(struct scatterlist)) +
2252 ctrl->lport->ops->fcprqst_priv_sz;
2253 ctrl->tag_set.driver_data = ctrl;
2254 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
2255 ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
2257 ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
2261 ctrl->ctrl.tagset = &ctrl->tag_set;
2263 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
2264 if (IS_ERR(ctrl->ctrl.connect_q)) {
2265 ret = PTR_ERR(ctrl->ctrl.connect_q);
2266 goto out_free_tag_set;
2269 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2271 goto out_cleanup_blk_queue;
2273 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
2275 goto out_delete_hw_queues;
2279 out_delete_hw_queues:
2280 nvme_fc_delete_hw_io_queues(ctrl);
2281 out_cleanup_blk_queue:
2282 nvme_stop_keep_alive(&ctrl->ctrl);
2283 blk_cleanup_queue(ctrl->ctrl.connect_q);
2285 blk_mq_free_tag_set(&ctrl->tag_set);
2286 nvme_fc_free_io_queues(ctrl);
2288 /* force put free routine to ignore io queues */
2289 ctrl->ctrl.tagset = NULL;
2295 static struct nvme_ctrl *
2296 __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
2297 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
2299 struct nvme_fc_ctrl *ctrl;
2300 unsigned long flags;
2304 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2310 idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
2316 ctrl->ctrl.opts = opts;
2317 INIT_LIST_HEAD(&ctrl->ctrl_list);
2318 INIT_LIST_HEAD(&ctrl->ls_req_list);
2319 ctrl->lport = lport;
2320 ctrl->rport = rport;
2321 ctrl->dev = lport->dev;
2322 ctrl->state = FCCTRL_INIT;
2325 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
2329 get_device(ctrl->dev);
2330 kref_init(&ctrl->ref);
2332 INIT_WORK(&ctrl->delete_work, nvme_fc_del_ctrl_work);
2333 spin_lock_init(&ctrl->lock);
2335 /* io queue count */
2336 ctrl->queue_count = min_t(unsigned int,
2338 lport->ops->max_hw_queues);
2339 opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */
2340 ctrl->queue_count++; /* +1 for admin queue */
2342 ctrl->ctrl.sqsize = opts->queue_size - 1;
2343 ctrl->ctrl.kato = opts->kato;
2346 ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
2349 goto out_uninit_ctrl;
2351 ret = nvme_fc_configure_admin_queue(ctrl);
2353 goto out_uninit_ctrl;
2357 /* FC-NVME supports 64-byte SQE only */
2358 if (ctrl->ctrl.ioccsz != 4) {
2359 dev_err(ctrl->ctrl.device, "ioccsz %d is not supported!\n",
2361 goto out_remove_admin_queue;
2363 /* FC-NVME supports 16-byte CQE only */
2364 if (ctrl->ctrl.iorcsz != 1) {
2365 dev_err(ctrl->ctrl.device, "iorcsz %d is not supported!\n",
2367 goto out_remove_admin_queue;
2369 /* FC-NVME does not have other data in the capsule */
2370 if (ctrl->ctrl.icdoff) {
2371 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
2373 goto out_remove_admin_queue;
2376 /* FC-NVME supports normal SGL Data Block Descriptors */
2378 if (opts->queue_size > ctrl->ctrl.maxcmd) {
2379 /* warn if maxcmd is lower than queue_size */
2380 dev_warn(ctrl->ctrl.device,
2381 "queue_size %zu > ctrl maxcmd %u, reducing "
2383 opts->queue_size, ctrl->ctrl.maxcmd);
2384 opts->queue_size = ctrl->ctrl.maxcmd;
2387 ret = nvme_fc_init_aen_ops(ctrl);
2389 goto out_exit_aen_ops;
2391 if (ctrl->queue_count > 1) {
2392 ret = nvme_fc_create_io_queues(ctrl);
2394 goto out_exit_aen_ops;
2397 spin_lock_irqsave(&ctrl->lock, flags);
2398 ctrl->state = FCCTRL_ACTIVE;
2399 spin_unlock_irqrestore(&ctrl->lock, flags);
2401 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
2402 WARN_ON_ONCE(!changed);
2404 dev_info(ctrl->ctrl.device,
2405 "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
2406 ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
2408 kref_get(&ctrl->ctrl.kref);
2410 spin_lock_irqsave(&rport->lock, flags);
2411 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
2412 spin_unlock_irqrestore(&rport->lock, flags);
2414 if (opts->nr_io_queues) {
2415 nvme_queue_scan(&ctrl->ctrl);
2416 nvme_queue_async_events(&ctrl->ctrl);
2422 nvme_fc_exit_aen_ops(ctrl);
2423 out_remove_admin_queue:
2424 /* send a Disconnect(association) LS to fc-nvme target */
2425 nvme_fc_xmt_disconnect_assoc(ctrl);
2426 nvme_stop_keep_alive(&ctrl->ctrl);
2427 nvme_fc_destroy_admin_queue(ctrl);
2429 nvme_uninit_ctrl(&ctrl->ctrl);
2430 nvme_put_ctrl(&ctrl->ctrl);
2433 /* exit via here will follow ctlr ref point callbacks to free */
2434 return ERR_PTR(ret);
2437 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
2441 nvme_fc_rport_put(rport);
2442 /* exit via here doesn't follow ctlr ref points */
2443 return ERR_PTR(ret);
2448 FCT_TRADDR_WWNN = 1 << 0,
2449 FCT_TRADDR_WWPN = 1 << 1,
2452 struct nvmet_fc_traddr {
2457 static const match_table_t traddr_opt_tokens = {
2458 { FCT_TRADDR_WWNN, "nn-%s" },
2459 { FCT_TRADDR_WWPN, "pn-%s" },
2460 { FCT_TRADDR_ERR, NULL }
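/*
 * Address format sketch (example WWNs; nvme-cli option spellings assumed):
 * both traddr and host_traddr are parsed below as "nn-<wwnn>:pn-<wwpn>",
 * e.g. a fabrics connect such as
 *
 *	nvme connect --transport=fc \
 *		--traddr "nn-0x20000090fa942779:pn-0x10000090fa942779" \
 *		--host-traddr "nn-0x20000090fa942723:pn-0x10000090fa942723" \
 *		--nqn <subsystem nqn>
 *
 * yields the wwnn/wwpn pairs that nvme_fc_create_ctrl() matches against the
 * registered lport and rport lists.
 */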
2464 nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
2466 substring_t args[MAX_OPT_ARGS];
2467 char *options, *o, *p;
2471 options = o = kstrdup(buf, GFP_KERNEL);
2475 while ((p = strsep(&o, ":\n")) != NULL) {
2479 token = match_token(p, traddr_opt_tokens, args);
2481 case FCT_TRADDR_WWNN:
2482 if (match_u64(args, &token64)) {
2486 traddr->nn = token64;
2488 case FCT_TRADDR_WWPN:
2489 if (match_u64(args, &token64)) {
2493 traddr->pn = token64;
2496 pr_warn("unknown traddr token or missing value '%s'\n",
2508 static struct nvme_ctrl *
2509 nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
2511 struct nvme_fc_lport *lport;
2512 struct nvme_fc_rport *rport;
2513 struct nvmet_fc_traddr laddr = { 0L, 0L };
2514 struct nvmet_fc_traddr raddr = { 0L, 0L };
2515 unsigned long flags;
2518 ret = nvme_fc_parse_address(&raddr, opts->traddr);
2519 if (ret || !raddr.nn || !raddr.pn)
2520 return ERR_PTR(-EINVAL);
2522 ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
2523 if (ret || !laddr.nn || !laddr.pn)
2524 return ERR_PTR(-EINVAL);
2526 /* find the host and remote ports to connect together */
2527 spin_lock_irqsave(&nvme_fc_lock, flags);
2528 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
2529 if (lport->localport.node_name != laddr.nn ||
2530 lport->localport.port_name != laddr.pn)
2533 list_for_each_entry(rport, &lport->endp_list, endp_list) {
2534 if (rport->remoteport.node_name != raddr.nn ||
2535 rport->remoteport.port_name != raddr.pn)
2538 /* if we fail to get a reference, fall through; will error */
2539 if (!nvme_fc_rport_get(rport))
2542 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2544 return __nvme_fc_create_ctrl(dev, opts, lport, rport);
2547 spin_unlock_irqrestore(&nvme_fc_lock, flags);
2549 return ERR_PTR(-ENOENT);
2553 static struct nvmf_transport_ops nvme_fc_transport = {
2555 .required_opts = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
2556 .allowed_opts = NVMF_OPT_RECONNECT_DELAY,
2557 .create_ctrl = nvme_fc_create_ctrl,
2560 static int __init nvme_fc_init_module(void)
2562 nvme_fc_wq = create_workqueue("nvme_fc_wq");
2566 nvmf_register_transport(&nvme_fc_transport);
2570 static void __exit nvme_fc_exit_module(void)
2572 /* sanity check - all lports should be removed */
2573 if (!list_empty(&nvme_fc_lport_list))
2574 pr_warn("%s: localport list not empty\n", __func__);
2576 nvmf_unregister_transport(&nvme_fc_transport);
2578 destroy_workqueue(nvme_fc_wq);
2580 ida_destroy(&nvme_fc_local_port_cnt);
2581 ida_destroy(&nvme_fc_ctrl_cnt);
2584 module_init(nvme_fc_init_module);
2585 module_exit(nvme_fc_exit_module);
2587 MODULE_LICENSE("GPL v2");