/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"
/*
 * We allow up to a page of inline data to go with the SQE
 */
#define NVMET_RDMA_INLINE_DATA_SIZE	PAGE_SIZE
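/*
 * The inline payload is received into the second SGE of an I/O command's
 * RECV; admin commands post only the single SQE SGE (see the
 * "admin ? 1 : 2" num_sge choice in nvmet_rdma_alloc_cmd() below).
 */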
struct nvmet_rdma_cmd {
	struct ib_sge sge[2];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg;
	struct page *inline_page;
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};
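/*
 * One nvmet_rdma_rsp is taken from the per-queue free list for every
 * command in flight: it carries the SEND for the NVMe completion, the
 * rdma_rw context used for any RDMA READ/WRITE data transfer, and the
 * list linkage for the free/wait lists.
 */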
struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};
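/*
 * Queue lifetime: a queue is CONNECTING from the CM connect request until
 * RDMA_CM_EVENT_ESTABLISHED flips it to LIVE, and DISCONNECTING once
 * teardown starts; IN_DEVICE_REMOVAL is entered instead when the
 * underlying IB device is being unplugged.
 */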
enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
	NVMET_RDMA_IN_DEVICE_REMOVAL,
};
struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};
struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *srq_cmds;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);

static struct nvmet_fabrics_ops nvmet_rdma_ops;
/* XXX: really should move to a generic header sooner or later.. */
static inline u32 get_unaligned_le24(const u8 *p)
{
	return (u32)p[0] | (u32)p[1] << 8 | (u32)p[2] << 16;
}
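/*
 * Example: a 3-byte keyed-SGL length field of { 0x00, 0x10, 0x00 }
 * decodes to 0x001000, i.e. a 4096-byte transfer.
 */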
static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.data_len &&
		!rsp->req.rsp->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
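/*
 * The two helpers below build and tear down the page-backed scatterlist
 * that keyed SGLs are RDMA-read into / written from; inline data instead
 * reuses the pre-mapped inline_page via nvmet_rdma_use_inline_sg().
 */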
static void nvmet_rdma_free_sgl(struct scatterlist *sgl, unsigned int nents)
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, count)
		__free_page(sg_page(sg));

static int nvmet_rdma_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
		u32 length)
	struct scatterlist *sg;

	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);

	sg_init_table(sg, nent);

		u32 page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);

		sg_set_page(&sg[i], page, page_len, 0);

		__free_page(sg_page(&sg[i]));

	return NVME_SC_INTERNAL;
static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	c->inline_page = alloc_pages(GFP_KERNEL,
			get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	c->sge[1].addr = ib_dma_map_page(ndev->device,
			c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE,
			DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[1].addr))
		goto out_free_inline_page;
	c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE;
	c->sge[1].lkey = ndev->pd->local_dma_lkey;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : 2;

out_free_inline_page:
	__free_pages(c->inline_page,
			get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
	ib_dma_unmap_page(ndev->device, c->sge[1].addr,
			NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE);
	__free_pages(c->inline_page,
			get_order(NVMET_RDMA_INLINE_DATA_SIZE));
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);

		nvmet_rdma_free_cmd(ndev, cmds + i, admin);

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
	/* NVMe CQE / RDMA SEND */
	r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))

	r->send_sge.length = sizeof(*r->req.rsp);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
			sizeof(*r->req.rsp), DMA_TO_DEVICE);
static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
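	/*
	 * Twice the RECV queue depth: the RECV buffer is reposted from
	 * nvmet_rdma_queue_response() before the completion SEND has
	 * finished, so more responses than RECV slots can be in flight.
	 */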
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);

		list_add_tail(&rsp->free_list, &queue->free_rsps);

		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
	struct ib_recv_wr *bad_wr;

	ib_dma_sync_single_for_device(ndev->device,
			cmd->sge[0].addr, cmd->sge[0].length,
			DMA_FROM_DEVICE);

	if (ndev->srq)
		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
	spin_unlock(&queue->rsp_wr_wait_lock);
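/*
 * nvmet_rdma_release_rsp() runs once a response is done (normally from
 * the SEND completion): it returns the send-queue work-request budget,
 * tears down the rdma_rw context, frees a non-inline SGL and retries
 * any commands that were parked on rsp_wr_wait_list.
 */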
static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));

	if (rsp->req.sg != &rsp->cmd->inline_sg)
		nvmet_rdma_free_sgl(rsp->req.sg, rsp->req.sg_cnt);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
		/*
		 * we didn't set up the controller yet in case
		 * of admin connect error, just disconnect and
		 * clean up the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(rsp->queue);
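/*
 * ->queue_response() fabrics callback: chains any RDMA WRITE work
 * requests for Data-Out in front of the completion SEND via
 * rdma_rw_ctx_wrs(), and reposts the command's RECV buffer before
 * posting the response.
 */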
static void nvmet_rdma_queue_response(struct nvmet_req *req)
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr, *bad_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp))
		first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
				cm_id->port_num, NULL, &rsp->send_wr);
	else
		first_wr = &rsp->send_wr;

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
			rsp->send_sge.addr, rsp->send_sge.length,
			DMA_TO_DEVICE);

	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
			queue->cm_id->port_num, rsp->req.sg,
			rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);

	rsp->req.execute(&rsp->req);

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
	sg_init_table(&rsp->cmd->inline_sg, 1);
	sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off);
	rsp->req.sg = &rsp->cmd->inline_sg;
static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd))
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

	if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;

	/* no data command? */

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u64 addr = le64_to_cpu(sgl->addr);
	u32 len = get_unaligned_le24(sgl->length);
	u32 key = get_unaligned_le32(sgl->key);

	/* no data command? */

	status = nvmet_rdma_alloc_sgl(&rsp->req.sg, &rsp->req.sg_cnt,
			len);

	ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
			rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
			nvmet_data_dir(&rsp->req));
		return NVME_SC_INTERNAL;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
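/*
 * nvmet_rdma_map_sgl() below dispatches on the SGL descriptor type:
 * keyed SGLs describe host memory that is transferred with RDMA
 * READ/WRITE into a locally allocated scatterlist, while offset
 * ("inline") SGLs point into the data that already arrived with the
 * command capsule in inline_page.
 */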
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
			 1 + rsp->n_rdma, queue->idx,
			 queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}
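	/*
	 * The budget debited above is one SEND for the NVMe completion
	 * plus n_rdma RDMA READ/WRITE work requests; when it is
	 * exhausted the caller parks the command on rsp_wr_wait_list and
	 * nvmet_rdma_process_wr_wait_list() retries it later.
	 */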
	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}
static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
	u16 status;

	cmd->req.port = queue->port;

	ib_dma_sync_single_for_cpu(queue->dev->device,
			cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
			DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
			cmd->send_sge.addr, cmd->send_sge.length,
			DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))

	status = nvmet_rdma_map_sgl(cmd);

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);

	nvmet_req_complete(&cmd->req, status);
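/*
 * RECV completion handler: commands that arrive while the queue is
 * still CONNECTING (before RDMA_CM_EVENT_ESTABLISHED) are parked on
 * rsp_wait_list and replayed by nvmet_rdma_queue_established().
 */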
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);

	rsp = nvmet_rdma_get_rsp(queue);
	rsp->req.cmd = cmd->nvme_cmd;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
static void nvmet_rdma_destroy_srq(struct nvmet_rdma_device *ndev)
	nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false);
	ib_destroy_srq(ndev->srq);

static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
	struct ib_srq_init_attr srq_attr = { NULL, };

	srq_size = 4095;	/* XXX: tune */

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 2;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");

	ndev->srq_cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(ndev->srq_cmds)) {
		ret = PTR_ERR(ndev->srq_cmds);
		goto out_destroy_srq;

	ndev->srq_size = srq_size;

	for (i = 0; i < srq_size; i++)
		nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]);
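	/*
	 * With an SRQ the initial RECVs are posted once here, per device;
	 * queues created on this device then set qp_attr.srq in
	 * nvmet_rdma_create_queue_ib() instead of posting per-queue RECVs.
	 */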
static void nvmet_rdma_free_dev(struct kref *ref)
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srq(ndev);
	ib_dealloc_pd(ndev->pd);

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
	struct nvmet_rdma_device *ndev;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srq(ndev);

	list_add(&ndev->entry, &device_list);

	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);

	ib_dealloc_pd(ndev->pd);

	mutex_unlock(&device_list_mutex);
static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int comp_vector, nr_cqe, ret, i;

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;
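	/*
	 * Example: with num_comp_vectors == 8, I/O queue idx 10 lands on
	 * completion vector 10 % 8 == 2, while admin queues (host_qid == 0)
	 * stay on vector 0; recv/send queue sizes of 128 reserve
	 * 128 + 2 * 128 = 384 CQEs (ib_alloc_cq() below adds one more).
	 */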
	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;

	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
			ndev->device->attrs.max_sge);

		qp_attr.srq = ndev->srq;

		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 2;

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
		pr_err("failed to create_qp ret= %d\n", ret);

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	for (i = 0; i < queue->recv_queue_size; i++) {
		queue->cmds[i].queue = queue;
		nvmet_rdma_post_recv(ndev, &queue->cmds[i]);

	ib_free_cq(queue->cq);
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
	ib_drain_qp(queue->cm_id->qp);
	rdma_destroy_qp(queue->cm_id);
	ib_free_cq(queue->cq);

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
	pr_info("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->dev->srq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
static void nvmet_rdma_release_queue_work(struct work_struct *w)
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct rdma_cm_id *cm_id = queue->cm_id;
	struct nvmet_rdma_device *dev = queue->dev;
	enum nvmet_rdma_queue_state state = queue->state;

	nvmet_rdma_free_queue(queue);

	if (state != NVMET_RDMA_IN_DEVICE_REMOVAL)
		rdma_destroy_id(cm_id);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
		struct nvmet_rdma_queue *queue)
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);
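	/*
	 * hsqsize is a 0's-based value, hence the +1: a host advertising
	 * hsqsize 31 gets a 32-entry receive queue here, and hrqsize 32
	 * sizes the send queue at 32. Admin connect requests whose
	 * resulting recv queue exceeds NVMF_AQ_DEPTH are rejected below.
	 */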
	if (!queue->host_qid && queue->recv_queue_size > NVMF_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
		enum nvme_rdma_cm_status status)
	struct nvme_rdma_cm_rej rej;

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej));
static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
	struct nvmet_rdma_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
		ret = NVME_RDMA_CM_NO_RSC;

	ret = nvmet_sq_init(&queue->nvme_sq);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->cm_id = cm_id;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;

	ret = nvmet_rdma_alloc_rsps(queue);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;

	queue->cmds = nvmet_rdma_alloc_cmds(ndev,
			queue->recv_queue_size,
			!queue->host_qid);
	if (IS_ERR(queue->cmds)) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_responses;

	ret = nvmet_rdma_create_queue_ib(queue);
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;

	nvmet_rdma_free_cmds(queue->dev, queue->cmds,
			queue->recv_queue_size,
			!queue->host_qid);
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	nvmet_sq_destroy(&queue->nvme_sq);

	pr_debug("rejecting connect request with status code %d\n", ret);
	nvmet_rdma_cm_reject(cm_id, ret);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
			queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		pr_err("no client data!\n");
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);

	queue->port = cm_id->context;

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	nvmet_rdma_free_queue(queue);

	kref_put(&ndev->ref, nvmet_rdma_free_dev);
static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");

	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);

	spin_unlock_irqrestore(&queue->state_lock, flags);
static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
	case NVMET_RDMA_IN_DEVICE_REMOVAL:
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		schedule_work(&queue->release_work);
	}
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	schedule_work(&queue->release_work);
/**
 * nvme_rdma_device_removal() - Handle RDMA device removal
 * @queue: nvmet rdma queue (cm id qp_context)
 * @addr: nvmet address (cm_id context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug so we should take care of destroying our RDMA resources.
 * This event will be generated for each allocated cm_id.
 *
 * Note that this event can be generated on a normal queue cm_id
 * and/or a device bound listener cm_id (where in this case
 * queue will be null).
 *
 * We claim ownership on destroying the cm_id. For queues we move
 * the queue state to NVMET_RDMA_IN_DEVICE_REMOVAL and for the port
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
	unsigned long flags;

	if (!queue) {
		struct nvmet_port *port = cm_id->context;

		/*
		 * This is a listener cm_id. Make sure that
		 * future remove_port won't invoke a double
		 * cm_id destroy. Use atomic xchg to make sure
		 * we don't compete with remove_port.
		 */
		if (xchg(&port->priv, NULL) != cm_id)
			return 0;
	} else {
		/*
		 * This is a queue cm_id. Make sure that
		 * release queue will not destroy the cm_id
		 * and schedule all ctrl queues removal (only
		 * if the queue is not disconnecting already).
		 */
		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state != NVMET_RDMA_Q_DISCONNECTING)
			queue->state = NVMET_RDMA_IN_DEVICE_REMOVAL;
		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_queue_disconnect(queue);
		flush_scheduled_work();
	}

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}
static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		 rdma_event_msg(event->event), event->event,
		 event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		/*
		 * We might end up here when we already freed the qp
		 * which means queue release sequence is in progress,
		 * so don't get in the way...
		 */
		if (queue)
			nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}
static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
static int nvmet_rdma_add_port(struct nvmet_port *port)
	struct rdma_cm_id *cm_id;
	struct sockaddr_in addr_in;
	u16 port_in;
	int ret;

	switch (port->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		break;
	default:
		pr_err("address family %d not supported\n",
			port->disc_addr.adrfam);
		return -EINVAL;
	}

	ret = kstrtou16(port->disc_addr.trsvcid, 0, &port_in);

	addr_in.sin_family = AF_INET;
	addr_in.sin_addr.s_addr = in_aton(port->disc_addr.traddr);
	addr_in.sin_port = htons(port_in);
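	/*
	 * Example: a port configured with traddr 192.168.1.10 and
	 * trsvcid 4420 (the IANA-assigned NVMe/RDMA port) is parsed into
	 * the sockaddr_in that the listener cm_id binds to below.
	 */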
	cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
			RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id)) {
		pr_err("CM ID creation failed\n");
		return PTR_ERR(cm_id);
	}

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr_in);
	if (ret) {
		pr_err("binding CM ID to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	ret = rdma_listen(cm_id, 128);
	if (ret) {
		pr_err("listening to %pISpc failed (%d)\n", &addr_in, ret);
		goto out_destroy_id;
	}

	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(port->disc_addr.portid), &addr_in);
	port->priv = cm_id;
	return 0;

out_destroy_id:
	rdma_destroy_id(cm_id);
	return ret;
}

static void nvmet_rdma_remove_port(struct nvmet_port *port)
{
	struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
	if (cm_id)
		rdma_destroy_id(cm_id);
}
static struct nvmet_fabrics_ops nvmet_rdma_ops = {
	.owner = THIS_MODULE,
	.type = NVMF_TRTYPE_RDMA,
	.sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE,
	.has_keyed_sgls = 1,
	.add_port = nvmet_rdma_add_port,
	.remove_port = nvmet_rdma_remove_port,
	.queue_response = nvmet_rdma_queue_response,
	.delete_ctrl = nvmet_rdma_delete_ctrl,
};
static int __init nvmet_rdma_init(void)
{
	return nvmet_register_transport(&nvmet_rdma_ops);
}

static void __exit nvmet_rdma_exit(void)
{
	struct nvmet_rdma_queue *queue;

	nvmet_unregister_transport(&nvmet_rdma_ops);

	flush_scheduled_work();

	mutex_lock(&nvmet_rdma_queue_mutex);
	while ((queue = list_first_entry_or_null(&nvmet_rdma_queue_list,
			struct nvmet_rdma_queue, queue_list))) {
		list_del_init(&queue->queue_list);

		mutex_unlock(&nvmet_rdma_queue_mutex);
		__nvmet_rdma_queue_disconnect(queue);
		mutex_lock(&nvmet_rdma_queue_mutex);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	flush_scheduled_work();
	ida_destroy(&nvmet_rdma_queue_ida);
}

module_init(nvmet_rdma_init);
module_exit(nvmet_rdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */