/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("qp event %s (%d)\n",
		 ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %s (%d) on device %s port %d\n",
		 ib_event_msg(event->event), event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD) and
 * Completion Queues (CQs) with the device associated with the adapter.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	struct ib_device *ib_dev = device->ib_device;
	int ret, i, max_cqe;

	ret = iser_assign_reg_ops(device);
	if (ret)
		return ret;

	device->comps_used = min_t(int, num_online_cpus(),
				   ib_dev->num_comp_vectors);

	device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
				GFP_KERNEL);
	if (!device->comps)
		goto comps_err;

	max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);

	iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
		  device->comps_used, ib_dev->name,
		  ib_dev->num_comp_vectors, max_cqe);

	device->pd = ib_alloc_pd(ib_dev,
		iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(device->pd))
		goto pd_err;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
				       IB_POLL_SOFTIRQ);
		if (IS_ERR(comp->cq)) {
			comp->cq = NULL;
			goto cq_err;
		}
	}

	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto cq_err;

	return 0;

cq_err:
	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		if (comp->cq)
			ib_free_cq(comp->cq);
	}
	ib_dealloc_pd(device->pd);
pd_err:
	kfree(device->comps);
comps_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the CQs and PD
 * created with the device associated with the adapter.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct iser_comp *comp = &device->comps[i];

		ib_free_cq(comp->cq);
		comp->cq = NULL;
	}

	(void)ib_unregister_event_handler(&device->event_handler);
	ib_dealloc_pd(device->pd);

	kfree(device->comps);
	device->comps = NULL;
	device->comps_used = 0;
	device->pd = NULL;
}

/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
			unsigned cmds_max,
			unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_page_vec *page_vec;
	struct iser_fr_desc *desc;
	struct ib_fmr_pool *fmr_pool;
	struct ib_fmr_pool_param params;
	int ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
			   GFP_KERNEL);
	if (!page_vec) {
		ret = -ENOMEM;
		goto err_frpl;
	}

	page_vec->pages = (u64 *)(page_vec + 1);

	params.page_shift        = SHIFT_4K;
	params.max_pages_per_fmr = size;
	/* make the pool size twice the max number of SCSI commands
	 * the midlayer is expected to queue, watermark for unmap at 50% */
	params.pool_size	 = cmds_max * 2;
	params.dirty_watermark	 = cmds_max;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(fmr_pool)) {
		ret = PTR_ERR(fmr_pool);
		iser_err("FMR allocation failed, err %d\n", ret);
		goto err_fmr;
	}

	desc->rsc.page_vec = page_vec;
	desc->rsc.fmr_pool = fmr_pool;
	list_add(&desc->list, &fr_pool->list);

	return 0;

err_fmr:
	kfree(page_vec);
err_frpl:
	kfree(desc);

	return ret;
}

/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;

	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);

	iser_info("freeing conn %p fmr pool %p\n",
		  ib_conn, desc->rsc.fmr_pool);

	ib_destroy_fmr_pool(desc->rsc.fmr_pool);
	kfree(desc->rsc.page_vec);
	kfree(desc);
}
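
/*
 * Allocate a fast registration memory region, using IB_MR_TYPE_SG_GAPS
 * when the device supports registering non-contiguous SG lists.
 */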
static int
iser_alloc_reg_res(struct iser_device *device,
		   struct ib_pd *pd,
		   struct iser_reg_resources *res,
		   unsigned int size)
{
	struct ib_device *ib_dev = device->ib_device;
	enum ib_mr_type mr_type;
	int ret;

	if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		mr_type = IB_MR_TYPE_SG_GAPS;
	else
		mr_type = IB_MR_TYPE_MEM_REG;

	res->mr = ib_alloc_mr(pd, mr_type, size);
	if (IS_ERR(res->mr)) {
		ret = PTR_ERR(res->mr);
		iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
		return ret;
	}
	res->mr_valid = 0;

	return 0;
}

static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
	ib_dereg_mr(rsc->mr);
}
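
/*
 * Allocate the T10-PI context of a descriptor: registration resources
 * for the protection buffer plus a signature-enabled MR.
 */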
static int
iser_alloc_pi_ctx(struct iser_device *device,
		  struct ib_pd *pd,
		  struct iser_fr_desc *desc,
		  unsigned int size)
{
	struct iser_pi_context *pi_ctx = NULL;
	int ret;

	desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!desc->pi_ctx)
		return -ENOMEM;

	pi_ctx = desc->pi_ctx;

	ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
	if (ret) {
		iser_err("failed to allocate reg_resources\n");
		goto alloc_reg_res_err;
	}

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto sig_mr_failure;
	}
	pi_ctx->sig_mr_valid = 0;
	desc->pi_ctx->sig_protected = 0;

	return 0;

sig_mr_failure:
	iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
	kfree(desc->pi_ctx);

	return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
	iser_free_reg_res(&pi_ctx->rsc);
	ib_dereg_mr(pi_ctx->sig_mr);
	kfree(pi_ctx);
}
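
/*
 * Allocate a single fast registration descriptor, including its T10-PI
 * context when protection information is enabled for the connection.
 */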
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
			 struct ib_pd *pd,
			 bool pi_enable,
			 unsigned int size)
{
	struct iser_fr_desc *desc;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
	if (ret)
		goto reg_res_alloc_failure;

	if (pi_enable) {
		ret = iser_alloc_pi_ctx(device, pd, desc, size);
		if (ret)
			goto pi_ctx_alloc_failure;
	}

	return desc;

pi_ctx_alloc_failure:
	iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
	kfree(desc);

	return ERR_PTR(ret);
}

/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
			    unsigned cmds_max,
			    unsigned int size)
{
	struct iser_device *device = ib_conn->device;
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	int i, ret;

	INIT_LIST_HEAD(&fr_pool->list);
	spin_lock_init(&fr_pool->lock);
	fr_pool->size = 0;
	for (i = 0; i < cmds_max; i++) {
		desc = iser_create_fastreg_desc(device, device->pd,
						ib_conn->pi_support, size);
		if (IS_ERR(desc)) {
			ret = PTR_ERR(desc);
			goto err;
		}

		list_add_tail(&desc->list, &fr_pool->list);
		fr_pool->size++;
	}

	return 0;

err:
	iser_free_fastreg_pool(ib_conn);
	return ret;
}

/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc, *tmp;
	int i = 0;

	if (list_empty(&fr_pool->list))
		return;

	iser_info("freeing conn %p fr pool\n", ib_conn);

	list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) {
		list_del(&desc->list);
		iser_free_reg_res(&desc->rsc);
		if (desc->pi_ctx)
			iser_free_pi_ctx(desc->pi_ctx);
		kfree(desc);
		++i;
	}

	if (i < fr_pool->size)
		iser_warn("pool still has %d regions registered\n",
			  fr_pool->size - i);
}

/**
 * iser_create_ib_conn_res - creates a Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
	struct iser_conn *iser_conn = to_iser_conn(ib_conn);
	struct iser_device *device;
	struct ib_device *ib_dev;
	struct ib_qp_init_attr init_attr;
	int ret = -ENOMEM;
	int index, min_index = 0;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;
	ib_dev = device->ib_device;

	memset(&init_attr, 0, sizeof init_attr);

	mutex_lock(&ig.connlist_mutex);
	/* select the CQ with the minimal number of usages */
	for (index = 0; index < device->comps_used; index++) {
		if (device->comps[index].active_qps <
		    device->comps[min_index].active_qps)
			min_index = index;
	}
	ib_conn->comp = &device->comps[min_index];
	ib_conn->comp->active_qps++;
	mutex_unlock(&ig.connlist_mutex);
	iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= ib_conn->comp->cq;
	init_attr.recv_cq	= ib_conn->comp->cq;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;
	if (ib_conn->pi_support) {
		init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
		init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
		iser_conn->max_cmds =
			ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
	} else {
		if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
			init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS + 1;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
		} else {
			init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
			iser_conn->max_cmds =
				ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
			iser_dbg("device %s supports max_send_wr %d\n",
				 device->ib_device->name,
				 ib_dev->attrs.max_qp_wr);
		}
	}

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto out_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_info("setting conn %p cma_id %p qp %p\n",
		  ib_conn, ib_conn->cma_id,
		  ib_conn->cma_id->qp);
	return ret;

out_err:
	mutex_lock(&ig.connlist_mutex);
	ib_conn->comp->active_qps--;
	mutex_unlock(&ig.connlist_mutex);
	iser_err("unable to alloc mem or create resource, err %d\n", ret);

	return ret;
}

/*
 * Based on the resolved device node GUID, see if there is an already
 * allocated device for this device node. If there's no such device,
 * create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);

	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the resolved ib_device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}
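
/*
 * Test-and-set the connection state: move to @exch only if the current
 * state equals @comp, and report whether the exchange took place.
 *
 * Called with state mutex held
 */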
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
				     enum iser_conn_state comp,
				     enum iser_conn_state exch)
{
	int ret;

	ret = (iser_conn->state == comp);
	if (ret)
		iser_conn->state = exch;

	return ret;
}
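
/*
 * Deferred connection release work: wait for both the iscsi conn_stop
 * path and the IB resources cleanup to finish, then move the connection
 * to DOWN and free it.
 */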
void iser_release_work(struct work_struct *work)
{
	struct iser_conn *iser_conn;

	iser_conn = container_of(work, struct iser_conn, release_work);

	/* Wait for conn_stop to complete */
	wait_for_completion(&iser_conn->stop_completion);
	/* Wait for IB resources cleanup to complete */
	wait_for_completion(&iser_conn->ib_completion);

	mutex_lock(&iser_conn->state_mutex);
	iser_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&iser_conn->state_mutex);

	iser_conn_release(iser_conn);
}

/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
				  bool destroy)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	iser_info("freeing conn %p cma_id %p qp %p\n",
		  iser_conn, ib_conn->cma_id, ib_conn->qp);

	if (ib_conn->qp != NULL) {
		ib_conn->comp->active_qps--;
		rdma_destroy_qp(ib_conn->cma_id);
		ib_conn->qp = NULL;
	}

	if (destroy) {
		if (iser_conn->rx_descs)
			iser_free_rx_descriptors(iser_conn);

		if (device != NULL) {
			iser_device_try_release(device);
			ib_conn->device = NULL;
		}
	}
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	mutex_lock(&ig.connlist_mutex);
	list_del(&iser_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);

	mutex_lock(&iser_conn->state_mutex);
	/* In case we end up here without ep_disconnect being invoked. */
	if (iser_conn->state != ISER_CONN_DOWN) {
		iser_warn("iser conn %p state %d, expected state down.\n",
			  iser_conn, iser_conn->state);
		iscsi_destroy_endpoint(iser_conn->ep);
		iser_conn->state = ISER_CONN_DOWN;
	}
	/*
	 * In case we never got to bind stage, we still need to
	 * release IB resources (which is safe to call more than once).
	 */
	iser_free_ib_conn_res(iser_conn, true);
	mutex_unlock(&iser_conn->state_mutex);

	if (ib_conn->cma_id != NULL) {
		rdma_destroy_id(ib_conn->cma_id);
		ib_conn->cma_id = NULL;
	}

	kfree(iser_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	/* terminate the iser conn only if the conn state is UP */
	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
				       ISER_CONN_TERMINATING))
		return 0;

	iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

	/* suspend queuing of new iscsi commands */
	if (iser_conn->iscsi_conn)
		iscsi_suspend_queue(iser_conn->iscsi_conn);

	/*
	 * In case we didn't already clean up the cma_id (peer initiated
	 * a disconnection), we need to cause the CMA to change the QP
	 * state to ERROR.
	 */
	if (ib_conn->cma_id) {
		err = rdma_disconnect(ib_conn->cma_id);
		if (err)
			iser_err("Failed to disconnect, conn: 0x%p err %d\n",
				 iser_conn, err);

		/* block until all flush errors are consumed */
		ib_drain_sq(ib_conn->qp);
	}

	return 1;
}

/**
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_conn->state = ISER_CONN_TERMINATING;
}
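
/*
 * Derive scsi_sg_tablesize and scsi_max_sectors from the requested
 * max_sectors, clamped by the device's fast registration page list limit.
 */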
static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
		      unsigned int max_sectors)
{
	struct iser_device *device = iser_conn->ib_conn.device;
	unsigned short sg_tablesize, sup_sg_tablesize;

	sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
	sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
		device->ib_device->attrs.max_fast_reg_page_list_len);

	if (sg_tablesize > sup_sg_tablesize) {
		sg_tablesize = sup_sg_tablesize;
		iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
	} else {
		iser_conn->scsi_max_sectors = max_sectors;
	}

	iser_conn->scsi_sg_tablesize = sg_tablesize;

	iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
		 iser_conn, iser_conn->scsi_sg_tablesize,
		 iser_conn->scsi_max_sectors);
}

/**
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn *iser_conn;
	struct ib_conn *ib_conn;
	int ret;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ib_conn = &iser_conn->ib_conn;
	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn->device = device;

	/* connection T10-PI support */
	if (iser_pi_enable) {
		if (!(device->ib_device->attrs.device_cap_flags &
		      IB_DEVICE_SIGNATURE_HANDOVER)) {
			iser_warn("T10-PI requested but not supported on %s, "
				  "continue without T10-PI\n",
				  ib_conn->device->ib_device->name);
			ib_conn->pi_support = false;
		} else {
			ib_conn->pi_support = true;
		}
	}

	iser_calc_scsi_params(iser_conn, iser_max_sectors);

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
		return;
	}
}

/**
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int ret;
	struct iser_cm_hdr req_hdr;
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;

	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	ret = iser_create_ib_conn_res(ib_conn);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = device->ib_device->attrs.max_qp_rd_atom;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	memset(&req_hdr, 0, sizeof(req_hdr));
	req_hdr.flags = ISER_ZBVA_NOT_SUP;
	if (!device->remote_inv_sup)
		req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
	conn_param.private_data	    = (void *)&req_hdr;
	conn_param.private_data_len = sizeof(struct iser_cm_hdr);

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}

	return;
failure:
	iser_connect_error(cma_id);
}
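
/*
 * Connection established: read the negotiated send-with-invalidate
 * support from the CM private data and wake up the connect waiter.
 *
 * Called with state mutex held
 */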
static void iser_connected_handler(struct rdma_cm_id *cma_id,
				   const void *private_data)
{
	struct iser_conn *iser_conn;
	struct ib_qp_attr attr;
	struct ib_qp_init_attr init_attr;

	iser_conn = (struct iser_conn *)cma_id->context;
	if (iser_conn->state != ISER_CONN_PENDING)
		/* bailout */
		return;

	(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
	iser_info("remote qpn:%x my qpn:%x\n",
		  attr.dest_qp_num, cma_id->qp->qp_num);

	if (private_data) {
		u8 flags = *(u8 *)private_data;

		iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
	}

	iser_info("conn %p: negotiated %s invalidation\n",
		  iser_conn, iser_conn->snd_w_inv ? "remote" : "local");

	iser_conn->state = ISER_CONN_UP;
	complete(&iser_conn->up_completion);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	if (iser_conn_terminate(iser_conn)) {
		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
		else
			iser_err("iscsi_iser connection isn't bound\n");
	}
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
				 bool destroy)
{
	struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

	/*
	 * We are not guaranteed that we visited disconnected_handler
	 * by now, call it here to be safe that we handle CM drep
	 * and flush errors.
	 */
	iser_disconnected_handler(cma_id);
	iser_free_ib_conn_res(iser_conn, destroy);
	complete(&iser_conn->ib_completion);
}
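
/*
 * RDMA CM event dispatcher, called under the connection state mutex.
 * Returning non-zero (DEVICE_REMOVAL only) asks the CM to destroy the
 * id, since we nullified our reference to it.
 */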
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct iser_conn *iser_conn;
	int ret = 0;

	iser_conn = (struct iser_conn *)cma_id->context;
	iser_info("%s (%d): status %d conn %p id %p\n",
		  rdma_event_msg(event->event), event->event,
		  event->status, cma_id->context, cma_id);

	mutex_lock(&iser_conn->state_mutex);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id, event->param.conn.private_data);
		break;
	case RDMA_CM_EVENT_REJECTED:
		iser_info("Connection rejected: %s\n",
			  rdma_reject_msg(cma_id, event->status));
		/* FALLTHROUGH */
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_ADDR_CHANGE:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		iser_cleanup_handler(cma_id, false);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		/*
		 * we *must* destroy the device as we cannot rely
		 * on iscsid to be around to initiate error handling.
		 * also if we are not in state DOWN implicitly destroy
		 * the cma_id.
		 */
		iser_cleanup_handler(cma_id, true);
		if (iser_conn->state != ISER_CONN_DOWN) {
			iser_conn->ib_conn.cma_id = NULL;
			ret = 1;
		}
		break;
	default:
		iser_err("Unexpected RDMA CM event: %s (%d)\n",
			 rdma_event_msg(event->event), event->event);
		break;
	}
	mutex_unlock(&iser_conn->state_mutex);

	return ret;
}

void iser_conn_init(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;

	iser_conn->state = ISER_CONN_INIT;
	init_completion(&iser_conn->stop_completion);
	init_completion(&iser_conn->ib_completion);
	init_completion(&iser_conn->up_completion);
	INIT_LIST_HEAD(&iser_conn->conn_list);
	mutex_init(&iser_conn->state_mutex);

	ib_conn->post_recv_buf_count = 0;
	ib_conn->reg_cqe.done = iser_reg_comp;
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn *iser_conn,
		 struct sockaddr *src_addr,
		 struct sockaddr *dst_addr,
		 int non_blocking)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	int err = 0;

	mutex_lock(&iser_conn->state_mutex);

	sprintf(iser_conn->name, "%pISp", dst_addr);

	iser_info("connecting to: %s\n", iser_conn->name);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
					 (void *)iser_conn,
					 RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_for_completion_interruptible(&iser_conn->up_completion);

		if (iser_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}
	mutex_unlock(&iser_conn->state_mutex);

	mutex_lock(&ig.connlist_mutex);
	list_add(&iser_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	iser_conn->state = ISER_CONN_DOWN;
connect_failure:
	mutex_unlock(&iser_conn->state_mutex);
	iser_conn_release(iser_conn);
	return err;
}
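
/*
 * Post the single receive buffer used for the login response.
 */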
int iser_post_recvl(struct iser_conn *iser_conn)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	struct iser_login_desc *desc = &iser_conn->login_desc;
	struct ib_recv_wr wr, *wr_failed;
	int ib_ret;

	desc->sge.addr = desc->rsp_dma;
	desc->sge.length = ISER_RX_LOGIN_SIZE;
	desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;

	desc->cqe.done = iser_login_rsp;
	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}

	return ib_ret;
}
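
/*
 * Post @count receive buffers, chained into one work request list
 * starting at rx_desc_head; the ring mask keeps the head in range.
 */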
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
	struct ib_conn *ib_conn = &iser_conn->ib_conn;
	unsigned int my_rx_head = iser_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;
	struct ib_recv_wr *wr, *wr_failed;
	int i, ib_ret;

	for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
		rx_desc = &iser_conn->rx_descs[my_rx_head];
		rx_desc->cqe.done = iser_task_rsp;
		wr->wr_cqe = &rx_desc->cqe;
		wr->sg_list = &rx_desc->rx_sg;
		wr->num_sge = 1;
		wr->next = wr + 1;
		my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
	}

	wr--;
	wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		iser_conn->rx_desc_head = my_rx_head;

	return ib_ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
		   bool signal)
{
	struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
	int ib_ret;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
				      tx_desc->dma_addr, ISER_HEADERS_LEN,
				      DMA_TO_DEVICE);

	wr->next = NULL;
	wr->wr_cqe = &tx_desc->cqe;
	wr->sg_list = tx_desc->tx_sg;
	wr->num_sge = tx_desc->num_sge;
	wr->opcode = IB_WR_SEND;
	wr->send_flags = signal ? IB_SEND_SIGNALED : 0;

	ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
	if (ib_ret)
		iser_err("ib_post_send failed, ret:%d opcode:%d\n",
			 ib_ret, bad_wr->opcode);

	return ib_ret;
}
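
/*
 * Check the signature MR status of a protected task. On a detected PI
 * error, report the failing sector and return the matching check code
 * (0x1 guard, 0x2 app tag, 0x3 reference tag); return 0 when clean.
 */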
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
			     enum iser_data_dir cmd_dir, sector_t *sector)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc = reg->mem_h;
	unsigned long sector_size = iser_task->sc->device->sector_size;
	struct ib_mr_status mr_status;
	int ret;

	if (desc && desc->pi_ctx->sig_protected) {
		desc->pi_ctx->sig_protected = 0;
		ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
					 IB_MR_CHECK_SIG_STATUS, &mr_status);
		if (ret) {
			pr_err("ib_check_mr_status failed, ret %d\n", ret);
			goto err;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			sector_t sector_off = mr_status.sig_err.sig_err_offset;

			sector_div(sector_off, sector_size + 8);
			*sector = scsi_get_lba(iser_task->sc) + sector_off;

			pr_err("PI error found type %d at sector %llx "
			       "expected %x vs actual %x\n",
			       mr_status.sig_err.err_type,
			       (unsigned long long)*sector,
			       mr_status.sig_err.expected,
			       mr_status.sig_err.actual);

			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				return 0x1;
			case IB_SIG_BAD_REFTAG:
				return 0x3;
			case IB_SIG_BAD_APPTAG:
				return 0x2;
			}
		}
	}

	return 0;
err:
	/* Not a lot we can do here, return ambiguous guard error */
	return 0x1;
}
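
/*
 * Common error completion handler: flush errors are only logged at
 * debug level, any other failure also fails the iscsi connection.
 */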
void iser_err_comp(struct ib_wc *wc, const char *type)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR) {
		struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);

		iser_err("%s failure: %s (%d) vend_err %x\n", type,
			 ib_wc_status_msg(wc->status), wc->status,
			 wc->vendor_err);

		if (iser_conn->iscsi_conn)
			iscsi_conn_failure(iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);
	} else {
		iser_dbg("%s failure: %s (%d)\n", type,
			 ib_wc_status_msg(wc->status), wc->status);
	}
}