/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
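
/*
 * Example (illustrative): an ioc_guid of 0x0005ad0000001234 is stored
 * big-endian, so its first three bytes in memory are 00:05:ad -- the
 * Topspin OUI -- and the workarounds below apply to that target.
 */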
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
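
/*
 * Usage sketch (illustrative, not from the original source): a descriptor
 * cycles through
 *
 *	desc = srp_fr_pool_get(pool);	 /- claim an unused MR
 *	...post IB_WR_REG_MR using desc->mr...
 *	...post IB_WR_LOCAL_INV for desc->mr->rkey when the I/O completes...
 *	srp_fr_pool_put(pool, &desc, 1); /- only after the invalidate is queued
 */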
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it.  This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_rq(qp);
	ib_destroy_qp(qp);
}

static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
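	/*
	 * Send queue sizing (illustrative restatement of the line above):
	 * each command consumes one send WR for the SRP_CMD itself plus,
	 * when fast registration is used, one IB_WR_REG_MR and one
	 * IB_WR_LOCAL_INV per memory region, hence the
	 * "1 + use_fast_reg * mr_per_cmd * 2" multiplier.
	 */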
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}
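
	/*
	 * Resulting 16-byte port ID layouts (illustrative summary of the
	 * branches above):
	 *
	 *	rev 16a targets:  initiator = ext(8)  | GUID(8)   (ext first)
	 *	rev 10 targets:   initiator = GUID(8) | ext(8)    (GUID first)
	 *
	 * i.e. the two 8-byte halves simply swap places for targets that
	 * report the obsolete SRP_REV10_IB_IO_CLASS I/O class.
	 */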
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}
static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}
/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);
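	/*
	 * Bumping the 8-bit consumer-owned key portion of the rkey before
	 * every reuse makes stale rkeys from a previous registration of the
	 * same MR fail with a protection error instead of silently hitting
	 * the new mapping.
	 */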
	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;

		if (state->npages == dev->max_pages_per_mr ||
		    (state->npages > 0 && offset != 0)) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the end of the MR is not on a page boundary then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if ((dma_addr & ~dev->mr_page_mask) != 0)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}
static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->pd->unsafe_global_rkey);
	}

	return 0;
}
/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory region.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}
/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct ib_pd *pd = target->pd;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
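
/*
 * Worked example (illustrative, not from the original source): if the
 * target has granted req_lim = 3 credits and rsv = SRP_TSK_MGMT_SQ_SIZE,
 * a SRP_IU_CMD allocation in __srp_get_tx_iu() fails as soon as
 * req_lim <= rsv, so the final credits always stay available for
 * task-management IUs.
 */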
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd && scmnd->host_scribble) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		} else {
			scmnd = NULL;
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);
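	/*
	 * blk_mq_unique_tag() packs the hardware queue index into the upper
	 * 16 bits and the per-queue tag into the lower 16 bits; e.g. a
	 * unique tag of 0x00020005 selects ch[2] and req_ring[5]
	 * (illustrative values).
	 */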
	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
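
/*
 * Worked example (illustrative): with qp_attr->timeout = 19 and
 * retry_cnt = 7, T_tr = 4096 ns * 2^19 ~= 2.15 s, the worst case is
 * 7 * 4 * T_tr ~= 60 s, so rq_tmo becomes roughly 61 s worth of jiffies.
 */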
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
2381 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2382 struct ib_cm_event *event,
2383 struct srp_rdma_ch *ch)
2385 struct srp_target_port *target = ch->target;
2386 struct Scsi_Host *shost = target->scsi_host;
2387 struct ib_class_port_info *cpi;
2390 switch (event->param.rej_rcvd.reason) {
2391 case IB_CM_REJ_PORT_CM_REDIRECT:
2392 cpi = event->param.rej_rcvd.ari;
2393 ch->path.dlid = cpi->redirect_lid;
2394 ch->path.pkey = cpi->redirect_pkey;
2395 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2396 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2398 ch->status = ch->path.dlid ?
2399 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2402 case IB_CM_REJ_PORT_REDIRECT:
2403 if (srp_target_is_topspin(target)) {
2405 * Topspin/Cisco SRP gateways incorrectly send
2406 * reject reason code 25 when they mean 24
2407 * (port redirect).
2409 memcpy(ch->path.dgid.raw,
2410 event->param.rej_rcvd.ari, 16);
2412 shost_printk(KERN_DEBUG, shost,
2413 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2414 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2415 be64_to_cpu(ch->path.dgid.global.interface_id));
2417 ch->status = SRP_PORT_REDIRECT;
2419 shost_printk(KERN_WARNING, shost,
2420 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2421 ch->status = -ECONNRESET;
2425 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2426 shost_printk(KERN_WARNING, shost,
2427 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2428 ch->status = -ECONNRESET;
2431 case IB_CM_REJ_CONSUMER_DEFINED:
2432 opcode = *(u8 *) event->private_data;
2433 if (opcode == SRP_LOGIN_REJ) {
2434 struct srp_login_rej *rej = event->private_data;
2435 u32 reason = be32_to_cpu(rej->reason);
2437 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2438 shost_printk(KERN_WARNING, shost,
2439 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2441 shost_printk(KERN_WARNING, shost, PFX
2442 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2444 target->orig_dgid.raw, reason);
2446 shost_printk(KERN_WARNING, shost,
2447 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2448 " opcode 0x%02x\n", opcode);
2449 ch->status = -ECONNRESET;
2452 case IB_CM_REJ_STALE_CONN:
2453 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2454 ch->status = SRP_STALE_CONN;
2458 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2459 event->param.rej_rcvd.reason);
2460 ch->status = -ECONNRESET;
2464 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2466 struct srp_rdma_ch *ch = cm_id->context;
2467 struct srp_target_port *target = ch->target;
2470 switch (event->event) {
2471 case IB_CM_REQ_ERROR:
2472 shost_printk(KERN_DEBUG, target->scsi_host,
2473 PFX "Sending CM REQ failed\n");
2475 ch->status = -ECONNRESET;
2478 case IB_CM_REP_RECEIVED:
2480 srp_cm_rep_handler(cm_id, event->private_data, ch);
2483 case IB_CM_REJ_RECEIVED:
2484 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2487 srp_cm_rej_handler(cm_id, event, ch);
2490 case IB_CM_DREQ_RECEIVED:
2491 shost_printk(KERN_WARNING, target->scsi_host,
2492 PFX "DREQ received - connection closed\n");
2493 ch->connected = false;
2494 if (ib_send_cm_drep(cm_id, NULL, 0))
2495 shost_printk(KERN_ERR, target->scsi_host,
2496 PFX "Sending CM DREP failed\n");
2497 queue_work(system_long_wq, &target->tl_err_work);
2500 case IB_CM_TIMEWAIT_EXIT:
2501 shost_printk(KERN_ERR, target->scsi_host,
2502 PFX "connection closed\n");
2508 case IB_CM_MRA_RECEIVED:
2509 case IB_CM_DREQ_ERROR:
2510 case IB_CM_DREP_RECEIVED:
2514 shost_printk(KERN_WARNING, target->scsi_host,
2515 PFX "Unhandled CM event %d\n", event->event);
2520 complete(&ch->done);
2526 * srp_change_queue_depth - set the device queue depth
2527 * @sdev: scsi device struct
2528 * @qdepth: requested queue depth
2530 * Returns queue depth.
2533 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2535 if (!sdev->tagged_supported)
2536 qdepth = 1;
2537 return scsi_change_queue_depth(sdev, qdepth);
2540 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2541 u8 func, u8 *status)
2543 struct srp_target_port *target = ch->target;
2544 struct srp_rport *rport = target->rport;
2545 struct ib_device *dev = target->srp_host->srp_dev->dev;
2547 struct srp_tsk_mgmt *tsk_mgmt;
2550 if (!ch->connected || target->qp_in_error)
2554 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2555 * invoked while a task management function is being sent.
2557 mutex_lock(&rport->mutex);
2558 spin_lock_irq(&ch->lock);
2559 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2560 spin_unlock_irq(&ch->lock);
2563 mutex_unlock(&rport->mutex);
2568 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2571 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2573 tsk_mgmt->opcode = SRP_TSK_MGMT;
2574 int_to_scsilun(lun, &tsk_mgmt->lun);
2575 tsk_mgmt->tsk_mgmt_func = func;
2576 tsk_mgmt->task_tag = req_tag;
2578 spin_lock_irq(&ch->lock);
2579 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2580 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2581 spin_unlock_irq(&ch->lock);
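/*
 * Note on the tag generated above: the counter is advanced under
 * ch->lock, and the SRP_TAG_TSK_MGMT bit keeps task-management tags
 * disjoint from SCSI command tags, so the response for this request
 * can later be matched against ch->tsk_mgmt_tag.
 */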
2583 init_completion(&ch->tsk_mgmt_done);
2585 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2587 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2588 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2589 mutex_unlock(&rport->mutex);
2593 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2594 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2595 if (res > 0 && status)
2596 *status = ch->tsk_mgmt_status;
2597 mutex_unlock(&rport->mutex);
2599 WARN_ON_ONCE(res < 0);
2601 return res > 0 ? 0 : -1;
2604 static int srp_abort(struct scsi_cmnd *scmnd)
2606 struct srp_target_port *target = host_to_target(scmnd->device->host);
2607 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2610 struct srp_rdma_ch *ch;
2613 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2617 tag = blk_mq_unique_tag(scmnd->request);
2618 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2619 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2621 ch = &target->ch[ch_idx];
2622 if (!srp_claim_req(ch, req, NULL, scmnd))
2624 shost_printk(KERN_ERR, target->scsi_host,
2625 "Sending SRP abort for tag %#x\n", tag);
2626 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2627 SRP_TSK_ABORT_TASK, NULL) == 0)
2629 else if (target->rport->state == SRP_RPORT_LOST)
2633 srp_free_req(ch, req, scmnd, 0);
2634 scmnd->result = DID_ABORT << 16;
2635 scmnd->scsi_done(scmnd);
2640 static int srp_reset_device(struct scsi_cmnd *scmnd)
2642 struct srp_target_port *target = host_to_target(scmnd->device->host);
2643 struct srp_rdma_ch *ch;
2647 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2649 ch = &target->ch[0];
2650 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2651 SRP_TSK_LUN_RESET, &status))
2656 for (i = 0; i < target->ch_count; i++) {
2657 ch = &target->ch[i];
2658 for (j = 0; j < target->req_ring_size; ++j) {
2659 struct srp_request *req = &ch->req_ring[j];
2661 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2668 static int srp_reset_host(struct scsi_cmnd *scmnd)
2670 struct srp_target_port *target = host_to_target(scmnd->device->host);
2672 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2674 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2677 static int srp_slave_alloc(struct scsi_device *sdev)
2679 struct Scsi_Host *shost = sdev->host;
2680 struct srp_target_port *target = host_to_target(shost);
2681 struct srp_device *srp_dev = target->srp_host->srp_dev;
2684 blk_queue_virt_boundary(sdev->request_queue,
2685 ~srp_dev->mr_page_mask);
2690 static int srp_slave_configure(struct scsi_device *sdev)
2692 struct Scsi_Host *shost = sdev->host;
2693 struct srp_target_port *target = host_to_target(shost);
2694 struct request_queue *q = sdev->request_queue;
2695 unsigned long timeout;
2697 if (sdev->type == TYPE_DISK) {
2698 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2699 blk_queue_rq_timeout(q, timeout);
2705 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2708 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2710 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2713 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2716 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2718 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2721 static ssize_t show_service_id(struct device *dev,
2722 struct device_attribute *attr, char *buf)
2724 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2726 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2729 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2732 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2734 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2737 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2740 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2742 return sprintf(buf, "%pI6\n", target->sgid.raw);
2745 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2748 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2749 struct srp_rdma_ch *ch = &target->ch[0];
2751 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2754 static ssize_t show_orig_dgid(struct device *dev,
2755 struct device_attribute *attr, char *buf)
2757 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2759 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2762 static ssize_t show_req_lim(struct device *dev,
2763 struct device_attribute *attr, char *buf)
2765 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2766 struct srp_rdma_ch *ch;
2767 int i, req_lim = INT_MAX;
2769 for (i = 0; i < target->ch_count; i++) {
2770 ch = &target->ch[i];
2771 req_lim = min(req_lim, ch->req_lim);
2773 return sprintf(buf, "%d\n", req_lim);
2776 static ssize_t show_zero_req_lim(struct device *dev,
2777 struct device_attribute *attr, char *buf)
2779 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2781 return sprintf(buf, "%d\n", target->zero_req_lim);
2784 static ssize_t show_local_ib_port(struct device *dev,
2785 struct device_attribute *attr, char *buf)
2787 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2789 return sprintf(buf, "%d\n", target->srp_host->port);
2792 static ssize_t show_local_ib_device(struct device *dev,
2793 struct device_attribute *attr, char *buf)
2795 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2797 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2800 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2803 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2805 return sprintf(buf, "%d\n", target->ch_count);
2808 static ssize_t show_comp_vector(struct device *dev,
2809 struct device_attribute *attr, char *buf)
2811 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2813 return sprintf(buf, "%d\n", target->comp_vector);
2816 static ssize_t show_tl_retry_count(struct device *dev,
2817 struct device_attribute *attr, char *buf)
2819 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2821 return sprintf(buf, "%d\n", target->tl_retry_count);
2824 static ssize_t show_cmd_sg_entries(struct device *dev,
2825 struct device_attribute *attr, char *buf)
2827 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2829 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2832 static ssize_t show_allow_ext_sg(struct device *dev,
2833 struct device_attribute *attr, char *buf)
2835 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2837 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2840 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2841 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2842 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2843 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2844 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2845 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2846 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2847 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2848 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2849 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2850 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2851 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2852 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2853 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2854 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2855 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2857 static struct device_attribute *srp_host_attrs[] = {
2860 &dev_attr_service_id,
2864 &dev_attr_orig_dgid,
2866 &dev_attr_zero_req_lim,
2867 &dev_attr_local_ib_port,
2868 &dev_attr_local_ib_device,
2870 &dev_attr_comp_vector,
2871 &dev_attr_tl_retry_count,
2872 &dev_attr_cmd_sg_entries,
2873 &dev_attr_allow_ext_sg,
2877 static struct scsi_host_template srp_template = {
2878 .module = THIS_MODULE,
2879 .name = "InfiniBand SRP initiator",
2880 .proc_name = DRV_NAME,
2881 .slave_alloc = srp_slave_alloc,
2882 .slave_configure = srp_slave_configure,
2883 .info = srp_target_info,
2884 .queuecommand = srp_queuecommand,
2885 .change_queue_depth = srp_change_queue_depth,
2886 .eh_abort_handler = srp_abort,
2887 .eh_device_reset_handler = srp_reset_device,
2888 .eh_host_reset_handler = srp_reset_host,
2889 .skip_settle_delay = true,
2890 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2891 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2893 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2894 .use_clustering = ENABLE_CLUSTERING,
2895 .shost_attrs = srp_host_attrs,
2896 .track_queue_depth = 1,
2899 static int srp_sdev_count(struct Scsi_Host *host)
2901 struct scsi_device *sdev;
2904 shost_for_each_device(sdev, host)
2912 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2913 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2914 * removal has been scheduled.
2915 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2917 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2919 struct srp_rport_identifiers ids;
2920 struct srp_rport *rport;
2922 target->state = SRP_TARGET_SCANNING;
2923 sprintf(target->target_name, "SRP.T10:%016llX",
2924 be64_to_cpu(target->id_ext));
2926 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
2929 memcpy(ids.port_id, &target->id_ext, 8);
2930 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2931 ids.roles = SRP_RPORT_ROLE_TARGET;
2932 rport = srp_rport_add(target->scsi_host, &ids);
2933 if (IS_ERR(rport)) {
2934 scsi_remove_host(target->scsi_host);
2935 return PTR_ERR(rport);
2938 rport->lld_data = target;
2939 target->rport = rport;
2941 spin_lock(&host->target_lock);
2942 list_add_tail(&target->list, &host->target_list);
2943 spin_unlock(&host->target_lock);
2945 scsi_scan_target(&target->scsi_host->shost_gendev,
2946 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2948 if (srp_connected_ch(target) < target->ch_count ||
2949 target->qp_in_error) {
2950 shost_printk(KERN_INFO, target->scsi_host,
2951 PFX "SCSI scan failed - removing SCSI host\n");
2952 srp_queue_remove_work(target);
2956 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2957 dev_name(&target->scsi_host->shost_gendev),
2958 srp_sdev_count(target->scsi_host));
2960 spin_lock_irq(&target->lock);
2961 if (target->state == SRP_TARGET_SCANNING)
2962 target->state = SRP_TARGET_LIVE;
2963 spin_unlock_irq(&target->lock);
2969 static void srp_release_dev(struct device *dev)
2971 struct srp_host *host =
2972 container_of(dev, struct srp_host, dev);
2974 complete(&host->released);
2977 static struct class srp_class = {
2978 .name = "infiniband_srp",
2979 .dev_release = srp_release_dev
2983 * srp_conn_unique() - check whether the connection to a target is unique
2985 * @target: SRP target port.
2987 static bool srp_conn_unique(struct srp_host *host,
2988 struct srp_target_port *target)
2990 struct srp_target_port *t;
2993 if (target->state == SRP_TARGET_REMOVED)
2998 spin_lock(&host->target_lock);
2999 list_for_each_entry(t, &host->target_list, list) {
3001 target->id_ext == t->id_ext &&
3002 target->ioc_guid == t->ioc_guid &&
3003 target->initiator_ext == t->initiator_ext) {
3008 spin_unlock(&host->target_lock);
3015 * Target ports are added by writing
3017 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3018 * pkey=<P_Key>,service_id=<service ID>
3020 * to the add_target sysfs attribute.
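*
* A hypothetical example (all identifiers made up; the host directory
* name follows the "srp-%s-%d" pattern used by srp_add_port() below):
*
*     echo id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,\
*     dgid=fe800000000000000002c90200402bd5,pkey=ffff,\
*     service_id=0002c90200402bd4 > \
*         /sys/class/infiniband_srp/srp-mlx4_0-1/add_target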
3024 SRP_OPT_ID_EXT = 1 << 0,
3025 SRP_OPT_IOC_GUID = 1 << 1,
3026 SRP_OPT_DGID = 1 << 2,
3027 SRP_OPT_PKEY = 1 << 3,
3028 SRP_OPT_SERVICE_ID = 1 << 4,
3029 SRP_OPT_MAX_SECT = 1 << 5,
3030 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3031 SRP_OPT_IO_CLASS = 1 << 7,
3032 SRP_OPT_INITIATOR_EXT = 1 << 8,
3033 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3034 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3035 SRP_OPT_SG_TABLESIZE = 1 << 11,
3036 SRP_OPT_COMP_VECTOR = 1 << 12,
3037 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3038 SRP_OPT_QUEUE_SIZE = 1 << 14,
3039 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3043 SRP_OPT_SERVICE_ID),
3046 static const match_table_t srp_opt_tokens = {
3047 { SRP_OPT_ID_EXT, "id_ext=%s" },
3048 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3049 { SRP_OPT_DGID, "dgid=%s" },
3050 { SRP_OPT_PKEY, "pkey=%x" },
3051 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3052 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3053 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3054 { SRP_OPT_IO_CLASS, "io_class=%x" },
3055 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3056 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3057 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3058 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3059 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3060 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3061 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3062 { SRP_OPT_ERR, NULL }
3065 static int srp_parse_options(const char *buf, struct srp_target_port *target)
3067 char *options, *sep_opt;
3070 substring_t args[MAX_OPT_ARGS];
3076 options = kstrdup(buf, GFP_KERNEL);
3081 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3085 token = match_token(p, srp_opt_tokens, args);
3089 case SRP_OPT_ID_EXT:
3090 p = match_strdup(args);
3095 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3099 case SRP_OPT_IOC_GUID:
3100 p = match_strdup(args);
3105 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3110 p = match_strdup(args);
3115 if (strlen(p) != 32) {
3116 pr_warn("bad dest GID parameter '%s'\n", p);
3121 for (i = 0; i < 16; ++i) {
3122 strlcpy(dgid, p + i * 2, sizeof(dgid));
3123 if (sscanf(dgid, "%hhx",
3124 &target->orig_dgid.raw[i]) < 1) {
3134 if (match_hex(args, &token)) {
3135 pr_warn("bad P_Key parameter '%s'\n", p);
3138 target->pkey = cpu_to_be16(token);
3141 case SRP_OPT_SERVICE_ID:
3142 p = match_strdup(args);
3147 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3151 case SRP_OPT_MAX_SECT:
3152 if (match_int(args, &token)) {
3153 pr_warn("bad max sect parameter '%s'\n", p);
3156 target->scsi_host->max_sectors = token;
3159 case SRP_OPT_QUEUE_SIZE:
3160 if (match_int(args, &token) || token < 1) {
3161 pr_warn("bad queue_size parameter '%s'\n", p);
3164 target->scsi_host->can_queue = token;
3165 target->queue_size = token + SRP_RSP_SQ_SIZE +
3166 SRP_TSK_MGMT_SQ_SIZE;
3167 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3168 target->scsi_host->cmd_per_lun = token;
3171 case SRP_OPT_MAX_CMD_PER_LUN:
3172 if (match_int(args, &token) || token < 1) {
3173 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3177 target->scsi_host->cmd_per_lun = token;
3180 case SRP_OPT_IO_CLASS:
3181 if (match_hex(args, &token)) {
3182 pr_warn("bad IO class parameter '%s'\n", p);
3185 if (token != SRP_REV10_IB_IO_CLASS &&
3186 token != SRP_REV16A_IB_IO_CLASS) {
3187 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3188 token, SRP_REV10_IB_IO_CLASS,
3189 SRP_REV16A_IB_IO_CLASS);
3192 target->io_class = token;
3195 case SRP_OPT_INITIATOR_EXT:
3196 p = match_strdup(args);
3201 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3205 case SRP_OPT_CMD_SG_ENTRIES:
3206 if (match_int(args, &token) || token < 1 || token > 255) {
3207 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3211 target->cmd_sg_cnt = token;
3214 case SRP_OPT_ALLOW_EXT_SG:
3215 if (match_int(args, &token)) {
3216 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3219 target->allow_ext_sg = !!token;
3222 case SRP_OPT_SG_TABLESIZE:
3223 if (match_int(args, &token) || token < 1 ||
3224 token > SG_MAX_SEGMENTS) {
3225 pr_warn("bad max sg_tablesize parameter '%s'\n",
3229 target->sg_tablesize = token;
3232 case SRP_OPT_COMP_VECTOR:
3233 if (match_int(args, &token) || token < 0) {
3234 pr_warn("bad comp_vector parameter '%s'\n", p);
3237 target->comp_vector = token;
3240 case SRP_OPT_TL_RETRY_COUNT:
3241 if (match_int(args, &token) || token < 2 || token > 7) {
3242 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3246 target->tl_retry_count = token;
3250 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3256 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3259 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3260 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3261 !(srp_opt_tokens[i].token & opt_mask))
3262 pr_warn("target creation request is missing parameter '%s'\n",
3263 srp_opt_tokens[i].pattern);
3265 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3266 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3267 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3268 target->scsi_host->cmd_per_lun,
3269 target->scsi_host->can_queue);
3276 static ssize_t srp_create_target(struct device *dev,
3277 struct device_attribute *attr,
3278 const char *buf, size_t count)
3280 struct srp_host *host =
3281 container_of(dev, struct srp_host, dev);
3282 struct Scsi_Host *target_host;
3283 struct srp_target_port *target;
3284 struct srp_rdma_ch *ch;
3285 struct srp_device *srp_dev = host->srp_dev;
3286 struct ib_device *ibdev = srp_dev->dev;
3287 int ret, node_idx, node, cpu, i;
3288 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3289 bool multich = false;
3291 target_host = scsi_host_alloc(&srp_template,
3292 sizeof (struct srp_target_port));
3296 target_host->transportt = ib_srp_transport_template;
3297 target_host->max_channel = 0;
3298 target_host->max_id = 1;
3299 target_host->max_lun = -1LL;
3300 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
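/*
 * The cast-of-NULL idiom above yields the size of the cdb[] member of
 * struct srp_cmd without needing an object of that type; no pointer is
 * actually dereferenced because sizeof is evaluated at compile time.
 */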
3302 target = host_to_target(target_host);
3304 target->io_class = SRP_REV16A_IB_IO_CLASS;
3305 target->scsi_host = target_host;
3306 target->srp_host = host;
3307 target->pd = host->srp_dev->pd;
3308 target->lkey = host->srp_dev->pd->local_dma_lkey;
3309 target->cmd_sg_cnt = cmd_sg_entries;
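/* GCC's "a ? : b" shorthand below picks a when it is nonzero, else b: */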
3310 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3311 target->allow_ext_sg = allow_ext_sg;
3312 target->tl_retry_count = 7;
3313 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3316 * Prevent the SCSI host from being removed by srp_remove_target()
3317 * before this function returns.
3319 scsi_host_get(target->scsi_host);
3321 ret = mutex_lock_interruptible(&host->add_target_mutex);
3325 ret = srp_parse_options(buf, target);
3329 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3331 if (!srp_conn_unique(target->srp_host, target)) {
3332 shost_printk(KERN_INFO, target->scsi_host,
3333 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3334 be64_to_cpu(target->id_ext),
3335 be64_to_cpu(target->ioc_guid),
3336 be64_to_cpu(target->initiator_ext));
3341 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3342 target->cmd_sg_cnt < target->sg_tablesize) {
3343 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3344 target->sg_tablesize = target->cmd_sg_cnt;
3347 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3349 * FR and FMR can only map one HCA page per entry. If the
3350 * start address is not aligned on an HCA page boundary, two
3351 * entries will be used for the head and the tail even though
3352 * these two entries combined contain at most one HCA page of
3353 * data. Hence the "+ 1" in the calculation below.
3355 * The indirect data buffer descriptor is contiguous so the
3356 * memory for that buffer will only be registered if
3357 * register_always is true. Hence add one to mr_per_cmd if
3358 * register_always has been set.
3360 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3361 (ilog2(srp_dev->mr_page_size) - 9);
3362 mr_per_cmd = register_always +
3363 (target->scsi_host->max_sectors + 1 +
3364 max_sectors_per_mr - 1) / max_sectors_per_mr;
3365 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3366 target->scsi_host->max_sectors,
3367 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3368 max_sectors_per_mr, mr_per_cmd);
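/*
 * Worked example with hypothetical values: for a 4 KiB mr_page_size
 * and max_pages_per_mr = 512, max_sectors_per_mr = 512 << 3 = 4096.
 * With max_sectors = 2048 and register_always set, mr_per_cmd =
 * 1 + (2048 + 1 + 4095) / 4096 = 2 (integer division).
 */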
3371 target_host->sg_tablesize = target->sg_tablesize;
3372 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3373 target->mr_per_cmd = mr_per_cmd;
3374 target->indirect_size = target->sg_tablesize *
3375 sizeof (struct srp_direct_buf);
3376 target->max_iu_len = sizeof (struct srp_cmd) +
3377 sizeof (struct srp_indirect_buf) +
3378 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
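/*
 * For example, with the default cmd_sg_cnt of 12 the descriptor list
 * alone contributes 12 * sizeof(struct srp_direct_buf) = 192 bytes
 * (16 bytes per descriptor) to max_iu_len, on top of the fixed
 * SRP_CMD and indirect-buffer descriptor sizes.
 */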
3380 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3381 INIT_WORK(&target->remove_work, srp_remove_work);
3382 spin_lock_init(&target->lock);
3383 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3388 target->ch_count = max_t(unsigned, num_online_nodes(),
3390 min(4 * num_online_nodes(),
3391 ibdev->num_comp_vectors),
3392 num_online_cpus()));
3393 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3399 for_each_online_node(node) {
3400 const int ch_start = (node_idx * target->ch_count /
3401 num_online_nodes());
3402 const int ch_end = ((node_idx + 1) * target->ch_count /
3403 num_online_nodes());
3404 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3405 num_online_nodes() + target->comp_vector)
3406 % ibdev->num_comp_vectors;
3407 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3408 num_online_nodes() + target->comp_vector)
3409 % ibdev->num_comp_vectors;
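/*
 * Example with a hypothetical topology: 2 online nodes, ch_count = 4
 * and 8 completion vectors (comp_vector parameter 0) give node 0
 * channels 0..1 on vectors 0..3 and node 1 channels 2..3 on vectors
 * 4..7; a nonzero comp_vector rotates the vector ranges.
 */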
3412 for_each_online_cpu(cpu) {
3413 if (cpu_to_node(cpu) != node)
3415 if (ch_start + cpu_idx >= ch_end)
3417 ch = &target->ch[ch_start + cpu_idx];
3418 ch->target = target;
3419 ch->comp_vector = cv_start == cv_end ? cv_start :
3420 cv_start + cpu_idx % (cv_end - cv_start);
3421 spin_lock_init(&ch->lock);
3422 INIT_LIST_HEAD(&ch->free_tx);
3423 ret = srp_new_cm_id(ch);
3425 goto err_disconnect;
3427 ret = srp_create_ch_ib(ch);
3429 goto err_disconnect;
3431 ret = srp_alloc_req_data(ch);
3433 goto err_disconnect;
3435 ret = srp_connect_ch(ch, multich);
3437 shost_printk(KERN_ERR, target->scsi_host,
3438 PFX "Connection %d/%d failed\n",
3441 if (node_idx == 0 && cpu_idx == 0) {
3442 goto err_disconnect;
3444 srp_free_ch_ib(target, ch);
3445 srp_free_req_data(target, ch);
3446 target->ch_count = ch - target->ch;
3458 target->scsi_host->nr_hw_queues = target->ch_count;
3460 ret = srp_add_target(host, target);
3462 goto err_disconnect;
3464 if (target->state != SRP_TARGET_REMOVED) {
3465 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3466 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3467 be64_to_cpu(target->id_ext),
3468 be64_to_cpu(target->ioc_guid),
3469 be16_to_cpu(target->pkey),
3470 be64_to_cpu(target->service_id),
3471 target->sgid.raw, target->orig_dgid.raw);
3477 mutex_unlock(&host->add_target_mutex);
3480 scsi_host_put(target->scsi_host);
3482 scsi_host_put(target->scsi_host);
3487 srp_disconnect_target(target);
3489 for (i = 0; i < target->ch_count; i++) {
3490 ch = &target->ch[i];
3491 srp_free_ch_ib(target, ch);
3492 srp_free_req_data(target, ch);
3499 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3501 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3504 struct srp_host *host = container_of(dev, struct srp_host, dev);
3506 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3509 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3511 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3514 struct srp_host *host = container_of(dev, struct srp_host, dev);
3516 return sprintf(buf, "%d\n", host->port);
3519 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3521 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3523 struct srp_host *host;
3525 host = kzalloc(sizeof *host, GFP_KERNEL);
3529 INIT_LIST_HEAD(&host->target_list);
3530 spin_lock_init(&host->target_lock);
3531 init_completion(&host->released);
3532 mutex_init(&host->add_target_mutex);
3533 host->srp_dev = device;
3536 host->dev.class = &srp_class;
3537 host->dev.parent = device->dev->dma_device;
3538 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3540 if (device_register(&host->dev))
3542 if (device_create_file(&host->dev, &dev_attr_add_target))
3544 if (device_create_file(&host->dev, &dev_attr_ibdev))
3546 if (device_create_file(&host->dev, &dev_attr_port))
3552 device_unregister(&host->dev);
3560 static void srp_add_one(struct ib_device *device)
3562 struct srp_device *srp_dev;
3563 struct ib_device_attr *attr = &device->attrs;
3564 struct srp_host *host;
3565 int mr_page_shift, p;
3566 u64 max_pages_per_mr;
3567 unsigned int flags = 0;
3569 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3574 * Use the smallest page size supported by the HCA, down to a
3575 * minimum of 4096 bytes. We're unlikely to build large sglists
3576 * out of smaller entries.
3578 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3579 srp_dev->mr_page_size = 1 << mr_page_shift;
3580 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
3581 max_pages_per_mr = attr->max_mr_size;
3582 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3583 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3584 attr->max_mr_size, srp_dev->mr_page_size,
3585 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
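/*
 * Example with hypothetical HCA limits: page_size_cap = 0x1000 gives
 * mr_page_shift = 12, i.e. 4 KiB MR pages and mr_page_mask = ~0xfff;
 * a 2 MiB max_mr_size would then allow 512 pages per MR before the
 * SRP_MAX_PAGES_PER_MR clamp is applied.
 */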
3586 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3589 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3590 device->map_phys_fmr && device->unmap_fmr);
3591 srp_dev->has_fr = (attr->device_cap_flags &
3592 IB_DEVICE_MEM_MGT_EXTENSIONS);
3593 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
3594 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3595 } else if (!never_register &&
3596 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
3597 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3598 (!srp_dev->has_fmr || prefer_fr));
3599 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3602 if (never_register || !register_always ||
3603 (!srp_dev->has_fmr && !srp_dev->has_fr))
3604 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3606 if (srp_dev->use_fast_reg) {
3607 srp_dev->max_pages_per_mr =
3608 min_t(u32, srp_dev->max_pages_per_mr,
3609 attr->max_fast_reg_page_list_len);
3611 srp_dev->mr_max_size = srp_dev->mr_page_size *
3612 srp_dev->max_pages_per_mr;
3613 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3614 device->name, mr_page_shift, attr->max_mr_size,
3615 attr->max_fast_reg_page_list_len,
3616 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3618 INIT_LIST_HEAD(&srp_dev->dev_list);
3620 srp_dev->dev = device;
3621 srp_dev->pd = ib_alloc_pd(device, flags);
3622 if (IS_ERR(srp_dev->pd))
3626 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3627 host = srp_add_port(srp_dev, p);
3629 list_add_tail(&host->list, &srp_dev->dev_list);
3632 ib_set_client_data(device, &srp_client, srp_dev);
3639 static void srp_remove_one(struct ib_device *device, void *client_data)
3641 struct srp_device *srp_dev;
3642 struct srp_host *host, *tmp_host;
3643 struct srp_target_port *target;
3645 srp_dev = client_data;
3649 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3650 device_unregister(&host->dev);
3652 * Wait for the sysfs entry to go away, so that no new
3653 * target ports can be created.
3655 wait_for_completion(&host->released);
3658 * Remove all target ports.
3660 spin_lock(&host->target_lock);
3661 list_for_each_entry(target, &host->target_list, list)
3662 srp_queue_remove_work(target);
3663 spin_unlock(&host->target_lock);
3666 * Wait for tl_err and target port removal tasks.
3668 flush_workqueue(system_long_wq);
3669 flush_workqueue(srp_remove_wq);
3674 ib_dealloc_pd(srp_dev->pd);
3679 static struct srp_function_template ib_srp_transport_functions = {
3680 .has_rport_state = true,
3681 .reset_timer_if_blocked = true,
3682 .reconnect_delay = &srp_reconnect_delay,
3683 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3684 .dev_loss_tmo = &srp_dev_loss_tmo,
3685 .reconnect = srp_rport_reconnect,
3686 .rport_delete = srp_rport_delete,
3687 .terminate_rport_io = srp_terminate_io,
3690 static int __init srp_init_module(void)
3694 if (srp_sg_tablesize) {
3695 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3696 if (!cmd_sg_entries)
3697 cmd_sg_entries = srp_sg_tablesize;
3700 if (!cmd_sg_entries)
3701 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3703 if (cmd_sg_entries > 255) {
3704 pr_warn("Clamping cmd_sg_entries to 255\n");
3705 cmd_sg_entries = 255;
3708 if (!indirect_sg_entries)
3709 indirect_sg_entries = cmd_sg_entries;
3710 else if (indirect_sg_entries < cmd_sg_entries) {
3711 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3713 indirect_sg_entries = cmd_sg_entries;
3716 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3717 pr_warn("Clamping indirect_sg_entries to %u\n",
3719 indirect_sg_entries = SG_MAX_SEGMENTS;
3722 srp_remove_wq = create_workqueue("srp_remove");
3723 if (!srp_remove_wq) {
3729 ib_srp_transport_template =
3730 srp_attach_transport(&ib_srp_transport_functions);
3731 if (!ib_srp_transport_template)
3734 ret = class_register(&srp_class);
3736 pr_err("couldn't register class infiniband_srp\n");
3740 ib_sa_register_client(&srp_sa_client);
3742 ret = ib_register_client(&srp_client);
3744 pr_err("couldn't register IB client\n");
3752 ib_sa_unregister_client(&srp_sa_client);
3753 class_unregister(&srp_class);
3756 srp_release_transport(ib_srp_transport_template);
3759 destroy_workqueue(srp_remove_wq);
3763 static void __exit srp_cleanup_module(void)
3765 ib_unregister_client(&srp_client);
3766 ib_sa_unregister_client(&srp_sa_client);
3767 class_unregister(&srp_class);
3768 srp_release_transport(ib_srp_transport_template);
3769 destroy_workqueue(srp_remove_wq);
3772 module_init(srp_init_module);
3773 module_exit(srp_cleanup_module);