/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);

#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");
static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;
	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	ch->path.service_id = target->service_id;

	return 0;
}

static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}
/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}
/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}
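
/*
 * Illustrative sketch (editorial addition, an assumption rather than driver
 * code): a typical caller pairs srp_fr_pool_get() with srp_fr_pool_put(),
 * and only returns a descriptor after posting an IB_WR_LOCAL_INV work
 * request for its rkey:
 *
 *	struct srp_fr_desc *d = srp_fr_pool_get(pool);
 *
 *	if (d) {
 *		// register memory through d->mr and post the FR work
 *		// request; once d->mr->rkey has been invalidated:
 *		srp_fr_pool_put(pool, &d, 1);
 *	}
 */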
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it so that the receive completion handler
 * cannot access the queue pair while it is being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
				ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
				ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch, ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(ch, qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}
/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch, ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Prevent the SCSI error handler from using this channel after
	 * it has been freed: the SCSI error handler may continue trying
	 * to perform recovery actions after scsi_remove_host() has
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}

static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID |
					       IB_SA_PATH_REC_SGID |
					       IB_SA_PATH_REC_NUMB_PATH |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}
static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path	      = &ch->path;
	req->param.alternate_path     = NULL;
	req->param.service_id	      = target->service_id;
	req->param.qp_num	      = ch->qp->qp_num;
	req->param.qp_type	      = ch->qp->qp_type;
	req->param.private_data	      = &req->priv;
	req->param.private_data_len   = sizeof req->priv;
	req->param.flow_control	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}
static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}
/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}
static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		return ret;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			return ret;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			return ret;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					"Queueing INV WR for rkey %#x failed (%d)\n",
					(*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}
/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}
static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}
/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to ensure that is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}
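
/*
 * Worked example (editorial addition, assuming a 4 KiB mr_page_size): the
 * pages[] array holds page-aligned bus addresses (dma_addr & mr_page_mask)
 * and the FMR is mapped at io_addr 0, so the descriptor built above uses
 * only the intra-page offset of the first byte
 * (base_dma_addr & ~mr_page_mask), e.g. 0x678 for a buffer starting at bus
 * address 0x12345678.
 */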
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;

		if (state->npages == dev->max_pages_per_mr ||
		    (state->npages > 0 && offset != 0)) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the end of the MR is not on a page boundary then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if ((dma_addr & ~dev->mr_page_mask) != 0)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}

static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}
static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->pd->unsafe_global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}
static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}
/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct ib_pd *pd = target->pd;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
		     !target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);
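
	/*
	 * Example (editorial addition, illustrative numbers): with
	 * state.ndesc == 8 descriptors and cmd_sg_cnt == 4, count == 4,
	 * table_len == 8 * 16 == 128 bytes, and the IU carries
	 * sizeof(srp_cmd) + sizeof(srp_indirect_buf) + 4 * 16 bytes; the
	 * remaining descriptors are only reachable through the external
	 * indirect table registered below.
	 */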
	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}
/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}
/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd && scmnd->host_scribble) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		} else {
			scmnd = NULL;
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}
/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Prevent the loops that iterate over the request ring from
	 * encountering a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}
/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
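
/*
 * Worked example (editorial addition, illustrative numbers): with
 * qp_attr->timeout == 19 and qp_attr->retry_cnt == 7,
 * T_tr = 4096 ns * 2^19 ~= 2.1 s, so the largest completion time is
 * 7 * 4 * 2.1 s ~= 60 s and the resulting queue timeout is about 61 s
 * expressed in jiffies.
 */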
2311 static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
2312 const struct srp_login_rsp *lrsp,
2313 struct srp_rdma_ch *ch)
2315 struct srp_target_port *target = ch->target;
2316 struct ib_qp_attr *qp_attr = NULL;
2321 if (lrsp->opcode == SRP_LOGIN_RSP) {
2322 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
2323 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
2326 * Reserve credits for task management so we don't
2327 * bounce requests back to the SCSI mid-layer.
2329 target->scsi_host->can_queue
2330 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
2331 target->scsi_host->can_queue);
2332 target->scsi_host->cmd_per_lun
2333 = min_t(int, target->scsi_host->can_queue,
2334 target->scsi_host->cmd_per_lun);
2336 shost_printk(KERN_WARNING, target->scsi_host,
2337 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
2343 ret = srp_alloc_iu_bufs(ch);
2349 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
2353 qp_attr->qp_state = IB_QPS_RTR;
2354 ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
2358 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
2362 for (i = 0; i < target->queue_size; i++) {
2363 struct srp_iu *iu = ch->rx_ring[i];
		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
2370 qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
2390 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2391 struct ib_cm_event *event,
2392 struct srp_rdma_ch *ch)
2394 struct srp_target_port *target = ch->target;
2395 struct Scsi_Host *shost = target->scsi_host;
2396 struct ib_class_port_info *cpi;
2399 switch (event->param.rej_rcvd.reason) {
2400 case IB_CM_REJ_PORT_CM_REDIRECT:
2401 cpi = event->param.rej_rcvd.ari;
2402 ch->path.dlid = cpi->redirect_lid;
2403 ch->path.pkey = cpi->redirect_pkey;
2404 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2405 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2407 ch->status = ch->path.dlid ?
2408 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2411 case IB_CM_REJ_PORT_REDIRECT:
2412 if (srp_target_is_topspin(target)) {
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
2418 memcpy(ch->path.dgid.raw,
2419 event->param.rej_rcvd.ari, 16);
2421 shost_printk(KERN_DEBUG, shost,
2422 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2423 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2424 be64_to_cpu(ch->path.dgid.global.interface_id));
2426 ch->status = SRP_PORT_REDIRECT;
2428 shost_printk(KERN_WARNING, shost,
2429 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2430 ch->status = -ECONNRESET;
2434 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2435 shost_printk(KERN_WARNING, shost,
2436 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2437 ch->status = -ECONNRESET;
2440 case IB_CM_REJ_CONSUMER_DEFINED:
2441 opcode = *(u8 *) event->private_data;
2442 if (opcode == SRP_LOGIN_REJ) {
2443 struct srp_login_rej *rej = event->private_data;
2444 u32 reason = be32_to_cpu(rej->reason);
2446 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2447 shost_printk(KERN_WARNING, shost,
2448 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2450 shost_printk(KERN_WARNING, shost, PFX
2451 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2453 target->orig_dgid.raw, reason);
2455 shost_printk(KERN_WARNING, shost,
2456 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2457 " opcode 0x%02x\n", opcode);
2458 ch->status = -ECONNRESET;
2461 case IB_CM_REJ_STALE_CONN:
2462 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2463 ch->status = SRP_STALE_CONN;
2467 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2468 event->param.rej_rcvd.reason);
2469 ch->status = -ECONNRESET;
2473 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2475 struct srp_rdma_ch *ch = cm_id->context;
2476 struct srp_target_port *target = ch->target;
2479 switch (event->event) {
2480 case IB_CM_REQ_ERROR:
2481 shost_printk(KERN_DEBUG, target->scsi_host,
2482 PFX "Sending CM REQ failed\n");
2484 ch->status = -ECONNRESET;
2487 case IB_CM_REP_RECEIVED:
2489 srp_cm_rep_handler(cm_id, event->private_data, ch);
2492 case IB_CM_REJ_RECEIVED:
2493 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2496 srp_cm_rej_handler(cm_id, event, ch);
2499 case IB_CM_DREQ_RECEIVED:
2500 shost_printk(KERN_WARNING, target->scsi_host,
2501 PFX "DREQ received - connection closed\n");
2502 ch->connected = false;
2503 if (ib_send_cm_drep(cm_id, NULL, 0))
2504 shost_printk(KERN_ERR, target->scsi_host,
2505 PFX "Sending CM DREP failed\n");
2506 queue_work(system_long_wq, &target->tl_err_work);
2509 case IB_CM_TIMEWAIT_EXIT:
2510 shost_printk(KERN_ERR, target->scsi_host,
2511 PFX "connection closed\n");
2517 case IB_CM_MRA_RECEIVED:
2518 case IB_CM_DREQ_ERROR:
2519 case IB_CM_DREP_RECEIVED:
2523 shost_printk(KERN_WARNING, target->scsi_host,
2524 PFX "Unhandled CM event %d\n", event->event);
2529 complete(&ch->done);
 * srp_change_queue_depth - set device queue depth
2536 * @sdev: scsi device struct
2537 * @qdepth: requested queue depth
 * Returns the new queue depth.
2542 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}
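/*
 * Illustrative note: for a device without tagged queuing support the depth
 * is forced to a single outstanding command; otherwise the requested depth
 * is passed through unchanged to scsi_change_queue_depth().
 */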
2549 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2550 u8 func, u8 *status)
2552 struct srp_target_port *target = ch->target;
2553 struct srp_rport *rport = target->rport;
2554 struct ib_device *dev = target->srp_host->srp_dev->dev;
2556 struct srp_tsk_mgmt *tsk_mgmt;
	if (!ch->connected || target->qp_in_error)
		return -1;
	 * Lock the rport mutex to prevent srp_create_ch_ib() from being
	 * invoked while a task management function is being sent.
2566 mutex_lock(&rport->mutex);
2567 spin_lock_irq(&ch->lock);
2568 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2569 spin_unlock_irq(&ch->lock);
	if (!iu) {
		mutex_unlock(&rport->mutex);

		return -1;
	}
	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2582 tsk_mgmt->opcode = SRP_TSK_MGMT;
2583 int_to_scsilun(lun, &tsk_mgmt->lun);
2584 tsk_mgmt->tsk_mgmt_func = func;
2585 tsk_mgmt->task_tag = req_tag;
2587 spin_lock_irq(&ch->lock);
2588 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2589 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2590 spin_unlock_irq(&ch->lock);
2592 init_completion(&ch->tsk_mgmt_done);
	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
2596 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
		srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
		mutex_unlock(&rport->mutex);

		return -1;
	}
2602 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2603 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2604 if (res > 0 && status)
2605 *status = ch->tsk_mgmt_status;
2606 mutex_unlock(&rport->mutex);
2608 WARN_ON_ONCE(res < 0);
	return res > 0 ? 0 : -1;
}
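/*
 * Sketch (illustrative, not part of the driver): since task management tags
 * are allocated above as (previous tag + 1) | SRP_TAG_TSK_MGMT, a response
 * demultiplexer can tell them apart from SCSI command tags by testing that
 * bit, assuming SRP_TAG_TSK_MGMT is a single dedicated high tag bit as
 * defined in ib_srp.h. The helper name is hypothetical.
 */
static inline bool srp_example_tag_is_tsk_mgmt(u64 tag)
{
	/* Task management tags carry the SRP_TAG_TSK_MGMT marker bit. */
	return (tag & SRP_TAG_TSK_MGMT) != 0;
}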
2613 static int srp_abort(struct scsi_cmnd *scmnd)
2615 struct srp_target_port *target = host_to_target(scmnd->device->host);
2616 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2619 struct srp_rdma_ch *ch;
2622 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2626 tag = blk_mq_unique_tag(scmnd->request);
2627 ch_idx = blk_mq_unique_tag_to_hwq(tag);
	if (WARN_ON_ONCE(ch_idx >= target->ch_count))
		return SUCCESS;
2630 ch = &target->ch[ch_idx];
	if (!srp_claim_req(ch, req, NULL, scmnd))
		return SUCCESS;
2633 shost_printk(KERN_ERR, target->scsi_host,
2634 "Sending SRP abort for tag %#x\n", tag);
	if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK, NULL) == 0)
		ret = SUCCESS;
	else if (target->rport->state == SRP_RPORT_LOST)
		ret = FAST_IO_FAIL;
	else
		ret = FAILED;
	srp_free_req(ch, req, scmnd, 0);
	scmnd->result = DID_ABORT << 16;
	scmnd->scsi_done(scmnd);

	return ret;
2649 static int srp_reset_device(struct scsi_cmnd *scmnd)
2651 struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_rdma_ch *ch;
	int i, j;
	u8 status;
2656 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2658 ch = &target->ch[0];
	if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET, &status))
		return FAILED;
	if (status)
		return FAILED;
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/* Do not reuse 'i' here: it would cut the outer loop short. */
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
		}
	}

	return SUCCESS;
2677 static int srp_reset_host(struct scsi_cmnd *scmnd)
2679 struct srp_target_port *target = host_to_target(scmnd->device->host);
2681 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2683 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2686 static int srp_slave_alloc(struct scsi_device *sdev)
2688 struct Scsi_Host *shost = sdev->host;
2689 struct srp_target_port *target = host_to_target(shost);
2690 struct srp_device *srp_dev = target->srp_host->srp_dev;
2693 blk_queue_virt_boundary(sdev->request_queue,
2694 ~srp_dev->mr_page_mask);
2699 static int srp_slave_configure(struct scsi_device *sdev)
2701 struct Scsi_Host *shost = sdev->host;
2702 struct srp_target_port *target = host_to_target(shost);
2703 struct request_queue *q = sdev->request_queue;
2704 unsigned long timeout;
2706 if (sdev->type == TYPE_DISK) {
2707 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2708 blk_queue_rq_timeout(q, timeout);
2714 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2717 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2719 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2722 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2725 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2727 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2730 static ssize_t show_service_id(struct device *dev,
2731 struct device_attribute *attr, char *buf)
2733 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2735 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2738 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2741 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2743 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2746 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2749 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2751 return sprintf(buf, "%pI6\n", target->sgid.raw);
2754 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2757 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2758 struct srp_rdma_ch *ch = &target->ch[0];
2760 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2763 static ssize_t show_orig_dgid(struct device *dev,
2764 struct device_attribute *attr, char *buf)
2766 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2768 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2771 static ssize_t show_req_lim(struct device *dev,
2772 struct device_attribute *attr, char *buf)
2774 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2775 struct srp_rdma_ch *ch;
2776 int i, req_lim = INT_MAX;
2778 for (i = 0; i < target->ch_count; i++) {
2779 ch = &target->ch[i];
2780 req_lim = min(req_lim, ch->req_lim);
2782 return sprintf(buf, "%d\n", req_lim);
2785 static ssize_t show_zero_req_lim(struct device *dev,
2786 struct device_attribute *attr, char *buf)
2788 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2790 return sprintf(buf, "%d\n", target->zero_req_lim);
2793 static ssize_t show_local_ib_port(struct device *dev,
2794 struct device_attribute *attr, char *buf)
2796 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2798 return sprintf(buf, "%d\n", target->srp_host->port);
2801 static ssize_t show_local_ib_device(struct device *dev,
2802 struct device_attribute *attr, char *buf)
2804 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2806 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2809 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2812 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2814 return sprintf(buf, "%d\n", target->ch_count);
2817 static ssize_t show_comp_vector(struct device *dev,
2818 struct device_attribute *attr, char *buf)
2820 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2822 return sprintf(buf, "%d\n", target->comp_vector);
2825 static ssize_t show_tl_retry_count(struct device *dev,
2826 struct device_attribute *attr, char *buf)
2828 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2830 return sprintf(buf, "%d\n", target->tl_retry_count);
2833 static ssize_t show_cmd_sg_entries(struct device *dev,
2834 struct device_attribute *attr, char *buf)
2836 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2838 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2841 static ssize_t show_allow_ext_sg(struct device *dev,
2842 struct device_attribute *attr, char *buf)
2844 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2846 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2849 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2850 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2851 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2852 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2853 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2854 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2855 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2856 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2857 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2858 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2859 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2860 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2861 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2862 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2863 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2864 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
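/*
 * Example (illustrative): each attribute above shows up under the SCSI
 * host's sysfs directory, so e.g. the current credit count can be read with
 * "cat /sys/class/scsi_host/host<n>/req_lim", where <n> is the
 * system-dependent host number.
 */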
static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_sgid,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	&dev_attr_ch_count,
	&dev_attr_comp_vector,
	&dev_attr_tl_retry_count,
	&dev_attr_cmd_sg_entries,
	&dev_attr_allow_ext_sg,
	NULL
};
2886 static struct scsi_host_template srp_template = {
2887 .module = THIS_MODULE,
2888 .name = "InfiniBand SRP initiator",
2889 .proc_name = DRV_NAME,
2890 .slave_alloc = srp_slave_alloc,
2891 .slave_configure = srp_slave_configure,
2892 .info = srp_target_info,
2893 .queuecommand = srp_queuecommand,
2894 .change_queue_depth = srp_change_queue_depth,
2895 .eh_timed_out = srp_timed_out,
2896 .eh_abort_handler = srp_abort,
2897 .eh_device_reset_handler = srp_reset_device,
2898 .eh_host_reset_handler = srp_reset_host,
2899 .skip_settle_delay = true,
2900 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
	.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
	.this_id = -1,
	.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2904 .use_clustering = ENABLE_CLUSTERING,
2905 .shost_attrs = srp_host_attrs,
	.track_queue_depth = 1,
};
2909 static int srp_sdev_count(struct Scsi_Host *host)
	struct scsi_device *sdev;
	int c = 0;

	shost_for_each_device(sdev, host)
		c++;

	return c;
 * Return values:
 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2923 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2924 * removal has been scheduled.
2925 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2927 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2929 struct srp_rport_identifiers ids;
2930 struct srp_rport *rport;
2932 target->state = SRP_TARGET_SCANNING;
2933 sprintf(target->target_name, "SRP.T10:%016llX",
2934 be64_to_cpu(target->id_ext));
	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;
2939 memcpy(ids.port_id, &target->id_ext, 8);
2940 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2941 ids.roles = SRP_RPORT_ROLE_TARGET;
2942 rport = srp_rport_add(target->scsi_host, &ids);
2943 if (IS_ERR(rport)) {
2944 scsi_remove_host(target->scsi_host);
2945 return PTR_ERR(rport);
2948 rport->lld_data = target;
2949 target->rport = rport;
2951 spin_lock(&host->target_lock);
2952 list_add_tail(&target->list, &host->target_list);
2953 spin_unlock(&host->target_lock);
2955 scsi_scan_target(&target->scsi_host->shost_gendev,
2956 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2958 if (srp_connected_ch(target) < target->ch_count ||
2959 target->qp_in_error) {
2960 shost_printk(KERN_INFO, target->scsi_host,
2961 PFX "SCSI scan failed - removing SCSI host\n");
		srp_queue_remove_work(target);
		goto out;
	}
2966 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2967 dev_name(&target->scsi_host->shost_gendev),
2968 srp_sdev_count(target->scsi_host));
2970 spin_lock_irq(&target->lock);
2971 if (target->state == SRP_TARGET_SCANNING)
2972 target->state = SRP_TARGET_LIVE;
	spin_unlock_irq(&target->lock);

out:
	return 0;
2979 static void srp_release_dev(struct device *dev)
2981 struct srp_host *host =
2982 container_of(dev, struct srp_host, dev);
2984 complete(&host->released);
2987 static struct class srp_class = {
2988 .name = "infiniband_srp",
	.dev_release = srp_release_dev
};
 * srp_conn_unique() - check whether the connection to a target is unique
 * @host: SRP host.
 * @target: SRP target port.
2997 static bool srp_conn_unique(struct srp_host *host,
2998 struct srp_target_port *target)
	struct srp_target_port *t;
	bool ret = false;

	if (target->state == SRP_TARGET_REMOVED)
		goto out;

	ret = true;
3008 spin_lock(&host->target_lock);
	list_for_each_entry(t, &host->target_list, list) {
		if (t != target &&
		    target->id_ext == t->id_ext &&
		    target->ioc_guid == t->ioc_guid &&
		    target->initiator_ext == t->initiator_ext) {
			ret = false;
			break;
		}
	}
	spin_unlock(&host->target_lock);

out:
	return ret;
3025 * Target ports are added by writing
3027 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3028 * pkey=<P_Key>,service_id=<service ID>
3030 * to the add_target sysfs attribute.
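 *
 * Example (illustrative; the GUIDs, GID and the HCA/port in the sysfs path
 * are placeholders for system-specific values):
 *
 *     echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target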
	SRP_OPT_ERR = 0,
	SRP_OPT_ID_EXT = 1 << 0,
3035 SRP_OPT_IOC_GUID = 1 << 1,
3036 SRP_OPT_DGID = 1 << 2,
3037 SRP_OPT_PKEY = 1 << 3,
3038 SRP_OPT_SERVICE_ID = 1 << 4,
3039 SRP_OPT_MAX_SECT = 1 << 5,
3040 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3041 SRP_OPT_IO_CLASS = 1 << 7,
3042 SRP_OPT_INITIATOR_EXT = 1 << 8,
3043 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3044 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3045 SRP_OPT_SG_TABLESIZE = 1 << 11,
3046 SRP_OPT_COMP_VECTOR = 1 << 12,
3047 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3048 SRP_OPT_QUEUE_SIZE = 1 << 14,
	SRP_OPT_ALL = (SRP_OPT_ID_EXT |
		       SRP_OPT_IOC_GUID |
		       SRP_OPT_DGID |
		       SRP_OPT_PKEY |
		       SRP_OPT_SERVICE_ID),
};
3056 static const match_table_t srp_opt_tokens = {
3057 { SRP_OPT_ID_EXT, "id_ext=%s" },
3058 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3059 { SRP_OPT_DGID, "dgid=%s" },
3060 { SRP_OPT_PKEY, "pkey=%x" },
3061 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3062 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3063 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3064 { SRP_OPT_IO_CLASS, "io_class=%x" },
3065 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3066 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3067 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3068 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3069 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3070 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3071 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
	{ SRP_OPT_ERR, NULL }
};
3075 static int srp_parse_options(const char *buf, struct srp_target_port *target)
3077 char *options, *sep_opt;
3080 substring_t args[MAX_OPT_ARGS];
	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;
3091 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3095 token = match_token(p, srp_opt_tokens, args);
3099 case SRP_OPT_ID_EXT:
3100 p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
3109 case SRP_OPT_IOC_GUID:
3110 p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				pr_warn("bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}
			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, sizeof(dgid));
				if (sscanf(dgid, "%hhx",
					   &target->orig_dgid.raw[i]) < 1) {
					ret = -EINVAL;
					kfree(p);
					goto out;
				}
			}
			kfree(p);
			break;
		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				pr_warn("bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->pkey = cpu_to_be16(token);
			break;
3151 case SRP_OPT_SERVICE_ID:
3152 p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
3161 case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				pr_warn("bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;
3169 case SRP_OPT_QUEUE_SIZE:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad queue_size parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->can_queue = token;
			target->queue_size = token + SRP_RSP_SQ_SIZE +
					     SRP_TSK_MGMT_SQ_SIZE;
			if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
				target->scsi_host->cmd_per_lun = token;
			break;
3181 case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token) || token < 1) {
				pr_warn("bad max cmd_per_lun parameter '%s'\n",
					p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = token;
			break;
3190 case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				pr_warn("bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
					token, SRP_REV10_IB_IO_CLASS,
					SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;
3205 case SRP_OPT_INITIATOR_EXT:
3206 p = match_strdup(args);
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;
3215 case SRP_OPT_CMD_SG_ENTRIES:
			if (match_int(args, &token) || token < 1 || token > 255) {
				pr_warn("bad max cmd_sg_entries parameter '%s'\n",
					p);
				goto out;
			}
			target->cmd_sg_cnt = token;
			break;
3224 case SRP_OPT_ALLOW_EXT_SG:
			if (match_int(args, &token)) {
				pr_warn("bad allow_ext_sg parameter '%s'\n", p);
				goto out;
			}
			target->allow_ext_sg = !!token;
			break;
3232 case SRP_OPT_SG_TABLESIZE:
			if (match_int(args, &token) || token < 1 ||
					token > SG_MAX_SEGMENTS) {
				pr_warn("bad max sg_tablesize parameter '%s'\n",
					p);
				goto out;
			}
			target->sg_tablesize = token;
			break;
3242 case SRP_OPT_COMP_VECTOR:
			if (match_int(args, &token) || token < 0) {
				pr_warn("bad comp_vector parameter '%s'\n", p);
				goto out;
			}
			target->comp_vector = token;
			break;
3250 case SRP_OPT_TL_RETRY_COUNT:
			if (match_int(args, &token) || token < 2 || token > 7) {
				pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
					p);
				goto out;
			}
			target->tl_retry_count = token;
			break;
3260 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
3269 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3270 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3271 !(srp_opt_tokens[i].token & opt_mask))
3272 pr_warn("target creation request is missing parameter '%s'\n",
3273 srp_opt_tokens[i].pattern);
3275 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3276 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3277 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3278 target->scsi_host->cmd_per_lun,
3279 target->scsi_host->can_queue);
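	/*
	 * Example (illustrative): the string
	 *
	 *     "id_ext=1,ioc_guid=2,dgid=<32 hex digits>,pkey=ffff,service_id=3"
	 *
	 * sets all five mandatory bits in opt_mask, so the
	 * (opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL check above succeeds and 0
	 * is returned; omitting any of the five triggers the "missing
	 * parameter" warning.
	 */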
3286 static ssize_t srp_create_target(struct device *dev,
3287 struct device_attribute *attr,
3288 const char *buf, size_t count)
3290 struct srp_host *host =
3291 container_of(dev, struct srp_host, dev);
3292 struct Scsi_Host *target_host;
3293 struct srp_target_port *target;
3294 struct srp_rdma_ch *ch;
3295 struct srp_device *srp_dev = host->srp_dev;
3296 struct ib_device *ibdev = srp_dev->dev;
3297 int ret, node_idx, node, cpu, i;
3298 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3299 bool multich = false;
	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;
3306 target_host->transportt = ib_srp_transport_template;
3307 target_host->max_channel = 0;
3308 target_host->max_id = 1;
3309 target_host->max_lun = -1LL;
3310 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3312 target = host_to_target(target_host);
3314 target->io_class = SRP_REV16A_IB_IO_CLASS;
3315 target->scsi_host = target_host;
3316 target->srp_host = host;
3317 target->pd = host->srp_dev->pd;
3318 target->lkey = host->srp_dev->pd->local_dma_lkey;
3319 target->cmd_sg_cnt = cmd_sg_entries;
3320 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3321 target->allow_ext_sg = allow_ext_sg;
3322 target->tl_retry_count = 7;
3323 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3326 * Avoid that the SCSI host can be removed by srp_remove_target()
3327 * before this function returns.
3329 scsi_host_get(target->scsi_host);
	ret = mutex_lock_interruptible(&host->add_target_mutex);
	if (ret < 0)
		goto put;
	ret = srp_parse_options(buf, target);
	if (ret)
		goto out;
3339 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3341 if (!srp_conn_unique(target->srp_host, target)) {
3342 shost_printk(KERN_INFO, target->scsi_host,
3343 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3344 be64_to_cpu(target->id_ext),
3345 be64_to_cpu(target->ioc_guid),
			     be64_to_cpu(target->initiator_ext));
		ret = -EEXIST;
		goto out;
	}
3351 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3352 target->cmd_sg_cnt < target->sg_tablesize) {
3353 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3354 target->sg_tablesize = target->cmd_sg_cnt;
3357 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3359 * FR and FMR can only map one HCA page per entry. If the
3360 * start address is not aligned on a HCA page boundary two
3361 * entries will be used for the head and the tail although
3362 * these two entries combined contain at most one HCA page of
3363 * data. Hence the "+ 1" in the calculation below.
3365 * The indirect data buffer descriptor is contiguous so the
3366 * memory for that buffer will only be registered if
3367 * register_always is true. Hence add one to mr_per_cmd if
3368 * register_always has been set.
3370 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3371 (ilog2(srp_dev->mr_page_size) - 9);
3372 mr_per_cmd = register_always +
3373 (target->scsi_host->max_sectors + 1 +
3374 max_sectors_per_mr - 1) / max_sectors_per_mr;
3375 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3376 target->scsi_host->max_sectors,
3377 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3378 max_sectors_per_mr, mr_per_cmd);
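		/*
		 * Worked example (illustrative): with max_sectors = 1024
		 * (512 KiB I/O), max_pages_per_mr = 256 and mr_page_size =
		 * 4096, max_sectors_per_mr = 256 << (12 - 9) = 2048 and so
		 * mr_per_cmd = register_always + (1024 + 1 + 2047) / 2048
		 *            = register_always + 1.
		 */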
3381 target_host->sg_tablesize = target->sg_tablesize;
3382 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3383 target->mr_per_cmd = mr_per_cmd;
3384 target->indirect_size = target->sg_tablesize *
3385 sizeof (struct srp_direct_buf);
3386 target->max_iu_len = sizeof (struct srp_cmd) +
3387 sizeof (struct srp_indirect_buf) +
3388 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
3390 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3391 INIT_WORK(&target->remove_work, srp_remove_work);
3392 spin_lock_init(&target->lock);
	ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
	if (ret)
		goto out;
	target->ch_count = max_t(unsigned, num_online_nodes(),
				 min(min(4 * num_online_nodes(),
					 ibdev->num_comp_vectors),
				     num_online_cpus()));
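	/*
	 * Example (illustrative): on a system with 2 online NUMA nodes,
	 * 16 online CPUs and an HCA exposing 8 completion vectors this
	 * evaluates to max(2, min(min(8, 8), 16)) = 8 RDMA channels.
	 */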
	target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
			     GFP_KERNEL);
	if (!target->ch)
		goto out;
3409 for_each_online_node(node) {
3410 const int ch_start = (node_idx * target->ch_count /
3411 num_online_nodes());
3412 const int ch_end = ((node_idx + 1) * target->ch_count /
3413 num_online_nodes());
3414 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3415 num_online_nodes() + target->comp_vector)
3416 % ibdev->num_comp_vectors;
3417 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3418 num_online_nodes() + target->comp_vector)
3419 % ibdev->num_comp_vectors;
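		/*
		 * Example (illustrative): continuing the 8-channel example
		 * above with comp_vector = 0, node 0 is assigned channels
		 * [0, 4) and completion vectors starting at index 0, while
		 * node 1 gets channels [4, 8) starting at vector index 4,
		 * spreading the channels evenly over nodes and vectors.
		 */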
3422 for_each_online_cpu(cpu) {
			if (cpu_to_node(cpu) != node)
				continue;
			if (ch_start + cpu_idx >= ch_end)
				break;
3427 ch = &target->ch[ch_start + cpu_idx];
3428 ch->target = target;
3429 ch->comp_vector = cv_start == cv_end ? cv_start :
3430 cv_start + cpu_idx % (cv_end - cv_start);
3431 spin_lock_init(&ch->lock);
3432 INIT_LIST_HEAD(&ch->free_tx);
3433 ret = srp_new_cm_id(ch);
			if (ret)
				goto err_disconnect;
3437 ret = srp_create_ch_ib(ch);
			if (ret)
				goto err_disconnect;
3441 ret = srp_alloc_req_data(ch);
			if (ret)
				goto err_disconnect;
3445 ret = srp_connect_ch(ch, multich);
			if (ret) {
				shost_printk(KERN_ERR, target->scsi_host,
					     PFX "Connection %d/%d to %pI6 failed\n",
					     ch_start + cpu_idx,
					     target->ch_count,
					     ch->target->orig_dgid.raw);
				if (node_idx == 0 && cpu_idx == 0) {
					goto free_ch;
				} else {
					srp_free_ch_ib(target, ch);
					srp_free_req_data(target, ch);
					target->ch_count = ch - target->ch;
					goto connected;
				}
3469 target->scsi_host->nr_hw_queues = target->ch_count;
3471 ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;
3475 if (target->state != SRP_TARGET_REMOVED) {
3476 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3477 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3478 be64_to_cpu(target->id_ext),
3479 be64_to_cpu(target->ioc_guid),
3480 be16_to_cpu(target->pkey),
3481 be64_to_cpu(target->service_id),
3482 target->sgid.raw, target->orig_dgid.raw);
out:
	mutex_unlock(&host->add_target_mutex);

put:
	/* Drop the reference taken by scsi_host_get() above. */
	scsi_host_put(target->scsi_host);
	if (ret < 0)
		scsi_host_put(target->scsi_host);

	return ret;
err_disconnect:
	srp_disconnect_target(target);

free_ch:
3501 for (i = 0; i < target->ch_count; i++) {
3502 ch = &target->ch[i];
3503 srp_free_ch_ib(target, ch);
3504 srp_free_req_data(target, ch);
3511 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3513 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3516 struct srp_host *host = container_of(dev, struct srp_host, dev);
3518 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3521 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3523 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3526 struct srp_host *host = container_of(dev, struct srp_host, dev);
3528 return sprintf(buf, "%d\n", host->port);
3531 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3533 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3535 struct srp_host *host;
3537 host = kzalloc(sizeof *host, GFP_KERNEL);
3541 INIT_LIST_HEAD(&host->target_list);
3542 spin_lock_init(&host->target_lock);
3543 init_completion(&host->released);
3544 mutex_init(&host->add_target_mutex);
3545 host->srp_dev = device;
3548 host->dev.class = &srp_class;
3549 host->dev.parent = device->dev->dma_device;
3550 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
3572 static void srp_add_one(struct ib_device *device)
3574 struct srp_device *srp_dev;
3575 struct ib_device_attr *attr = &device->attrs;
3576 struct srp_host *host;
3577 int mr_page_shift, p;
3578 u64 max_pages_per_mr;
3579 unsigned int flags = 0;
3581 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3586 * Use the smallest page size supported by the HCA, down to a
3587 * minimum of 4096 bytes. We're unlikely to build large sglists
3588 * out of smaller entries.
3590 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3591 srp_dev->mr_page_size = 1 << mr_page_shift;
3592 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
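	/*
	 * Worked example (illustrative): if the HCA only supports 4 KiB
	 * pages, page_size_cap = 0x1000, ffs() returns 13, and so
	 * mr_page_shift = max(12, 12) = 12, mr_page_size = 4096 and
	 * mr_page_mask = ~0xfffULL (clearing the low 12 bits).
	 */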
3593 max_pages_per_mr = attr->max_mr_size;
3594 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3595 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3596 attr->max_mr_size, srp_dev->mr_page_size,
3597 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
					  max_pages_per_mr);
3601 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3602 device->map_phys_fmr && device->unmap_fmr);
3603 srp_dev->has_fr = (attr->device_cap_flags &
3604 IB_DEVICE_MEM_MGT_EXTENSIONS);
3605 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
3606 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3607 } else if (!never_register &&
3608 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
3609 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3610 (!srp_dev->has_fmr || prefer_fr));
3611 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3614 if (never_register || !register_always ||
3615 (!srp_dev->has_fmr && !srp_dev->has_fr))
3616 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3618 if (srp_dev->use_fast_reg) {
3619 srp_dev->max_pages_per_mr =
3620 min_t(u32, srp_dev->max_pages_per_mr,
3621 attr->max_fast_reg_page_list_len);
3623 srp_dev->mr_max_size = srp_dev->mr_page_size *
3624 srp_dev->max_pages_per_mr;
3625 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3626 device->name, mr_page_shift, attr->max_mr_size,
3627 attr->max_fast_reg_page_list_len,
3628 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3630 INIT_LIST_HEAD(&srp_dev->dev_list);
3632 srp_dev->dev = device;
3633 srp_dev->pd = ib_alloc_pd(device, flags);
3634 if (IS_ERR(srp_dev->pd))
3638 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
3644 ib_set_client_data(device, &srp_client, srp_dev);
3651 static void srp_remove_one(struct ib_device *device, void *client_data)
3653 struct srp_device *srp_dev;
3654 struct srp_host *host, *tmp_host;
3655 struct srp_target_port *target;
3657 srp_dev = client_data;
3661 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3662 device_unregister(&host->dev);
3664 * Wait for the sysfs entry to go away, so that no new
3665 * target ports can be created.
3667 wait_for_completion(&host->released);
3670 * Remove all target ports.
3672 spin_lock(&host->target_lock);
3673 list_for_each_entry(target, &host->target_list, list)
3674 srp_queue_remove_work(target);
3675 spin_unlock(&host->target_lock);
3678 * Wait for tl_err and target port removal tasks.
3680 flush_workqueue(system_long_wq);
3681 flush_workqueue(srp_remove_wq);
3686 ib_dealloc_pd(srp_dev->pd);
3691 static struct srp_function_template ib_srp_transport_functions = {
3692 .has_rport_state = true,
3693 .reset_timer_if_blocked = true,
3694 .reconnect_delay = &srp_reconnect_delay,
3695 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3696 .dev_loss_tmo = &srp_dev_loss_tmo,
3697 .reconnect = srp_rport_reconnect,
3698 .rport_delete = srp_rport_delete,
	.terminate_rport_io = srp_terminate_io,
};
3702 static int __init srp_init_module(void)
3706 if (srp_sg_tablesize) {
3707 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3708 if (!cmd_sg_entries)
3709 cmd_sg_entries = srp_sg_tablesize;
3712 if (!cmd_sg_entries)
3713 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3715 if (cmd_sg_entries > 255) {
3716 pr_warn("Clamping cmd_sg_entries to 255\n");
3717 cmd_sg_entries = 255;
3720 if (!indirect_sg_entries)
3721 indirect_sg_entries = cmd_sg_entries;
3722 else if (indirect_sg_entries < cmd_sg_entries) {
3723 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3725 indirect_sg_entries = cmd_sg_entries;
3728 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3729 pr_warn("Clamping indirect_sg_entries to %u\n",
3731 indirect_sg_entries = SG_MAX_SEGMENTS;
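	/*
	 * Example (illustrative): loading the module with cmd_sg_entries=64
	 * and indirect_sg_entries=32 triggers the "Bumping up" warning above
	 * and raises indirect_sg_entries to 64, since the indirect descriptor
	 * table must cover at least as many segments as a single command's
	 * S/G list.
	 */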
3734 srp_remove_wq = create_workqueue("srp_remove");
	if (!srp_remove_wq) {
		ret = -ENOMEM;
		goto out;
	}
3741 ib_srp_transport_template =
3742 srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template) {
		ret = -ENOMEM;
		goto destroy_wq;
	}
	ret = class_register(&srp_class);
	if (ret) {
		pr_err("couldn't register class infiniband_srp\n");
		goto release_tr;
	}
3752 ib_sa_register_client(&srp_sa_client);
	ret = ib_register_client(&srp_client);
	if (ret) {
		pr_err("couldn't register IB client\n");
		goto unreg_sa;
	}

	return 0;
unreg_sa:
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);

release_tr:
	srp_release_transport(ib_srp_transport_template);

destroy_wq:
	destroy_workqueue(srp_remove_wq);

	return ret;
3775 static void __exit srp_cleanup_module(void)
3777 ib_unregister_client(&srp_client);
3778 ib_sa_unregister_client(&srp_sa_client);
3779 class_unregister(&srp_class);
3780 srp_release_transport(ib_srp_transport_template);
3781 destroy_workqueue(srp_remove_wq);
3784 module_init(srp_init_module);
3785 module_exit(srp_cleanup_module);