asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Mar 2016 19:41:14 +0000 (12:41 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 22 Mar 2016 19:41:14 +0000 (12:41 -0700)
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

   - Add target_alloc_session() w/ callback helper for doing se_session
     allocation + tag + se_node_acl lookup.  (HCH + nab)

   - Tree-wide fabric driver conversion to use target_alloc_session()

   - Convert sbp-target to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Chris Boot + nab)

   - Convert usb-gadget to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Andrzej Pietrasiewicz + nab)

   - Convert xen-scsiback to use percpu_ida tag pre-allocation, and
     TARGET_SCF_ACK_KREF I/O krefs (Juergen Gross + nab)

   - Convert tcm_fc to use TARGET_SCF_ACK_KREF I/O + TMR krefs

   - Convert ib_srpt to use percpu_ida tag pre-allocation

   - Add DebugFS node for qla2xxx target sess list (Quinn)

   - Rework iser-target connection termination (Jenny + Sagi)

   - Convert iser-target to new CQ API (HCH)

   - Add pass-through WRITE_SAME support for IBLOCK (Mike Christie)

   - Introduce data_bitmap for asynchronous access of data area (Sheng
     Yang + Andy)

   - Fix target_release_cmd_kref shutdown comp leak (Himanshu Madhani)

  Also, there is a separate PULL request coming for cxgb4 NIC driver
  prerequisites for supporting hw iscsi segmentation offload (ISO), that
  will be the base for a number of v4.7 developments involving
  iscsi-target hw offloads"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (36 commits)
  target: Fix target_release_cmd_kref shutdown comp leak
  target: Avoid DataIN transfers for non-GOOD SAM status
  target/user: Report capability of handling out-of-order completions to userspace
  target/user: Fix size_t format-spec build warning
  target/user: Don't free expired command when time out
  target/user: Introduce data_bitmap, replace data_length/data_head/data_tail
  target/user: Free data ring in unified function
  target/user: Use iovec[] to describe continuous area
  target: Remove enum transport_lunflags_table
  target/iblock: pass WRITE_SAME to device if possible
  iser-target: Kill the ->isert_cmd back pointer in struct iser_tx_desc
  iser-target: Kill struct isert_rdma_wr
  iser-target: Convert to new CQ API
  iser-target: Split and properly type the login buffer
  iser-target: Remove ISER_RECV_DATA_SEG_LEN
  iser-target: Remove impossible condition from isert_wait_conn
  iser-target: Remove redundant wait in release_conn
  iser-target: Rework connection termination
  iser-target: Separate flows for np listeners and connections cma events
  iser-target: Add new state ISER_CONN_BOUND to isert_conn
  ...

1  2 
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/scsi/qla2xxx/qla_def.h
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_internal.h
drivers/usb/gadget/function/f_tcm.c
drivers/vhost/scsi.c
drivers/xen/xen-scsiback.c
include/target/target_core_base.h

index 25bdaeef25203abf5e250a334c40cbbadb051b01,b0707a7aac029b66b0d6c7ec9c029d9db31292a7..1d1309091abace1362ad9ca4608a5de7c5c3b576
@@@ -91,32 -91,76 +91,32 @@@ MODULE_PARM_DESC(srpt_service_guid
                 " instead of using the node_guid of the first HCA.");
  
  static struct ib_client srpt_client;
 -static void srpt_release_channel(struct srpt_rdma_ch *ch);
 +static void srpt_release_cmd(struct se_cmd *se_cmd);
 +static void srpt_free_ch(struct kref *kref);
  static int srpt_queue_status(struct se_cmd *cmd);
  static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
  static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
 +static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
  
 -/**
 - * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
 - */
 -static inline
 -enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
 -{
 -      switch (dir) {
 -      case DMA_TO_DEVICE:     return DMA_FROM_DEVICE;
 -      case DMA_FROM_DEVICE:   return DMA_TO_DEVICE;
 -      default:                return dir;
 -      }
 -}
 -
 -/**
 - * srpt_sdev_name() - Return the name associated with the HCA.
 - *
 - * Examples are ib0, ib1, ...
 - */
 -static inline const char *srpt_sdev_name(struct srpt_device *sdev)
 -{
 -      return sdev->device->name;
 -}
 -
 -static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
 -{
 -      unsigned long flags;
 -      enum rdma_ch_state state;
 -
 -      spin_lock_irqsave(&ch->spinlock, flags);
 -      state = ch->state;
 -      spin_unlock_irqrestore(&ch->spinlock, flags);
 -      return state;
 -}
 -
 -static enum rdma_ch_state
 -srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
 -{
 -      unsigned long flags;
 -      enum rdma_ch_state prev;
 -
 -      spin_lock_irqsave(&ch->spinlock, flags);
 -      prev = ch->state;
 -      ch->state = new_state;
 -      spin_unlock_irqrestore(&ch->spinlock, flags);
 -      return prev;
 -}
 -
 -/**
 - * srpt_test_and_set_ch_state() - Test and set the channel state.
 - *
 - * Returns true if and only if the channel state has been set to the new state.
 +/*
 + * The only allowed channel state changes are those that change the channel
 + * state into a state with a higher numerical value. Hence the new > prev test.
   */
 -static bool
 -srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
 -                         enum rdma_ch_state new)
 +static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
  {
        unsigned long flags;
        enum rdma_ch_state prev;
 +      bool changed = false;
  
        spin_lock_irqsave(&ch->spinlock, flags);
        prev = ch->state;
 -      if (prev == old)
 +      if (new > prev) {
                ch->state = new;
 +              changed = true;
 +      }
        spin_unlock_irqrestore(&ch->spinlock, flags);
 -      return prev == old;
 +
 +      return changed;
  }
  
  /**
@@@ -138,7 -182,7 +138,7 @@@ static void srpt_event_handler(struct i
                return;
  
        pr_debug("ASYNC event= %d on device= %s\n", event->event,
 -               srpt_sdev_name(sdev));
 +               sdev->device->name);
  
        switch (event->event) {
        case IB_EVENT_PORT_ERR:
@@@ -176,39 -220,25 +176,39 @@@ static void srpt_srq_event(struct ib_ev
        pr_info("SRQ event %d\n", event->event);
  }
  
 +static const char *get_ch_state_name(enum rdma_ch_state s)
 +{
 +      switch (s) {
 +      case CH_CONNECTING:
 +              return "connecting";
 +      case CH_LIVE:
 +              return "live";
 +      case CH_DISCONNECTING:
 +              return "disconnecting";
 +      case CH_DRAINING:
 +              return "draining";
 +      case CH_DISCONNECTED:
 +              return "disconnected";
 +      }
 +      return "???";
 +}
 +
  /**
   * srpt_qp_event() - QP event callback function.
   */
  static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
  {
        pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
 -               event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
 +               event->event, ch->cm_id, ch->sess_name, ch->state);
  
        switch (event->event) {
        case IB_EVENT_COMM_EST:
                ib_cm_notify(ch->cm_id, event->event);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
 -              if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
 -                                             CH_RELEASING))
 -                      srpt_release_channel(ch);
 -              else
 -                      pr_debug("%s: state %d - ignored LAST_WQE.\n",
 -                               ch->sess_name, srpt_get_ch_state(ch));
 +              pr_debug("%s-%d, state %s: received Last WQE event.\n",
 +                       ch->sess_name, ch->qp->qp_num,
 +                       get_ch_state_name(ch->state));
                break;
        default:
                pr_err("received unrecognized IB QP event %d\n", event->event);
@@@ -251,7 -281,7 +251,7 @@@ static void srpt_get_class_port_info(st
        struct ib_class_port_info *cif;
  
        cif = (struct ib_class_port_info *)mad->data;
 -      memset(cif, 0, sizeof *cif);
 +      memset(cif, 0, sizeof(*cif));
        cif->base_version = 1;
        cif->class_version = 1;
        cif->resp_time_value = 20;
@@@ -310,7 -340,7 +310,7 @@@ static void srpt_get_ioc(struct srpt_po
                return;
        }
  
 -      memset(iocp, 0, sizeof *iocp);
 +      memset(iocp, 0, sizeof(*iocp));
        strcpy(iocp->id_string, SRPT_ID_STRING);
        iocp->guid = cpu_to_be64(srpt_service_guid);
        iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
@@@ -360,7 -390,7 +360,7 @@@ static void srpt_get_svc_entries(u64 io
        }
  
        svc_entries = (struct ib_dm_svc_entries *)mad->data;
 -      memset(svc_entries, 0, sizeof *svc_entries);
 +      memset(svc_entries, 0, sizeof(*svc_entries));
        svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
        snprintf(svc_entries->service_entries[0].name,
                 sizeof(svc_entries->service_entries[0].name),
@@@ -454,7 -484,7 +454,7 @@@ static void srpt_mad_recv_handler(struc
        rsp->ah = ah;
  
        dm_mad = rsp->mad;
 -      memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
 +      memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
        dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
        dm_mad->mad_hdr.status = 0;
  
@@@ -502,7 -532,7 +502,7 @@@ static int srpt_refresh_port(struct srp
        struct ib_port_attr port_attr;
        int ret;
  
 -      memset(&port_modify, 0, sizeof port_modify);
 +      memset(&port_modify, 0, sizeof(port_modify));
        port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
        port_modify.clr_port_cap_mask = 0;
  
                goto err_query_port;
  
        if (!sport->mad_agent) {
 -              memset(&reg_req, 0, sizeof reg_req);
 +              memset(&reg_req, 0, sizeof(reg_req));
                reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
                reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
                set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
@@@ -810,39 -840,6 +810,39 @@@ out
        return ret;
  }
  
 +/**
 + * srpt_zerolength_write() - Perform a zero-length RDMA write.
 + *
 + * A quote from the InfiniBand specification: C9-88: For an HCA responder
 + * using Reliable Connection service, for each zero-length RDMA READ or WRITE
 + * request, the R_Key shall not be validated, even if the request includes
 + * Immediate data.
 + */
 +static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
 +{
 +      struct ib_send_wr wr, *bad_wr;
 +
 +      memset(&wr, 0, sizeof(wr));
 +      wr.opcode = IB_WR_RDMA_WRITE;
 +      wr.wr_cqe = &ch->zw_cqe;
 +      wr.send_flags = IB_SEND_SIGNALED;
 +      return ib_post_send(ch->qp, &wr, &bad_wr);
 +}
 +
 +static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
 +{
 +      struct srpt_rdma_ch *ch = cq->cq_context;
 +
 +      if (wc->status == IB_WC_SUCCESS) {
 +              srpt_process_wait_list(ch);
 +      } else {
 +              if (srpt_set_ch_state(ch, CH_DISCONNECTED))
 +                      schedule_work(&ch->release_work);
 +              else
 +                      WARN_ONCE("%s-%d\n", ch->sess_name, ch->qp->qp_num);
 +      }
 +}
 +
  /**
   * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
   * @ioctx: Pointer to the I/O context associated with the request.
@@@ -906,14 -903,14 +906,14 @@@ static int srpt_get_desc_tbl(struct srp
  
                db = (struct srp_direct_buf *)(srp_cmd->add_data
                                               + add_cdb_offset);
 -              memcpy(ioctx->rbufs, db, sizeof *db);
 +              memcpy(ioctx->rbufs, db, sizeof(*db));
                *data_len = be32_to_cpu(db->len);
        } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
                   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
                idb = (struct srp_indirect_buf *)(srp_cmd->add_data
                                                  + add_cdb_offset);
  
 -              ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
 +              ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
  
                if (ioctx->n_rbuf >
                    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
                        ioctx->rbufs = &ioctx->single_rbuf;
                else {
                        ioctx->rbufs =
 -                              kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
 +                              kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
                        if (!ioctx->rbufs) {
                                ioctx->n_rbuf = 0;
                                ret = -ENOMEM;
                }
  
                db = idb->desc_list;
 -              memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
 +              memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
                *data_len = be32_to_cpu(idb->len);
        }
  out:
@@@ -959,7 -956,7 +959,7 @@@ static int srpt_init_ch_qp(struct srpt_
        struct ib_qp_attr *attr;
        int ret;
  
 -      attr = kzalloc(sizeof *attr, GFP_KERNEL);
 +      attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;
  
@@@ -1073,7 -1070,7 +1073,7 @@@ static void srpt_unmap_sg_to_ib_sge(str
                dir = ioctx->cmd.data_direction;
                BUG_ON(dir == DMA_NONE);
                ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
 -                              opposite_dma_dir(dir));
 +                              target_reverse_dma_direction(&ioctx->cmd));
                ioctx->mapped_sg_count = 0;
        }
  }
@@@ -1110,7 -1107,7 +1110,7 @@@ static int srpt_map_sg_to_ib_sge(struc
        ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
  
        count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
 -                            opposite_dma_dir(dir));
 +                            target_reverse_dma_direction(cmd));
        if (unlikely(!count))
                return -EAGAIN;
  
@@@ -1264,40 -1261,26 +1264,26 @@@ free_mem
   */
  static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
  {
+       struct se_session *se_sess;
        struct srpt_send_ioctx *ioctx;
-       unsigned long flags;
+       int tag;
  
        BUG_ON(!ch);
+       se_sess = ch->sess;
  
-       ioctx = NULL;
-       spin_lock_irqsave(&ch->spinlock, flags);
-       if (!list_empty(&ch->free_list)) {
-               ioctx = list_first_entry(&ch->free_list,
-                                        struct srpt_send_ioctx, free_list);
-               list_del(&ioctx->free_list);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+       if (tag < 0) {
+               pr_err("Unable to obtain tag for srpt_send_ioctx\n");
+               return NULL;
        }
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-       if (!ioctx)
-               return ioctx;
-       BUG_ON(ioctx->ch != ch);
+       ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag];
+       memset(ioctx, 0, sizeof(struct srpt_send_ioctx));
+       ioctx->ch = ch;
        spin_lock_init(&ioctx->spinlock);
        ioctx->state = SRPT_STATE_NEW;
-       ioctx->n_rbuf = 0;
-       ioctx->rbufs = NULL;
-       ioctx->n_rdma = 0;
-       ioctx->n_rdma_wrs = 0;
-       ioctx->rdma_wrs = NULL;
-       ioctx->mapped_sg_count = 0;
        init_completion(&ioctx->tx_done);
-       ioctx->queue_status_only = false;
-       /*
-        * transport_init_se_cmd() does not initialize all fields, so do it
-        * here.
-        */
-       memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
-       memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+       ioctx->cmd.map_tag = tag;
  
        return ioctx;
  }
@@@ -1316,7 -1299,10 +1302,7 @@@ static int srpt_abort_cmd(struct srpt_s
  
        /*
         * If the command is in a state where the target core is waiting for
 -       * the ib_srpt driver, change the state to the next state. Changing
 -       * the state of the command from SRPT_STATE_NEED_DATA to
 -       * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
 -       * function a second time.
 +       * the ib_srpt driver, change the state to the next state.
         */
  
        spin_lock_irqsave(&ioctx->spinlock, flags);
        case SRPT_STATE_NEED_DATA:
                ioctx->state = SRPT_STATE_DATA_IN;
                break;
 -      case SRPT_STATE_DATA_IN:
        case SRPT_STATE_CMD_RSP_SENT:
        case SRPT_STATE_MGMT_RSP_SENT:
                ioctx->state = SRPT_STATE_DONE;
                break;
        default:
 +              WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
 +                        __func__, state);
                break;
        }
        spin_unlock_irqrestore(&ioctx->spinlock, flags);
  
 -      if (state == SRPT_STATE_DONE) {
 -              struct srpt_rdma_ch *ch = ioctx->ch;
 -
 -              BUG_ON(ch->sess == NULL);
 -
 -              target_put_sess_cmd(&ioctx->cmd);
 -              goto out;
 -      }
 -
        pr_debug("Aborting cmd with state %d and tag %lld\n", state,
                 ioctx->cmd.tag);
  
        case SRPT_STATE_NEW:
        case SRPT_STATE_DATA_IN:
        case SRPT_STATE_MGMT:
 +      case SRPT_STATE_DONE:
                /*
                 * Do nothing - defer abort processing until
                 * srpt_queue_response() is invoked.
                 */
 -              WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
                break;
        case SRPT_STATE_NEED_DATA:
 -              /* DMA_TO_DEVICE (write) - RDMA read error. */
 -
 -              /* XXX(hch): this is a horrible layering violation.. */
 -              spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
 -              ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
 -              spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
 +              pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
 +              transport_generic_request_failure(&ioctx->cmd,
 +                                      TCM_CHECK_CONDITION_ABORT_CMD);
                break;
        case SRPT_STATE_CMD_RSP_SENT:
                /*
                 * not been received in time.
                 */
                srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
 -              target_put_sess_cmd(&ioctx->cmd);
 +              transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
 -              srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
 -              target_put_sess_cmd(&ioctx->cmd);
 +              transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        default:
                WARN(1, "Unexpected command state (%d)", state);
                break;
        }
  
 -out:
        return state;
  }
  
@@@ -1409,14 -1408,9 +1395,14 @@@ static void srpt_rdma_write_done(struc
                container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
  
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
 +              /*
 +               * Note: if an RDMA write error completion is received that
 +               * means that a SEND also has been posted. Defer further
 +               * processing of the associated command until the send error
 +               * completion has been received.
 +               */
                pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);
 -              srpt_abort_cmd(ioctx);
        }
  }
  
@@@ -1456,7 -1450,7 +1442,7 @@@ static int srpt_build_cmd_rsp(struct sr
        sense_data_len = ioctx->cmd.scsi_sense_length;
        WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
  
 -      memset(srp_rsp, 0, sizeof *srp_rsp);
 +      memset(srp_rsp, 0, sizeof(*srp_rsp));
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
                cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
@@@ -1506,7 -1500,7 +1492,7 @@@ static int srpt_build_tskmgmt_rsp(struc
  
        srp_rsp = ioctx->ioctx.buf;
        BUG_ON(!srp_rsp);
 -      memset(srp_rsp, 0, sizeof *srp_rsp);
 +      memset(srp_rsp, 0, sizeof(*srp_rsp));
  
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
        return resp_len;
  }
  
 -#define NO_SUCH_LUN ((uint64_t)-1LL)
 -
 -/*
 - * SCSI LUN addressing method. See also SAM-2 and the section about
 - * eight byte LUNs.
 - */
 -enum scsi_lun_addr_method {
 -      SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
 -      SCSI_LUN_ADDR_METHOD_FLAT         = 1,
 -      SCSI_LUN_ADDR_METHOD_LUN          = 2,
 -      SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
 -};
 -
 -/*
 - * srpt_unpack_lun() - Convert from network LUN to linear LUN.
 - *
 - * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
 - * order (big endian) to a linear LUN. Supports three LUN addressing methods:
 - * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
 - */
 -static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
 -{
 -      uint64_t res = NO_SUCH_LUN;
 -      int addressing_method;
 -
 -      if (unlikely(len < 2)) {
 -              pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
 -                     len);
 -              goto out;
 -      }
 -
 -      switch (len) {
 -      case 8:
 -              if ((*((__be64 *)lun) &
 -                   cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
 -                      goto out_err;
 -              break;
 -      case 4:
 -              if (*((__be16 *)&lun[2]) != 0)
 -                      goto out_err;
 -              break;
 -      case 6:
 -              if (*((__be32 *)&lun[2]) != 0)
 -                      goto out_err;
 -              break;
 -      case 2:
 -              break;
 -      default:
 -              goto out_err;
 -      }
 -
 -      addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
 -      switch (addressing_method) {
 -      case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
 -      case SCSI_LUN_ADDR_METHOD_FLAT:
 -      case SCSI_LUN_ADDR_METHOD_LUN:
 -              res = *(lun + 1) | (((*lun) & 0x3f) << 8);
 -              break;
 -
 -      case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
 -      default:
 -              pr_err("Unimplemented LUN addressing method %u\n",
 -                     addressing_method);
 -              break;
 -      }
 -
 -out:
 -      return res;
 -
 -out_err:
 -      pr_err("Support for multi-level LUNs has not yet been implemented\n");
 -      goto out;
 -}
 -
  static int srpt_check_stop_free(struct se_cmd *cmd)
  {
        struct srpt_send_ioctx *ioctx = container_of(cmd,
  /**
   * srpt_handle_cmd() - Process SRP_CMD.
   */
 -static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 -                         struct srpt_recv_ioctx *recv_ioctx,
 -                         struct srpt_send_ioctx *send_ioctx)
 +static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
 +                          struct srpt_recv_ioctx *recv_ioctx,
 +                          struct srpt_send_ioctx *send_ioctx)
  {
        struct se_cmd *cmd;
        struct srp_cmd *srp_cmd;
 -      uint64_t unpacked_lun;
        u64 data_len;
        enum dma_data_direction dir;
 -      sense_reason_t ret;
        int rc;
  
        BUG_ON(!send_ioctx);
        if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
                pr_err("0x%llx: parsing SRP descriptor table failed.\n",
                       srp_cmd->tag);
 -              ret = TCM_INVALID_CDB_FIELD;
 -              goto send_sense;
 +              goto release_ioctx;
        }
  
 -      unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
 -                                     sizeof(srp_cmd->lun));
        rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
 -                      &send_ioctx->sense_data[0], unpacked_lun, data_len,
 -                      TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
 +                             &send_ioctx->sense_data[0],
 +                             scsilun_to_int(&srp_cmd->lun), data_len,
 +                             TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
        if (rc != 0) {
 -              ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 -              goto send_sense;
 +              pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
 +                       srp_cmd->tag);
 +              goto release_ioctx;
        }
 -      return 0;
 -
 -send_sense:
 -      transport_send_check_condition_and_sense(cmd, ret, 0);
 -      return -1;
 -}
 -
 -/**
 - * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
 - * @ch: RDMA channel of the task management request.
 - * @fn: Task management function to perform.
 - * @req_tag: Tag of the SRP task management request.
 - * @mgmt_ioctx: I/O context of the task management request.
 - *
 - * Returns zero if the target core will process the task management
 - * request asynchronously.
 - *
 - * Note: It is assumed that the initiator serializes tag-based task management
 - * requests.
 - */
 -static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
 -{
 -      struct srpt_device *sdev;
 -      struct srpt_rdma_ch *ch;
 -      struct srpt_send_ioctx *target;
 -      int ret, i;
 +      return;
  
 -      ret = -EINVAL;
 -      ch = ioctx->ch;
 -      BUG_ON(!ch);
 -      BUG_ON(!ch->sport);
 -      sdev = ch->sport->sdev;
 -      BUG_ON(!sdev);
 -      spin_lock_irq(&sdev->spinlock);
 -      for (i = 0; i < ch->rq_size; ++i) {
 -              target = ch->ioctx_ring[i];
 -              if (target->cmd.se_lun == ioctx->cmd.se_lun &&
 -                  target->cmd.tag == tag &&
 -                  srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
 -                      ret = 0;
 -                      /* now let the target core abort &target->cmd; */
 -                      break;
 -              }
 -      }
 -      spin_unlock_irq(&sdev->spinlock);
 -      return ret;
 +release_ioctx:
 +      send_ioctx->state = SRPT_STATE_DONE;
 +      srpt_release_cmd(cmd);
  }
  
  static int srp_tmr_to_tcm(int fn)
@@@ -1618,6 -1730,8 +1604,6 @@@ static void srpt_handle_tsk_mgmt(struc
        struct srp_tsk_mgmt *srp_tsk;
        struct se_cmd *cmd;
        struct se_session *sess = ch->sess;
 -      uint64_t unpacked_lun;
 -      uint32_t tag = 0;
        int tcm_tmr;
        int rc;
  
        srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
        send_ioctx->cmd.tag = srp_tsk->tag;
        tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
 -      if (tcm_tmr < 0) {
 -              send_ioctx->cmd.se_tmr_req->response =
 -                      TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
 -              goto fail;
 -      }
 -      unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
 -                                     sizeof(srp_tsk->lun));
 -
 -      if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
 -              rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
 -              if (rc < 0) {
 -                      send_ioctx->cmd.se_tmr_req->response =
 -                                      TMR_TASK_DOES_NOT_EXIST;
 -                      goto fail;
 -              }
 -              tag = srp_tsk->task_tag;
 -      }
 -      rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
 -                              srp_tsk, tcm_tmr, GFP_KERNEL, tag,
 -                              TARGET_SCF_ACK_KREF);
 +      rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
 +                             scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
 +                             GFP_KERNEL, srp_tsk->task_tag,
 +                             TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
                goto fail;
@@@ -1656,6 -1786,7 +1642,6 @@@ static void srpt_handle_new_iu(struct s
                               struct srpt_send_ioctx *send_ioctx)
  {
        struct srp_cmd *srp_cmd;
 -      enum rdma_ch_state ch_state;
  
        BUG_ON(!ch);
        BUG_ON(!recv_ioctx);
                                   recv_ioctx->ioctx.dma, srp_max_req_size,
                                   DMA_FROM_DEVICE);
  
 -      ch_state = srpt_get_ch_state(ch);
 -      if (unlikely(ch_state == CH_CONNECTING)) {
 +      if (unlikely(ch->state == CH_CONNECTING)) {
                list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
                goto out;
        }
  
 -      if (unlikely(ch_state != CH_LIVE))
 +      if (unlikely(ch->state != CH_LIVE))
                goto out;
  
        srp_cmd = recv_ioctx->ioctx.buf;
@@@ -1732,28 -1864,6 +1718,28 @@@ static void srpt_recv_done(struct ib_c
        }
  }
  
 +/*
 + * This function must be called from the context in which RDMA completions are
 + * processed because it accesses the wait list without protection against
 + * access from other threads.
 + */
 +static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
 +{
 +      struct srpt_send_ioctx *ioctx;
 +
 +      while (!list_empty(&ch->cmd_wait_list) &&
 +             ch->state >= CH_LIVE &&
 +             (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
 +              struct srpt_recv_ioctx *recv_ioctx;
 +
 +              recv_ioctx = list_first_entry(&ch->cmd_wait_list,
 +                                            struct srpt_recv_ioctx,
 +                                            wait_list);
 +              list_del(&recv_ioctx->wait_list);
 +              srpt_handle_new_iu(ch, recv_ioctx, ioctx);
 +      }
 +}
 +
  /**
   * Note: Although this has not yet been observed during tests, at least in
   * theory it is possible that the srpt_get_send_ioctx() call invoked by
@@@ -1781,10 -1891,15 +1767,10 @@@ static void srpt_send_done(struct ib_c
  
        atomic_inc(&ch->sq_wr_avail);
  
 -      if (wc->status != IB_WC_SUCCESS) {
 +      if (wc->status != IB_WC_SUCCESS)
                pr_info("sending response for ioctx 0x%p failed"
                        " with status %d\n", ioctx, wc->status);
  
 -              atomic_dec(&ch->req_lim);
 -              srpt_abort_cmd(ioctx);
 -              goto out;
 -      }
 -
        if (state != SRPT_STATE_DONE) {
                srpt_unmap_sg_to_ib_sge(ch, ioctx);
                transport_generic_free_cmd(&ioctx->cmd, 0);
                       " wr_id = %u.\n", ioctx->ioctx.index);
        }
  
 -out:
 -      while (!list_empty(&ch->cmd_wait_list) &&
 -             srpt_get_ch_state(ch) == CH_LIVE &&
 -             (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
 -              struct srpt_recv_ioctx *recv_ioctx;
 -
 -              recv_ioctx = list_first_entry(&ch->cmd_wait_list,
 -                                            struct srpt_recv_ioctx,
 -                                            wait_list);
 -              list_del(&recv_ioctx->wait_list);
 -              srpt_handle_new_iu(ch, recv_ioctx, ioctx);
 -      }
 +      srpt_process_wait_list(ch);
  }
  
  /**
@@@ -1810,7 -1936,7 +1796,7 @@@ static int srpt_create_ch_ib(struct srp
        WARN_ON(ch->rq_size < 1);
  
        ret = -ENOMEM;
 -      qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
 +      qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
        if (!qp_init)
                goto out;
  
@@@ -1877,102 -2003,168 +1863,102 @@@ static void srpt_destroy_ch_ib(struct s
  }
  
  /**
 - * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
 + * srpt_close_ch() - Close an RDMA channel.
   *
 - * Reset the QP and make sure all resources associated with the channel will
 - * be deallocated at an appropriate time.
 + * Make sure all resources associated with the channel will be deallocated at
 + * an appropriate time.
   *
 - * Note: The caller must hold ch->sport->sdev->spinlock.
 + * Returns true if and only if the channel state has been modified into
 + * CH_DRAINING.
   */
 -static void __srpt_close_ch(struct srpt_rdma_ch *ch)
 +static bool srpt_close_ch(struct srpt_rdma_ch *ch)
  {
 -      enum rdma_ch_state prev_state;
 -      unsigned long flags;
 +      int ret;
  
 -      spin_lock_irqsave(&ch->spinlock, flags);
 -      prev_state = ch->state;
 -      switch (prev_state) {
 -      case CH_CONNECTING:
 -      case CH_LIVE:
 -              ch->state = CH_DISCONNECTING;
 -              break;
 -      default:
 -              break;
 +      if (!srpt_set_ch_state(ch, CH_DRAINING)) {
 +              pr_debug("%s-%d: already closed\n", ch->sess_name,
 +                       ch->qp->qp_num);
 +              return false;
        }
 -      spin_unlock_irqrestore(&ch->spinlock, flags);
 -
 -      switch (prev_state) {
 -      case CH_CONNECTING:
 -              ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
 -                             NULL, 0);
 -              /* fall through */
 -      case CH_LIVE:
 -              if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
 -                      pr_err("sending CM DREQ failed.\n");
 -              break;
 -      case CH_DISCONNECTING:
 -              break;
 -      case CH_DRAINING:
 -      case CH_RELEASING:
 -              break;
 -      }
 -}
 -
 -/**
 - * srpt_close_ch() - Close an RDMA channel.
 - */
 -static void srpt_close_ch(struct srpt_rdma_ch *ch)
 -{
 -      struct srpt_device *sdev;
  
 -      sdev = ch->sport->sdev;
 -      spin_lock_irq(&sdev->spinlock);
 -      __srpt_close_ch(ch);
 -      spin_unlock_irq(&sdev->spinlock);
 -}
 +      kref_get(&ch->kref);
  
 -/**
 - * srpt_shutdown_session() - Whether or not a session may be shut down.
 - */
 -static int srpt_shutdown_session(struct se_session *se_sess)
 -{
 -      struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
 -      unsigned long flags;
 +      ret = srpt_ch_qp_err(ch);
 +      if (ret < 0)
 +              pr_err("%s-%d: changing queue pair into error state failed: %d\n",
 +                     ch->sess_name, ch->qp->qp_num, ret);
  
 -      spin_lock_irqsave(&ch->spinlock, flags);
 -      if (ch->in_shutdown) {
 -              spin_unlock_irqrestore(&ch->spinlock, flags);
 -              return true;
 +      pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
 +               ch->qp->qp_num);
 +      ret = srpt_zerolength_write(ch);
 +      if (ret < 0) {
 +              pr_err("%s-%d: queuing zero-length write failed: %d\n",
 +                     ch->sess_name, ch->qp->qp_num, ret);
 +              if (srpt_set_ch_state(ch, CH_DISCONNECTED))
 +                      schedule_work(&ch->release_work);
 +              else
 +                      WARN_ON_ONCE(true);
        }
  
 -      ch->in_shutdown = true;
 -      target_sess_cmd_list_set_waiting(se_sess);
 -      spin_unlock_irqrestore(&ch->spinlock, flags);
 +      kref_put(&ch->kref, srpt_free_ch);
  
        return true;
  }
  
 -/**
 - * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
 - * @cm_id: Pointer to the CM ID of the channel to be drained.
 - *
 - * Note: Must be called from inside srpt_cm_handler to avoid a race between
 - * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
 - * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
 - * waits until all target sessions for the associated IB device have been
 - * unregistered and target session registration involves a call to
 - * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
 - * this function has finished).
 +/*
 + * Change the channel state into CH_DISCONNECTING. If a channel has not yet
 + * reached the connected state, close it. If a channel is in the connected
 + * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
 + * the responsibility of the caller to ensure that this function is not
 + * invoked concurrently with the code that accepts a connection. This means
 + * that this function must either be invoked from inside a CM callback
 + * function or that it must be invoked with the srpt_port.mutex held.
   */
 -static void srpt_drain_channel(struct ib_cm_id *cm_id)
 +static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
  {
 -      struct srpt_device *sdev;
 -      struct srpt_rdma_ch *ch;
        int ret;
 -      bool do_reset = false;
  
 -      WARN_ON_ONCE(irqs_disabled());
 +      if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
 +              return -ENOTCONN;
  
 -      sdev = cm_id->context;
 -      BUG_ON(!sdev);
 -      spin_lock_irq(&sdev->spinlock);
 -      list_for_each_entry(ch, &sdev->rch_list, list) {
 -              if (ch->cm_id == cm_id) {
 -                      do_reset = srpt_test_and_set_ch_state(ch,
 -                                      CH_CONNECTING, CH_DRAINING) ||
 -                                 srpt_test_and_set_ch_state(ch,
 -                                      CH_LIVE, CH_DRAINING) ||
 -                                 srpt_test_and_set_ch_state(ch,
 -                                      CH_DISCONNECTING, CH_DRAINING);
 -                      break;
 -              }
 -      }
 -      spin_unlock_irq(&sdev->spinlock);
 +      ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
 +      if (ret < 0)
 +              ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
  
 -      if (do_reset) {
 -              if (ch->sess)
 -                      srpt_shutdown_session(ch->sess);
 +      if (ret < 0 && srpt_close_ch(ch))
 +              ret = 0;
  
 -              ret = srpt_ch_qp_err(ch);
 -              if (ret < 0)
 -                      pr_err("Setting queue pair in error state"
 -                             " failed: %d\n", ret);
 -      }
 +      return ret;
  }
  
 -/**
 - * srpt_find_channel() - Look up an RDMA channel.
 - * @cm_id: Pointer to the CM ID of the channel to be looked up.
 - *
 - * Return NULL if no matching RDMA channel has been found.
 - */
 -static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
 -                                            struct ib_cm_id *cm_id)
 +static void __srpt_close_all_ch(struct srpt_device *sdev)
  {
        struct srpt_rdma_ch *ch;
 -      bool found;
  
 -      WARN_ON_ONCE(irqs_disabled());
 -      BUG_ON(!sdev);
 +      lockdep_assert_held(&sdev->mutex);
  
 -      found = false;
 -      spin_lock_irq(&sdev->spinlock);
        list_for_each_entry(ch, &sdev->rch_list, list) {
 -              if (ch->cm_id == cm_id) {
 -                      found = true;
 -                      break;
 -              }
 +              if (srpt_disconnect_ch(ch) >= 0)
 +                      pr_info("Closing channel %s-%d because target %s has been disabled\n",
 +                              ch->sess_name, ch->qp->qp_num,
 +                              sdev->device->name);
 +              srpt_close_ch(ch);
        }
 -      spin_unlock_irq(&sdev->spinlock);
 -
 -      return found ? ch : NULL;
  }
  
  /**
 - * srpt_release_channel() - Release channel resources.
 - *
 - * Schedules the actual release because:
 - * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
 - *   trigger a deadlock.
 - * - It is not safe to call TCM transport_* functions from interrupt context.
 + * srpt_shutdown_session() - Whether or not a session may be shut down.
   */
 -static void srpt_release_channel(struct srpt_rdma_ch *ch)
 +static int srpt_shutdown_session(struct se_session *se_sess)
 +{
 +      return 1;
 +}
 +
 +static void srpt_free_ch(struct kref *kref)
  {
 -      schedule_work(&ch->release_work);
 +      struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
 +
 +      kfree(ch);
  }
  
  static void srpt_release_channel_work(struct work_struct *w)
        struct se_session *se_sess;
  
        ch = container_of(w, struct srpt_rdma_ch, release_work);
 -      pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
 -               ch->release_done);
 +      pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
 +               ch->qp->qp_num, ch->release_done);
  
        sdev = ch->sport->sdev;
        BUG_ON(!sdev);
        se_sess = ch->sess;
        BUG_ON(!se_sess);
  
 +      target_sess_cmd_list_set_waiting(se_sess);
        target_wait_for_sess_cmds(se_sess);
  
        transport_deregister_session_configfs(se_sess);
                             ch->sport->sdev, ch->rq_size,
                             ch->rsp_size, DMA_TO_DEVICE);
  
 -      spin_lock_irq(&sdev->spinlock);
 -      list_del(&ch->list);
 -      spin_unlock_irq(&sdev->spinlock);
 -
 +      mutex_lock(&sdev->mutex);
 +      list_del_init(&ch->list);
        if (ch->release_done)
                complete(ch->release_done);
 +      mutex_unlock(&sdev->mutex);
  
        wake_up(&sdev->ch_releaseQ);
  
 -      kfree(ch);
 +      kref_put(&ch->kref, srpt_free_ch);
  }
  
  /**
@@@ -2034,9 -2226,8 +2020,8 @@@ static int srpt_cm_req_recv(struct ib_c
        struct srp_login_rej *rej;
        struct ib_cm_rep_param *rep_param;
        struct srpt_rdma_ch *ch, *tmp_ch;
-       struct se_node_acl *se_acl;
        u32 it_iu_len;
-       int i, ret = 0;
+       int ret = 0;
        unsigned char *p;
  
        WARN_ON_ONCE(irqs_disabled());
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
  
 -      rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
 -      rej = kzalloc(sizeof *rej, GFP_KERNEL);
 -      rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
 +      rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 +      rej = kzalloc(sizeof(*rej), GFP_KERNEL);
 +      rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
  
        if (!rsp || !rej || !rep_param) {
                ret = -ENOMEM;
        if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
  
 -              spin_lock_irq(&sdev->spinlock);
 +              mutex_lock(&sdev->mutex);
  
                list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
                        if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
                            && param->port == ch->sport->port
                            && param->listen_id == ch->sport->sdev->cm_id
                            && ch->cm_id) {
 -                              enum rdma_ch_state ch_state;
 -
 -                              ch_state = srpt_get_ch_state(ch);
 -                              if (ch_state != CH_CONNECTING
 -                                  && ch_state != CH_LIVE)
 +                              if (srpt_disconnect_ch(ch) < 0)
                                        continue;
 -
 -                              /* found an existing channel */
 -                              pr_debug("Found existing channel %s"
 -                                       " cm_id= %p state= %d\n",
 -                                       ch->sess_name, ch->cm_id, ch_state);
 -
 -                              __srpt_close_ch(ch);
 -
 +                              pr_info("Relogin - closed existing channel %s\n",
 +                                      ch->sess_name);
                                rsp->rsp_flags =
                                        SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
                        }
                }
  
 -              spin_unlock_irq(&sdev->spinlock);
 +              mutex_unlock(&sdev->mutex);
  
        } else
                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
                goto reject;
        }
  
 -      ch = kzalloc(sizeof *ch, GFP_KERNEL);
 +      ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        if (!ch) {
                rej->reason = cpu_to_be32(
                              SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
                goto reject;
        }
  
 +      kref_init(&ch->kref);
 +      ch->zw_cqe.done = srpt_zerolength_write_done;
        INIT_WORK(&ch->release_work, srpt_release_channel_work);
        memcpy(ch->i_port_id, req->initiator_port_id, 16);
        memcpy(ch->t_port_id, req->target_port_id, 16);
        ch->sport = &sdev->port[param->port - 1];
        ch->cm_id = cm_id;
 +      cm_id->context = ch;
        /*
         * Avoid QUEUE_FULL conditions by limiting the number of buffers used
         * for the SRP protocol to the command queue size.
        if (!ch->ioctx_ring)
                goto free_ch;
  
-       INIT_LIST_HEAD(&ch->free_list);
-       for (i = 0; i < ch->rq_size; i++) {
-               ch->ioctx_ring[i]->ch = ch;
-               list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
-       }
        ret = srpt_create_ch_ib(ch);
        if (ret) {
                rej->reason = cpu_to_be32(
        pr_debug("registering session %s\n", ch->sess_name);
        p = &ch->sess_name[0];
  
-       ch->sess = transport_init_session(TARGET_PROT_NORMAL);
-       if (IS_ERR(ch->sess)) {
-               rej->reason = cpu_to_be32(
-                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
-               pr_debug("Failed to create session\n");
-               goto destroy_ib;
-       }
  try_again:
-       se_acl = core_tpg_get_initiator_node_acl(&sport->port_tpg_1, p);
-       if (!se_acl) {
+       ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size,
+                                       sizeof(struct srpt_send_ioctx),
+                                       TARGET_PROT_NORMAL, p, ch, NULL);
+       if (IS_ERR(ch->sess)) {
                pr_info("Rejected login because no ACL has been"
-                       " configured yet for initiator %s.\n", ch->sess_name);
+                       " configured yet for initiator %s.\n", p);
                /*
                 * XXX: Hack to retry of ch->i_port_id without leading '0x'
                 */
                        p += 2;
                        goto try_again;
                }
-               rej->reason = cpu_to_be32(
+               rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ?
+                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
                                SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
-               transport_free_session(ch->sess);
                goto destroy_ib;
        }
-       ch->sess->se_node_acl = se_acl;
-       transport_register_session(&sport->port_tpg_1, se_acl, ch->sess, ch);
  
        pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
                 ch->sess_name, ch->cm_id);
        /* create cm reply */
        rep_param->qp_num = ch->qp->qp_num;
        rep_param->private_data = (void *)rsp;
 -      rep_param->private_data_len = sizeof *rsp;
 +      rep_param->private_data_len = sizeof(*rsp);
        rep_param->rnr_retry_count = 7;
        rep_param->flow_control = 1;
        rep_param->failover_accepted = 0;
                goto release_channel;
        }
  
 -      spin_lock_irq(&sdev->spinlock);
 +      mutex_lock(&sdev->mutex);
        list_add_tail(&ch->list, &sdev->rch_list);
 -      spin_unlock_irq(&sdev->spinlock);
 +      mutex_unlock(&sdev->mutex);
  
        goto out;
  
  release_channel:
 -      srpt_set_ch_state(ch, CH_RELEASING);
 +      srpt_disconnect_ch(ch);
        transport_deregister_session_configfs(ch->sess);
        transport_deregister_session(ch->sess);
        ch->sess = NULL;
@@@ -2284,7 -2467,7 +2254,7 @@@ reject
                                   | SRP_BUF_FORMAT_INDIRECT);
  
        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
 -                           (void *)rej, sizeof *rej);
 +                           (void *)rej, sizeof(*rej));
  
  out:
        kfree(rep_param);
        return ret;
  }
  
 -static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
 +static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
 +                           enum ib_cm_rej_reason reason,
 +                           const u8 *private_data,
 +                           u8 private_data_len)
  {
 -      pr_info("Received IB REJ for cm_id %p.\n", cm_id);
 -      srpt_drain_channel(cm_id);
 +      char *priv = NULL;
 +      int i;
 +
 +      if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
 +                                              GFP_KERNEL))) {
 +              for (i = 0; i < private_data_len; i++)
 +                      sprintf(priv + 3 * i, " %02x", private_data[i]);
 +      }
 +      pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
 +              ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
 +              "; private data" : "", priv ? priv : " (?)");
 +      kfree(priv);
  }
  
  /**
   * An IB_CM_RTU_RECEIVED message indicates that the connection is established
   * and that the recipient may begin transmitting (RTU = ready to use).
   */
 -static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
 +static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
  {
 -      struct srpt_rdma_ch *ch;
        int ret;
  
 -      ch = srpt_find_channel(cm_id->context, cm_id);
 -      BUG_ON(!ch);
 -
 -      if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
 -              struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
 -
 +      if (srpt_set_ch_state(ch, CH_LIVE)) {
                ret = srpt_ch_qp_rts(ch, ch->qp);
  
 -              list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
 -                                       wait_list) {
 -                      list_del(&ioctx->wait_list);
 -                      srpt_handle_new_iu(ch, ioctx, NULL);
 -              }
 -              if (ret)
 +              if (ret == 0) {
 +                      /* Trigger wait list processing. */
 +                      ret = srpt_zerolength_write(ch);
 +                      WARN_ONCE(ret < 0, "%d\n", ret);
 +              } else {
                        srpt_close_ch(ch);
 +              }
        }
  }
  
 -static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
 -{
 -      pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
 -      srpt_drain_channel(cm_id);
 -}
 -
 -static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
 -{
 -      pr_info("Received IB REP error for cm_id %p.\n", cm_id);
 -      srpt_drain_channel(cm_id);
 -}
 -
 -/**
 - * srpt_cm_dreq_recv() - Process reception of a DREQ message.
 - */
 -static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
 -{
 -      struct srpt_rdma_ch *ch;
 -      unsigned long flags;
 -      bool send_drep = false;
 -
 -      ch = srpt_find_channel(cm_id->context, cm_id);
 -      BUG_ON(!ch);
 -
 -      pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
 -
 -      spin_lock_irqsave(&ch->spinlock, flags);
 -      switch (ch->state) {
 -      case CH_CONNECTING:
 -      case CH_LIVE:
 -              send_drep = true;
 -              ch->state = CH_DISCONNECTING;
 -              break;
 -      case CH_DISCONNECTING:
 -      case CH_DRAINING:
 -      case CH_RELEASING:
 -              WARN(true, "unexpected channel state %d\n", ch->state);
 -              break;
 -      }
 -      spin_unlock_irqrestore(&ch->spinlock, flags);
 -
 -      if (send_drep) {
 -              if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
 -                      pr_err("Sending IB DREP failed.\n");
 -              pr_info("Received DREQ and sent DREP for session %s.\n",
 -                      ch->sess_name);
 -      }
 -}
 -
 -/**
 - * srpt_cm_drep_recv() - Process reception of a DREP message.
 - */
 -static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
 -{
 -      pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
 -      srpt_drain_channel(cm_id);
 -}
 -
  /**
   * srpt_cm_handler() - IB connection manager callback function.
   *
   */
  static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
  {
 +      struct srpt_rdma_ch *ch = cm_id->context;
        int ret;
  
        ret = 0;
                                       event->private_data);
                break;
        case IB_CM_REJ_RECEIVED:
 -              srpt_cm_rej_recv(cm_id);
 +              srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
 +                               event->private_data,
 +                               IB_CM_REJ_PRIVATE_DATA_SIZE);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
 -              srpt_cm_rtu_recv(cm_id);
 +              srpt_cm_rtu_recv(ch);
                break;
        case IB_CM_DREQ_RECEIVED:
 -              srpt_cm_dreq_recv(cm_id);
 +              srpt_disconnect_ch(ch);
                break;
        case IB_CM_DREP_RECEIVED:
 -              srpt_cm_drep_recv(cm_id);
 +              pr_info("Received CM DREP message for ch %s-%d.\n",
 +                      ch->sess_name, ch->qp->qp_num);
 +              srpt_close_ch(ch);
                break;
        case IB_CM_TIMEWAIT_EXIT:
 -              srpt_cm_timewait_exit(cm_id);
 +              pr_info("Received CM TimeWait exit for ch %s-%d.\n",
 +                      ch->sess_name, ch->qp->qp_num);
 +              srpt_close_ch(ch);
                break;
        case IB_CM_REP_ERROR:
 -              srpt_cm_rep_error(cm_id);
 +              pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
 +                      ch->qp->qp_num);
                break;
        case IB_CM_DREQ_ERROR:
 -              pr_info("Received IB DREQ ERROR event.\n");
 +              pr_info("Received CM DREQ ERROR event.\n");
                break;
        case IB_CM_MRA_RECEIVED:
 -              pr_info("Received IB MRA event\n");
 +              pr_info("Received CM MRA event\n");
                break;
        default:
 -              pr_err("received unrecognized IB CM event %d\n", event->event);
 +              pr_err("received unrecognized CM event %d\n", event->event);
                break;
        }
  
@@@ -2499,14 -2725,41 +2469,14 @@@ static int srpt_write_pending_status(st
   */
  static int srpt_write_pending(struct se_cmd *se_cmd)
  {
 -      struct srpt_rdma_ch *ch;
 -      struct srpt_send_ioctx *ioctx;
 +      struct srpt_send_ioctx *ioctx =
 +              container_of(se_cmd, struct srpt_send_ioctx, cmd);
 +      struct srpt_rdma_ch *ch = ioctx->ch;
        enum srpt_command_state new_state;
 -      enum rdma_ch_state ch_state;
 -      int ret;
 -
 -      ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
  
        new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
        WARN_ON(new_state == SRPT_STATE_DONE);
 -
 -      ch = ioctx->ch;
 -      BUG_ON(!ch);
 -
 -      ch_state = srpt_get_ch_state(ch);
 -      switch (ch_state) {
 -      case CH_CONNECTING:
 -              WARN(true, "unexpected channel state %d\n", ch_state);
 -              ret = -EINVAL;
 -              goto out;
 -      case CH_LIVE:
 -              break;
 -      case CH_DISCONNECTING:
 -      case CH_DRAINING:
 -      case CH_RELEASING:
 -              pr_debug("cmd with tag %lld: channel disconnecting\n",
 -                       ioctx->cmd.tag);
 -              srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
 -              ret = -EINVAL;
 -              goto out;
 -      }
 -      ret = srpt_xfer_data(ch, ioctx);
 -
 -out:
 -      return ret;
 +      return srpt_xfer_data(ch, ioctx);
  }
  
  static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@@ -2637,25 -2890,36 +2607,25 @@@ static void srpt_refresh_port_work(stru
        srpt_refresh_port(sport);
  }
  
 -static int srpt_ch_list_empty(struct srpt_device *sdev)
 -{
 -      int res;
 -
 -      spin_lock_irq(&sdev->spinlock);
 -      res = list_empty(&sdev->rch_list);
 -      spin_unlock_irq(&sdev->spinlock);
 -
 -      return res;
 -}
 -
  /**
   * srpt_release_sdev() - Free the channel resources associated with a target.
   */
  static int srpt_release_sdev(struct srpt_device *sdev)
  {
 -      struct srpt_rdma_ch *ch, *tmp_ch;
 -      int res;
 +      int i, res;
  
        WARN_ON_ONCE(irqs_disabled());
  
        BUG_ON(!sdev);
  
 -      spin_lock_irq(&sdev->spinlock);
 -      list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
 -              __srpt_close_ch(ch);
 -      spin_unlock_irq(&sdev->spinlock);
 +      mutex_lock(&sdev->mutex);
 +      for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
 +              sdev->port[i].enabled = false;
 +      __srpt_close_all_ch(sdev);
 +      mutex_unlock(&sdev->mutex);
  
        res = wait_event_interruptible(sdev->ch_releaseQ,
 -                                     srpt_ch_list_empty(sdev));
 +                                     list_empty_careful(&sdev->rch_list));
        if (res)
                pr_err("%s: interrupted.\n", __func__);
  
@@@ -2709,14 -2973,14 +2679,14 @@@ static void srpt_add_one(struct ib_devi
        pr_debug("device = %p, device->dma_ops = %p\n", device,
                 device->dma_ops);
  
 -      sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
 +      sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
        if (!sdev)
                goto err;
  
        sdev->device = device;
        INIT_LIST_HEAD(&sdev->rch_list);
        init_waitqueue_head(&sdev->ch_releaseQ);
 -      spin_lock_init(&sdev->spinlock);
 +      mutex_init(&sdev->mutex);
  
        sdev->pd = ib_alloc_pd(device);
        if (IS_ERR(sdev->pd))
  
                if (srpt_refresh_port(sport)) {
                        pr_err("MAD registration failed for %s-%d.\n",
 -                             srpt_sdev_name(sdev), i);
 +                             sdev->device->name, i);
                        goto err_ring;
                }
                snprintf(sport->port_guid, sizeof(sport->port_guid),
@@@ -2911,7 -3175,7 +2881,7 @@@ static void srpt_release_cmd(struct se_
        struct srpt_send_ioctx *ioctx = container_of(se_cmd,
                                struct srpt_send_ioctx, cmd);
        struct srpt_rdma_ch *ch = ioctx->ch;
-       unsigned long flags;
+       struct se_session *se_sess = ch->sess;
  
        WARN_ON(ioctx->state != SRPT_STATE_DONE);
        WARN_ON(ioctx->mapped_sg_count != 0);
                ioctx->n_rbuf = 0;
        }
  
-       spin_lock_irqsave(&ch->spinlock, flags);
-       list_add(&ioctx->free_list, &ch->free_list);
-       spin_unlock_irqrestore(&ch->spinlock, flags);
+       percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
  }
  
  /**
  static void srpt_close_session(struct se_session *se_sess)
  {
        DECLARE_COMPLETION_ONSTACK(release_done);
 -      struct srpt_rdma_ch *ch;
 -      struct srpt_device *sdev;
 -      unsigned long res;
 -
 -      ch = se_sess->fabric_sess_ptr;
 -      WARN_ON(ch->sess != se_sess);
 +      struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
 +      struct srpt_device *sdev = ch->sport->sdev;
 +      bool wait;
  
 -      pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
 +      pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
 +               ch->state);
  
 -      sdev = ch->sport->sdev;
 -      spin_lock_irq(&sdev->spinlock);
 +      mutex_lock(&sdev->mutex);
        BUG_ON(ch->release_done);
        ch->release_done = &release_done;
 -      __srpt_close_ch(ch);
 -      spin_unlock_irq(&sdev->spinlock);
 +      wait = !list_empty(&ch->list);
 +      srpt_disconnect_ch(ch);
 +      mutex_unlock(&sdev->mutex);
  
 -      res = wait_for_completion_timeout(&release_done, 60 * HZ);
 -      WARN_ON(res == 0);
 +      if (!wait)
 +              return;
 +
 +      while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
 +              pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
 +                      ch->sess_name, ch->qp->qp_num, ch->state);
  }
  
  /**
@@@ -3164,8 -3424,6 +3132,8 @@@ static ssize_t srpt_tpg_enable_store(st
  {
        struct se_portal_group *se_tpg = to_tpg(item);
        struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
 +      struct srpt_device *sdev = sport->sdev;
 +      struct srpt_rdma_ch *ch;
        unsigned long tmp;
          int ret;
  
                pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
                return -EINVAL;
        }
 -      if (tmp == 1)
 -              sport->enabled = true;
 -      else
 -              sport->enabled = false;
 +      if (sport->enabled == tmp)
 +              goto out;
 +      sport->enabled = tmp;
 +      if (sport->enabled)
 +              goto out;
 +
 +      mutex_lock(&sdev->mutex);
 +      list_for_each_entry(ch, &sdev->rch_list, list) {
 +              if (ch->sport == sport) {
 +                      pr_debug("%s: ch %p %s-%d\n", __func__, ch,
 +                               ch->sess_name, ch->qp->qp_num);
 +                      srpt_disconnect_ch(ch);
 +                      srpt_close_ch(ch);
 +              }
 +      }
 +      mutex_unlock(&sdev->mutex);
  
 +out:
        return count;
  }
  
@@@ -3288,6 -3533,7 +3256,6 @@@ static struct configfs_attribute *srpt_
  static const struct target_core_fabric_ops srpt_template = {
        .module                         = THIS_MODULE,
        .name                           = "srpt",
 -      .node_acl_size                  = sizeof(struct srpt_node_acl),
        .get_fabric_name                = srpt_get_fabric_name,
        .tpg_get_wwn                    = srpt_get_fabric_wwn,
        .tpg_get_tag                    = srpt_get_tag,
index af9b8b527340c80f4c8af515cc4aa641a5c5b426,6fbb6e79407c6b6bf2e2695f4e9100368d1ee1de..ca288f019315cda7142169ddbe5269772a7f398c
@@@ -179,7 -179,6 +179,6 @@@ struct srpt_recv_ioctx 
   * struct srpt_send_ioctx - SRPT send I/O context.
   * @ioctx:       See above.
   * @ch:          Channel pointer.
-  * @free_list:   Node in srpt_rdma_ch.free_list.
   * @n_rbuf:      Number of data buffers in the received SRP command.
   * @rbufs:       Pointer to SRP data buffer array.
   * @single_rbuf: SRP data buffer if the command has only a single buffer.
@@@ -202,7 -201,6 +201,6 @@@ struct srpt_send_ioctx 
        struct srp_direct_buf   *rbufs;
        struct srp_direct_buf   single_rbuf;
        struct scatterlist      *sg;
-       struct list_head        free_list;
        spinlock_t              spinlock;
        enum srpt_command_state state;
        struct se_cmd           cmd;
  
  /**
   * enum rdma_ch_state - SRP channel state.
 - * @CH_CONNECTING:     QP is in RTR state; waiting for RTU.
 - * @CH_LIVE:           QP is in RTS state.
 - * @CH_DISCONNECTING:    DREQ has been received; waiting for DREP
 - *                       or DREQ has been send and waiting for DREP
 - *                       or .
 - * @CH_DRAINING:       QP is in ERR state; waiting for last WQE event.
 - * @CH_RELEASING:      Last WQE event has been received; releasing resources.
 + * @CH_CONNECTING:    QP is in RTR state; waiting for RTU.
 + * @CH_LIVE:        QP is in RTS state.
 + * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
 + *                    been received.
 + * @CH_DRAINING:      DREP has been received or waiting for DREP timed out
 + *                    and last work request has been queued.
 + * @CH_DISCONNECTED:  Last completion has been received.
   */
  enum rdma_ch_state {
        CH_CONNECTING,
        CH_LIVE,
        CH_DISCONNECTING,
        CH_DRAINING,
 -      CH_RELEASING
 +      CH_DISCONNECTED,
  };
  
  /**
@@@ -267,8 -265,6 +265,8 @@@ struct srpt_rdma_ch 
        struct ib_cm_id         *cm_id;
        struct ib_qp            *qp;
        struct ib_cq            *cq;
 +      struct ib_cqe           zw_cqe;
 +      struct kref             kref;
        int                     rq_size;
        u32                     rsp_size;
        atomic_t                sq_wr_avail;
        u8                      sess_name[36];
        struct work_struct      release_work;
        struct completion       *release_done;
 -      bool                    in_shutdown;
  };
  
  /**
@@@ -344,7 -341,7 +342,7 @@@ struct srpt_port 
   * @ioctx_ring:    Per-HCA SRQ.
   * @rch_list:      Per-device channel list -- see also srpt_rdma_ch.list.
   * @ch_releaseQ:   Enables waiting for removal from rch_list.
 - * @spinlock:      Protects rch_list and tpg.
 + * @mutex:         Protects rch_list.
   * @port:          Information about the ports owned by this HCA.
   * @event_handler: Per-HCA asynchronous IB event handler.
   * @list:          Node in srpt_dev_list.
@@@ -358,10 -355,18 +356,10 @@@ struct srpt_device 
        struct srpt_recv_ioctx  **ioctx_ring;
        struct list_head        rch_list;
        wait_queue_head_t       ch_releaseQ;
 -      spinlock_t              spinlock;
 +      struct mutex            mutex;
        struct srpt_port        port[2];
        struct ib_event_handler event_handler;
        struct list_head        list;
  };
  
 -/**
 - * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
 - * @nacl:      Target core node ACL information.
 - */
 -struct srpt_node_acl {
 -      struct se_node_acl      nacl;
 -};
 -
  #endif                                /* IB_SRPT_H */
index ceb452dd143c972ac728f2b9326ff1eff0187e02,e6c5bcf2416217e90b11eb0b6200fc5d7925b1fb..47f8b9b49bac310335eb9ab9deebbf6997394873
@@@ -1060,12 -1060,6 +1060,12 @@@ struct mbx_cmd_32 
  #define FSTATE_FATAL_ERROR         4
  #define FSTATE_LOOP_BACK_CONN      5
  
 +#define QLA27XX_IMG_STATUS_VER_MAJOR   0x01
 +#define QLA27XX_IMG_STATUS_VER_MINOR    0x00
 +#define QLA27XX_IMG_STATUS_SIGN   0xFACEFADE
 +#define QLA27XX_PRIMARY_IMAGE  1
 +#define QLA27XX_SECONDARY_IMAGE    2
 +
  /*
   * Port Database structure definition
   * Little endian except where noted.
@@@ -1254,41 -1248,13 +1254,41 @@@ struct link_statistics 
        uint32_t inval_xmit_word_cnt;
        uint32_t inval_crc_cnt;
        uint32_t lip_cnt;
 -      uint32_t unused1[0x1a];
 +      uint32_t link_up_cnt;
 +      uint32_t link_down_loop_init_tmo;
 +      uint32_t link_down_los;
 +      uint32_t link_down_loss_rcv_clk;
 +      uint32_t reserved0[5];
 +      uint32_t port_cfg_chg;
 +      uint32_t reserved1[11];
 +      uint32_t rsp_q_full;
 +      uint32_t atio_q_full;
 +      uint32_t drop_ae;
 +      uint32_t els_proto_err;
 +      uint32_t reserved2;
        uint32_t tx_frames;
        uint32_t rx_frames;
        uint32_t discarded_frames;
        uint32_t dropped_frames;
 -      uint32_t unused2[1];
 +      uint32_t reserved3;
        uint32_t nos_rcvd;
 +      uint32_t reserved4[4];
 +      uint32_t tx_prjt;
 +      uint32_t rcv_exfail;
 +      uint32_t rcv_abts;
 +      uint32_t seq_frm_miss;
 +      uint32_t corr_err;
 +      uint32_t mb_rqst;
 +      uint32_t nport_full;
 +      uint32_t eofa;
 +      uint32_t reserved5;
 +      uint32_t fpm_recv_word_cnt_lo;
 +      uint32_t fpm_recv_word_cnt_hi;
 +      uint32_t fpm_disc_word_cnt_lo;
 +      uint32_t fpm_disc_word_cnt_hi;
 +      uint32_t fpm_xmit_word_cnt_lo;
 +      uint32_t fpm_xmit_word_cnt_hi;
 +      uint32_t reserved6[70];
  };
  
  /*
@@@ -2963,6 -2929,7 +2963,7 @@@ struct qlt_hw_data 
  
        uint8_t tgt_node_name[WWN_SIZE];
  
+       struct dentry *dfs_tgt_sess;
        struct list_head q_full_list;
        uint32_t num_pend_cmds;
        uint32_t num_qfull_cmds_alloc;
@@@ -3467,20 -3434,14 +3468,20 @@@ struct qla_hw_data 
        uint32_t        flt_region_flt;
        uint32_t        flt_region_fdt;
        uint32_t        flt_region_boot;
 +      uint32_t        flt_region_boot_sec;
        uint32_t        flt_region_fw;
 +      uint32_t        flt_region_fw_sec;
        uint32_t        flt_region_vpd_nvram;
        uint32_t        flt_region_vpd;
 +      uint32_t        flt_region_vpd_sec;
        uint32_t        flt_region_nvram;
        uint32_t        flt_region_npiv_conf;
        uint32_t        flt_region_gold_fw;
        uint32_t        flt_region_fcp_prio;
        uint32_t        flt_region_bootload;
 +      uint32_t        flt_region_img_status_pri;
 +      uint32_t        flt_region_img_status_sec;
 +      uint8_t         active_image;
  
        /* Needed for BEACON */
        uint16_t        beacon_blink_led;
@@@ -3611,7 -3572,6 +3612,7 @@@ typedef struct scsi_qla_host 
                uint32_t        delete_progress:1;
  
                uint32_t        fw_tgt_reported:1;
 +              uint32_t        bbcr_enable:1;
        } flags;
  
        atomic_t        loop_state;
        atomic_t        vref_count;
        struct qla8044_reset_template reset_tmplt;
        struct qla_tgt_counters tgt_counters;
 +      uint16_t        bbcr;
  } scsi_qla_host_t;
  
 +struct qla27xx_image_status {
 +      uint8_t image_status_mask;
 +      uint16_t generation_number;
 +      uint8_t reserved[3];
 +      uint8_t ver_minor;
 +      uint8_t ver_major;
 +      uint32_t checksum;
 +      uint32_t signature;
 +} __packed;
 +
  #define SET_VP_IDX    1
  #define SET_AL_PA     2
  #define RESET_VP_IDX  3
index 8caef31da415bc88606a286928303be624ecbd7a,8cc68be712301d3116ca3d21f450c495cd059188..1bd5c72b663e29a32d50c88dd0fccf344bb72f30
@@@ -78,7 -78,7 +78,7 @@@ static int target_fabric_mappedlun_link
                        struct se_lun_acl, se_lun_group);
        struct se_portal_group *se_tpg;
        struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
-       int lun_access;
+       bool lun_access_ro;
  
        if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) {
                pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:"
        }
        /*
         * If this struct se_node_acl was dynamically generated with
-        * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
-        * which be will write protected (READ-ONLY) when
+        * tpg_1/attrib/generate_node_acls=1, use the existing
+        * deve->lun_access_ro value, which will be true when
         * tpg_1/attrib/demo_mode_write_protect=1
         */
        rcu_read_lock();
        deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
        if (deve)
-               lun_access = deve->lun_flags;
+               lun_access_ro = deve->lun_access_ro;
        else
-               lun_access =
+               lun_access_ro =
                        (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
-                               se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
-                                          TRANSPORT_LUNFLAGS_READ_WRITE;
+                               se_tpg)) ? true : false;
        rcu_read_unlock();
        /*
         * Determine the actual mapped LUN value user wants..
         * This value is what the SCSI Initiator actually sees the
         * $FABRIC/$WWPN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
         */
-       return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access);
+       return core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, lun_access_ro);
  }
  
  static int target_fabric_mappedlun_unlink(
@@@ -167,8 -166,7 +166,7 @@@ static ssize_t target_fabric_mappedlun_
        rcu_read_lock();
        deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
        if (deve) {
-               len = sprintf(page, "%d\n",
-                       (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+               len = sprintf(page, "%d\n", deve->lun_access_ro);
        }
        rcu_read_unlock();
  
@@@ -181,25 -179,23 +179,23 @@@ static ssize_t target_fabric_mappedlun_
        struct se_lun_acl *lacl = item_to_lun_acl(item);
        struct se_node_acl *se_nacl = lacl->se_lun_nacl;
        struct se_portal_group *se_tpg = se_nacl->se_tpg;
-       unsigned long op;
+       unsigned long wp;
        int ret;
  
-       ret = kstrtoul(page, 0, &op);
+       ret = kstrtoul(page, 0, &wp);
        if (ret)
                return ret;
  
-       if ((op != 1) && (op != 0))
+       if ((wp != 1) && (wp != 0))
                return -EINVAL;
  
-       core_update_device_list_access(lacl->mapped_lun, (op) ?
-                       TRANSPORT_LUNFLAGS_READ_ONLY :
-                       TRANSPORT_LUNFLAGS_READ_WRITE,
-                       lacl->se_lun_nacl);
+       /* wp=1 means lun_access_ro=true */
+       core_update_device_list_access(lacl->mapped_lun, wp, lacl->se_lun_nacl);
  
        pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
                " Mapped LUN: %llu Write Protect bit to %s\n",
                se_tpg->se_tpg_tfo->get_fabric_name(),
-               se_nacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
+               se_nacl->initiatorname, lacl->mapped_lun, (wp) ? "ON" : "OFF");
  
        return count;
  
@@@ -273,10 -269,18 +269,10 @@@ static struct config_group *target_fabr
        struct se_portal_group *se_tpg = se_nacl->se_tpg;
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct se_lun_acl *lacl = NULL;
 -      struct config_item *acl_ci;
 -      struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
        char *buf;
        unsigned long long mapped_lun;
        int ret = 0;
  
 -      acl_ci = &group->cg_item;
 -      if (!acl_ci) {
 -              pr_err("Unable to locatel acl_ci\n");
 -              return NULL;
 -      }
 -
        buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
        if (!buf) {
                pr_err("Unable to allocate memory for name buf\n");
                goto out;
        }
  
 -      lacl_cg = &lacl->se_lun_group;
 -      lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 -                              GFP_KERNEL);
 -      if (!lacl_cg->default_groups) {
 -              pr_err("Unable to allocate lacl_cg->default_groups\n");
 -              ret = -ENOMEM;
 -              goto out;
 -      }
 -
        config_group_init_type_name(&lacl->se_lun_group, name,
                        &tf->tf_tpg_mappedlun_cit);
 +
        config_group_init_type_name(&lacl->ml_stat_grps.stat_group,
                        "statistics", &tf->tf_tpg_mappedlun_stat_cit);
 -      lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
 -      lacl_cg->default_groups[1] = NULL;
 -
 -      ml_stat_grp = &lacl->ml_stat_grps.stat_group;
 -      ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3,
 -                              GFP_KERNEL);
 -      if (!ml_stat_grp->default_groups) {
 -              pr_err("Unable to allocate ml_stat_grp->default_groups\n");
 -              ret = -ENOMEM;
 -              goto out;
 -      }
 +      configfs_add_default_group(&lacl->ml_stat_grps.stat_group,
 +                      &lacl->se_lun_group);
 +
        target_stat_setup_mappedlun_default_groups(lacl);
  
        kfree(buf);
        return &lacl->se_lun_group;
  out:
 -      if (lacl_cg)
 -              kfree(lacl_cg->default_groups);
        kfree(lacl);
        kfree(buf);
        return ERR_PTR(ret);
@@@ -331,9 -353,25 +327,9 @@@ static void target_fabric_drop_mappedlu
  {
        struct se_lun_acl *lacl = container_of(to_config_group(item),
                        struct se_lun_acl, se_lun_group);
 -      struct config_item *df_item;
 -      struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
 -      int i;
 -
 -      ml_stat_grp = &lacl->ml_stat_grps.stat_group;
 -      for (i = 0; ml_stat_grp->default_groups[i]; i++) {
 -              df_item = &ml_stat_grp->default_groups[i]->cg_item;
 -              ml_stat_grp->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
 -      kfree(ml_stat_grp->default_groups);
  
 -      lacl_cg = &lacl->se_lun_group;
 -      for (i = 0; lacl_cg->default_groups[i]; i++) {
 -              df_item = &lacl_cg->default_groups[i]->cg_item;
 -              lacl_cg->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
 -      kfree(lacl_cg->default_groups);
 +      configfs_remove_default_groups(&lacl->ml_stat_grps.stat_group);
 +      configfs_remove_default_groups(&lacl->se_lun_group);
  
        config_item_put(item);
  }
@@@ -382,6 -420,7 +378,6 @@@ static struct config_group *target_fabr
                        struct se_portal_group, tpg_acl_group);
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
        struct se_node_acl *se_nacl;
 -      struct config_group *nacl_cg;
  
        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name);
        if (IS_ERR(se_nacl))
                }
        }
  
 -      nacl_cg = &se_nacl->acl_group;
 -      nacl_cg->default_groups = se_nacl->acl_default_groups;
 -      nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
 -      nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
 -      nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
 -      nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group;
 -      nacl_cg->default_groups[4] = NULL;
 -
        config_group_init_type_name(&se_nacl->acl_group, name,
                        &tf->tf_tpg_nacl_base_cit);
 +
        config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
                        &tf->tf_tpg_nacl_attrib_cit);
 +      configfs_add_default_group(&se_nacl->acl_attrib_group,
 +                      &se_nacl->acl_group);
 +
        config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
                        &tf->tf_tpg_nacl_auth_cit);
 +      configfs_add_default_group(&se_nacl->acl_auth_group,
 +                      &se_nacl->acl_group);
 +
        config_group_init_type_name(&se_nacl->acl_param_group, "param",
                        &tf->tf_tpg_nacl_param_cit);
 +      configfs_add_default_group(&se_nacl->acl_param_group,
 +                      &se_nacl->acl_group);
 +
        config_group_init_type_name(&se_nacl->acl_fabric_stat_group,
                        "fabric_statistics", &tf->tf_tpg_nacl_stat_cit);
 +      configfs_add_default_group(&se_nacl->acl_fabric_stat_group,
 +                      &se_nacl->acl_group);
  
        return &se_nacl->acl_group;
  }
@@@ -427,9 -462,16 +423,9 @@@ static void target_fabric_drop_nodeacl
  {
        struct se_node_acl *se_nacl = container_of(to_config_group(item),
                        struct se_node_acl, acl_group);
 -      struct config_item *df_item;
 -      struct config_group *nacl_cg;
 -      int i;
 -
 -      nacl_cg = &se_nacl->acl_group;
 -      for (i = 0; nacl_cg->default_groups[i]; i++) {
 -              df_item = &nacl_cg->default_groups[i]->cg_item;
 -              nacl_cg->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
 +
 +      configfs_remove_default_groups(&se_nacl->acl_group);
 +
        /*
         * struct se_node_acl free is done in target_fabric_nacl_base_release()
         */
@@@ -749,6 -791,7 +745,6 @@@ static struct config_group *target_fabr
        struct se_portal_group *se_tpg = container_of(group,
                        struct se_portal_group, tpg_lun_group);
        struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 -      struct config_group *lun_cg = NULL, *port_stat_grp = NULL;
        unsigned long long unpacked_lun;
        int errno;
  
        if (IS_ERR(lun))
                return ERR_CAST(lun);
  
 -      lun_cg = &lun->lun_group;
 -      lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2,
 -                              GFP_KERNEL);
 -      if (!lun_cg->default_groups) {
 -              pr_err("Unable to allocate lun_cg->default_groups\n");
 -              kfree(lun);
 -              return ERR_PTR(-ENOMEM);
 -      }
 -
        config_group_init_type_name(&lun->lun_group, name,
                        &tf->tf_tpg_port_cit);
 +
        config_group_init_type_name(&lun->port_stat_grps.stat_group,
                        "statistics", &tf->tf_tpg_port_stat_cit);
 -      lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
 -      lun_cg->default_groups[1] = NULL;
 -
 -      port_stat_grp = &lun->port_stat_grps.stat_group;
 -      port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group *) * 4,
 -                              GFP_KERNEL);
 -      if (!port_stat_grp->default_groups) {
 -              pr_err("Unable to allocate port_stat_grp->default_groups\n");
 -              kfree(lun_cg->default_groups);
 -              kfree(lun);
 -              return ERR_PTR(-ENOMEM);
 -      }
 +      configfs_add_default_group(&lun->port_stat_grps.stat_group,
 +                      &lun->lun_group);
 +
        target_stat_setup_port_default_groups(lun);
  
        return &lun->lun_group;
@@@ -784,9 -844,25 +780,9 @@@ static void target_fabric_drop_lun
  {
        struct se_lun *lun = container_of(to_config_group(item),
                                struct se_lun, lun_group);
 -      struct config_item *df_item;
 -      struct config_group *lun_cg, *port_stat_grp;
 -      int i;
 -
 -      port_stat_grp = &lun->port_stat_grps.stat_group;
 -      for (i = 0; port_stat_grp->default_groups[i]; i++) {
 -              df_item = &port_stat_grp->default_groups[i]->cg_item;
 -              port_stat_grp->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
 -      kfree(port_stat_grp->default_groups);
  
 -      lun_cg = &lun->lun_group;
 -      for (i = 0; lun_cg->default_groups[i]; i++) {
 -              df_item = &lun_cg->default_groups[i]->cg_item;
 -              lun_cg->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
 -      kfree(lun_cg->default_groups);
 +      configfs_remove_default_groups(&lun->port_stat_grps.stat_group);
 +      configfs_remove_default_groups(&lun->lun_group);
  
        config_item_put(item);
  }
@@@ -842,39 -918,32 +838,39 @@@ static struct config_group *target_fabr
        se_tpg = tf->tf_ops->fabric_make_tpg(wwn, group, name);
        if (!se_tpg || IS_ERR(se_tpg))
                return ERR_PTR(-EINVAL);
 -      /*
 -       * Setup default groups from pre-allocated se_tpg->tpg_default_groups
 -       */
 -      se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
 -      se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
 -      se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
 -      se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
 -      se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
 -      se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group;
 -      se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group;
 -      se_tpg->tpg_group.default_groups[6] = NULL;
  
        config_group_init_type_name(&se_tpg->tpg_group, name,
                        &tf->tf_tpg_base_cit);
 +
        config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
                        &tf->tf_tpg_lun_cit);
 +      configfs_add_default_group(&se_tpg->tpg_lun_group,
 +                      &se_tpg->tpg_group);
 +
        config_group_init_type_name(&se_tpg->tpg_np_group, "np",
                        &tf->tf_tpg_np_cit);
 +      configfs_add_default_group(&se_tpg->tpg_np_group,
 +                      &se_tpg->tpg_group);
 +
        config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
                        &tf->tf_tpg_nacl_cit);
 +      configfs_add_default_group(&se_tpg->tpg_acl_group,
 +                      &se_tpg->tpg_group);
 +
        config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
                        &tf->tf_tpg_attrib_cit);
 +      configfs_add_default_group(&se_tpg->tpg_attrib_group,
 +                      &se_tpg->tpg_group);
 +
        config_group_init_type_name(&se_tpg->tpg_auth_group, "auth",
                        &tf->tf_tpg_auth_cit);
 +      configfs_add_default_group(&se_tpg->tpg_auth_group,
 +                      &se_tpg->tpg_group);
 +
        config_group_init_type_name(&se_tpg->tpg_param_group, "param",
                        &tf->tf_tpg_param_cit);
 +      configfs_add_default_group(&se_tpg->tpg_param_group,
 +                      &se_tpg->tpg_group);
  
        return &se_tpg->tpg_group;
  }
@@@ -885,8 -954,19 +881,8 @@@ static void target_fabric_drop_tpg
  {
        struct se_portal_group *se_tpg = container_of(to_config_group(item),
                                struct se_portal_group, tpg_group);
 -      struct config_group *tpg_cg = &se_tpg->tpg_group;
 -      struct config_item *df_item;
 -      int i;
 -      /*
 -       * Release default groups, but do not release tpg_cg->default_groups
 -       * memory as it is statically allocated at se_tpg->tpg_default_groups.
 -       */
 -      for (i = 0; tpg_cg->default_groups[i]; i++) {
 -              df_item = &tpg_cg->default_groups[i]->cg_item;
 -              tpg_cg->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
  
 +      configfs_remove_default_groups(&se_tpg->tpg_group);
        config_item_put(item);
  }
  
@@@ -942,12 -1022,16 +938,12 @@@ static struct config_group *target_fabr
                return ERR_PTR(-EINVAL);
  
        wwn->wwn_tf = tf;
 -      /*
 -       * Setup default groups from pre-allocated wwn->wwn_default_groups
 -       */
 -      wwn->wwn_group.default_groups = wwn->wwn_default_groups;
 -      wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group;
 -      wwn->wwn_group.default_groups[1] = NULL;
  
        config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_tpg_cit);
 +
        config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics",
                        &tf->tf_wwn_fabric_stats_cit);
 +      configfs_add_default_group(&wwn->fabric_stat_group, &wwn->wwn_group);
  
        return &wwn->wwn_group;
  }
@@@ -958,8 -1042,16 +954,8 @@@ static void target_fabric_drop_wwn
  {
        struct se_wwn *wwn = container_of(to_config_group(item),
                                struct se_wwn, wwn_group);
 -      struct config_item *df_item;
 -      struct config_group *cg = &wwn->wwn_group;
 -      int i;
 -
 -      for (i = 0; cg->default_groups[i]; i++) {
 -              df_item = &cg->default_groups[i]->cg_item;
 -              cg->default_groups[i] = NULL;
 -              config_item_put(df_item);
 -      }
  
 +      configfs_remove_default_groups(&wwn->wwn_group);
        config_item_put(item);
  }
  
index 4a7cf499cdfa242f2c164bf1d494276319d1b05f,040cf5202e548b4e97a2e899425c67e082a04802..86b4a8375628cbaa3f7474718ee7f15164bad8b5
@@@ -22,6 -22,7 +22,6 @@@ struct target_fabric_configfs 
        struct list_head        tf_list;
        struct config_group     tf_group;
        struct config_group     tf_disc_group;
 -      struct config_group     *tf_default_groups[2];
        const struct target_core_fabric_ops *tf_ops;
  
        struct config_item_type tf_discovery_cit;
@@@ -59,10 -60,10 +59,10 @@@ struct se_dev_entry *core_get_se_deve_f
  void  target_pr_kref_release(struct kref *);
  void  core_free_device_list_for_node(struct se_node_acl *,
                struct se_portal_group *);
- void  core_update_device_list_access(u64, u32, struct se_node_acl *);
+ void  core_update_device_list_access(u64, bool, struct se_node_acl *);
  struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u64);
  int   core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-               u64, u32, struct se_node_acl *, struct se_portal_group *);
+               u64, bool, struct se_node_acl *, struct se_portal_group *);
  void  core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
                struct se_node_acl *, struct se_portal_group *);
  void  core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
@@@ -72,7 -73,7 +72,7 @@@ void  core_dev_del_lun(struct se_portal_
  struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
                struct se_node_acl *, u64, int *);
  int   core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-               struct se_lun_acl *, struct se_lun *lun, u32);
+               struct se_lun_acl *, struct se_lun *lun, bool);
  int   core_dev_del_initiator_node_lun_acl(struct se_lun *,
                struct se_lun_acl *);
  void  core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
@@@ -118,7 -119,7 +118,7 @@@ void       core_tpg_add_node_to_devs(struct s
  void  core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
  struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u64);
  int   core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
-               u32, struct se_device *);
+               bool, struct se_device *);
  void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
  struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg,
                const char *initiatorname);
index dfb733047a4c87b1b9de38a4fd2ec615e75fd508,7e7227bf981992e49d1b877bb504f63ab2b82c45..2ace0295408e536e2834165a004ae11d3256f6ec
@@@ -41,13 -41,6 +41,6 @@@ static inline struct f_uas *to_f_uas(st
        return container_of(f, struct f_uas, function);
  }
  
- static void usbg_cmd_release(struct kref *);
- static inline void usbg_cleanup_cmd(struct usbg_cmd *cmd)
- {
-       kref_put(&cmd->ref, usbg_cmd_release);
- }
  /* Start bot.c code */
  
  static int bot_enqueue_cmd_cbw(struct f_uas *fu)
@@@ -68,7 -61,7 +61,7 @@@ static void bot_status_complete(struct 
        struct usbg_cmd *cmd = req->context;
        struct f_uas *fu = cmd->fu;
  
-       usbg_cleanup_cmd(cmd);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
        if (req->status < 0) {
                pr_err("ERR %s(%d)\n", __func__, __LINE__);
                return;
@@@ -605,7 -598,7 +598,7 @@@ static void uasp_status_data_cmpl(struc
                break;
  
        case UASP_QUEUE_COMMAND:
-               usbg_cleanup_cmd(cmd);
+               transport_generic_free_cmd(&cmd->se_cmd, 0);
                usb_ep_queue(fu->ep_cmd, fu->cmd.req, GFP_ATOMIC);
                break;
  
        return;
  
  cleanup:
-       usbg_cleanup_cmd(cmd);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
  }
  
  static int uasp_send_status_response(struct usbg_cmd *cmd)
@@@ -977,7 -970,7 +970,7 @@@ static void usbg_data_write_cmpl(struc
        return;
  
  cleanup:
-       usbg_cleanup_cmd(cmd);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
  }
  
  static int usbg_prepare_w_request(struct usbg_cmd *cmd, struct usb_request *req)
@@@ -1046,7 -1039,7 +1039,7 @@@ static void usbg_cmd_work(struct work_s
        struct se_cmd *se_cmd;
        struct tcm_usbg_nexus *tv_nexus;
        struct usbg_tpg *tpg;
-       int dir;
+       int dir, flags = (TARGET_SCF_UNKNOWN_SIZE | TARGET_SCF_ACK_KREF);
  
        se_cmd = &cmd->se_cmd;
        tpg = cmd->fu->tpg;
                goto out;
        }
  
-       if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
-                       cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
-                       0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+       if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess, cmd->cmd_buf,
+                             cmd->sense_iu.sense, cmd->unpacked_lun, 0,
+                             cmd->prio_attr, dir, flags) < 0)
                goto out;
  
        return;
  out:
        transport_send_check_condition_and_sense(se_cmd,
                        TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-       usbg_cleanup_cmd(cmd);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
  }
  
+ static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu,
+               struct tcm_usbg_nexus *tv_nexus, u32 scsi_tag)
+ {
+       struct se_session *se_sess = tv_nexus->tvn_se_sess;
+       struct usbg_cmd *cmd;
+       int tag;
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       if (tag < 0)
+               return ERR_PTR(-ENOMEM);
+       cmd = &((struct usbg_cmd *)se_sess->sess_cmd_map)[tag];
+       memset(cmd, 0, sizeof(*cmd));
+       cmd->se_cmd.map_tag = tag;
+       cmd->se_cmd.tag = cmd->tag = scsi_tag;
+       cmd->fu = fu;
+       return cmd;
+ }
+ static void usbg_release_cmd(struct se_cmd *);
  static int usbg_submit_command(struct f_uas *fu,
                void *cmdbuf, unsigned int len)
  {
        struct command_iu *cmd_iu = cmdbuf;
        struct usbg_cmd *cmd;
-       struct usbg_tpg *tpg;
-       struct tcm_usbg_nexus *tv_nexus;
+       struct usbg_tpg *tpg = fu->tpg;
+       struct tcm_usbg_nexus *tv_nexus = tpg->tpg_nexus;
        u32 cmd_len;
+       u16 scsi_tag;
  
        if (cmd_iu->iu_id != IU_ID_COMMAND) {
                pr_err("Unsupported type %d\n", cmd_iu->iu_id);
                return -EINVAL;
        }
  
-       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-       if (!cmd)
-               return -ENOMEM;
-       cmd->fu = fu;
-       /* XXX until I figure out why I can't free in on complete */
-       kref_init(&cmd->ref);
-       kref_get(&cmd->ref);
+       tv_nexus = tpg->tpg_nexus;
+       if (!tv_nexus) {
+               pr_err("Missing nexus, ignoring command\n");
+               return -EINVAL;
+       }
  
-       tpg = fu->tpg;
        cmd_len = (cmd_iu->len & ~0x3) + 16;
        if (cmd_len > USBG_MAX_CMD)
-               goto err;
+               return -EINVAL;
  
+       scsi_tag = be16_to_cpup(&cmd_iu->tag);
+       cmd = usbg_get_cmd(fu, tv_nexus, scsi_tag);
+       if (IS_ERR(cmd)) {
+               pr_err("usbg_get_cmd failed\n");
+               return -ENOMEM;
+       }
        memcpy(cmd->cmd_buf, cmd_iu->cdb, cmd_len);
  
-       cmd->tag = be16_to_cpup(&cmd_iu->tag);
-       cmd->se_cmd.tag = cmd->tag;
        if (fu->flags & USBG_USE_STREAMS) {
                if (cmd->tag > UASP_SS_EP_COMP_NUM_STREAMS)
                        goto err;
                cmd->stream = &fu->stream[0];
        }
  
-       tv_nexus = tpg->tpg_nexus;
-       if (!tv_nexus) {
-               pr_err("Missing nexus, ignoring command\n");
-               goto err;
-       }
        switch (cmd_iu->prio_attr & 0x7) {
        case UAS_HEAD_TAG:
                cmd->prio_attr = TCM_HEAD_TAG;
  
        return 0;
  err:
-       kfree(cmd);
+       usbg_release_cmd(&cmd->se_cmd);
        return -EINVAL;
  }
  
@@@ -1182,7 -1191,7 +1191,7 @@@ static void bot_cmd_work(struct work_st
  out:
        transport_send_check_condition_and_sense(se_cmd,
                                TCM_UNSUPPORTED_SCSI_OPCODE, 1);
-       usbg_cleanup_cmd(cmd);
+       transport_generic_free_cmd(&cmd->se_cmd, 0);
  }
  
  static int bot_submit_command(struct f_uas *fu,
  {
        struct bulk_cb_wrap *cbw = cmdbuf;
        struct usbg_cmd *cmd;
-       struct usbg_tpg *tpg;
+       struct usbg_tpg *tpg = fu->tpg;
        struct tcm_usbg_nexus *tv_nexus;
        u32 cmd_len;
  
        if (cmd_len < 1 || cmd_len > 16)
                return -EINVAL;
  
-       cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
-       if (!cmd)
-               return -ENOMEM;
-       cmd->fu = fu;
-       /* XXX until I figure out why I can't free in on complete */
-       kref_init(&cmd->ref);
-       kref_get(&cmd->ref);
-       tpg = fu->tpg;
-       memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
-       cmd->bot_tag = cbw->Tag;
        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Missing nexus, ignoring command\n");
-               goto err;
+               return -ENODEV;
        }
  
+       cmd = usbg_get_cmd(fu, tv_nexus, cbw->Tag);
+       if (IS_ERR(cmd)) {
+               pr_err("usbg_get_cmd failed\n");
+               return -ENOMEM;
+       }
+       memcpy(cmd->cmd_buf, cbw->CDB, cmd_len);
+       cmd->bot_tag = cbw->Tag;
        cmd->prio_attr = TCM_SIMPLE_TAG;
        cmd->unpacked_lun = cbw->Lun;
        cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
        queue_work(tpg->workqueue, &cmd->work);
  
        return 0;
- err:
-       kfree(cmd);
-       return -EINVAL;
  }
  
  /* Start fabric.c code */
@@@ -1282,20 -1280,14 +1280,14 @@@ static u32 usbg_tpg_get_inst_index(stru
        return 1;
  }
  
- static void usbg_cmd_release(struct kref *ref)
- {
-       struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
-                       ref);
-       transport_generic_free_cmd(&cmd->se_cmd, 0);
- }
  static void usbg_release_cmd(struct se_cmd *se_cmd)
  {
        struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
                        se_cmd);
+       struct se_session *se_sess = se_cmd->se_sess;
        kfree(cmd->data_buf);
-       kfree(cmd);
+       percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
  }
  
  static int usbg_shutdown_session(struct se_session *se_sess)
        return ret;
  }
  
+ static int usbg_alloc_sess_cb(struct se_portal_group *se_tpg,
+                             struct se_session *se_sess, void *p)
+ {
+       struct usbg_tpg *tpg = container_of(se_tpg,
+                               struct usbg_tpg, se_tpg);
+       tpg->tpg_nexus = p;
+       return 0;
+ }
  static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
  {
-       struct se_portal_group *se_tpg;
        struct tcm_usbg_nexus *tv_nexus;
-       int ret;
+       int ret = 0;
  
        mutex_lock(&tpg->tpg_mutex);
        if (tpg->tpg_nexus) {
                ret = -EEXIST;
                pr_debug("tpg->tpg_nexus already exists\n");
-               goto err_unlock;
+               goto out_unlock;
        }
-       se_tpg = &tpg->se_tpg;
  
-       ret = -ENOMEM;
        tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
-       if (!tv_nexus)
-               goto err_unlock;
-       tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
-       if (IS_ERR(tv_nexus->tvn_se_sess))
-               goto err_free;
+       if (!tv_nexus) {
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
  
-       /*
-        * Since we are running in 'demo mode' this call with generate a
-        * struct se_node_acl for the tcm_vhost struct se_portal_group with
-        * the SCSI Initiator port name of the passed configfs group 'name'.
-        */
-       tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                       se_tpg, name);
-       if (!tv_nexus->tvn_se_sess->se_node_acl) {
+       tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+                                                    USB_G_DEFAULT_SESSION_TAGS,
+                                                    sizeof(struct usbg_cmd),
+                                                    TARGET_PROT_NORMAL, name,
+                                                    tv_nexus, usbg_alloc_sess_cb);
+       if (IS_ERR(tv_nexus->tvn_se_sess)) {
  #define MAKE_NEXUS_MSG "core_tpg_check_initiator_node_acl() failed for %s\n"
                pr_debug(MAKE_NEXUS_MSG, name);
  #undef MAKE_NEXUS_MSG
-               goto err_session;
+               ret = PTR_ERR(tv_nexus->tvn_se_sess);
+               kfree(tv_nexus);
        }
-       /*
-        * Now register the TCM vHost virtual I_T Nexus as active.
-        */
-       transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-                       tv_nexus->tvn_se_sess, tv_nexus);
-       tpg->tpg_nexus = tv_nexus;
-       mutex_unlock(&tpg->tpg_mutex);
-       return 0;
  
- err_session:
-       transport_free_session(tv_nexus->tvn_se_sess);
- err_free:
-       kfree(tv_nexus);
- err_unlock:
+ out_unlock:
        mutex_unlock(&tpg->tpg_mutex);
        return ret;
  }
@@@ -1735,11 -1720,7 +1720,7 @@@ static void usbg_port_unlink(struct se_
  
  static int usbg_check_stop_free(struct se_cmd *se_cmd)
  {
-       struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
-                       se_cmd);
-       kref_put(&cmd->ref, usbg_cmd_release);
-       return 1;
+       return target_put_sess_cmd(se_cmd);
  }
  
  static const struct target_core_fabric_ops usbg_ops = {
@@@ -2098,7 -2079,7 +2079,7 @@@ static int tcm_bind(struct usb_configur
        uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress;
  
        ret = usb_assign_descriptors(f, uasp_fs_function_desc,
 -                      uasp_hs_function_desc, uasp_ss_function_desc);
 +                      uasp_hs_function_desc, uasp_ss_function_desc, NULL);
        if (ret)
                goto ep_fail;
  
diff --combined drivers/vhost/scsi.c
index f898686cdd93be27f7dce0a62dbebc8c8824d181,cd5f20f14d5ad802c31fb465a757a28bbfb2a8fe..0e6fd556c9827e0b50cdb19d6d2592673ff654bf
@@@ -1274,7 -1274,7 +1274,7 @@@ vhost_scsi_set_endpoint(struct vhost_sc
                        vq = &vs->vqs[i].vq;
                        mutex_lock(&vq->mutex);
                        vq->private_data = vs_tpg;
 -                      vhost_init_used(vq);
 +                      vhost_vq_init_access(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
@@@ -1664,8 -1664,7 +1664,7 @@@ static void vhost_scsi_port_unlink(stru
        mutex_unlock(&vhost_scsi_mutex);
  }
  
- static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
-                                      struct se_session *se_sess)
+ static void vhost_scsi_free_cmd_map_res(struct se_session *se_sess)
  {
        struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
@@@ -1721,98 -1720,82 +1720,82 @@@ static struct configfs_attribute *vhost
        NULL,
  };
  
- static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
-                               const char *name)
+ static int vhost_scsi_nexus_cb(struct se_portal_group *se_tpg,
+                              struct se_session *se_sess, void *p)
  {
-       struct se_portal_group *se_tpg;
-       struct se_session *se_sess;
-       struct vhost_scsi_nexus *tv_nexus;
        struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
  
-       mutex_lock(&tpg->tv_tpg_mutex);
-       if (tpg->tpg_nexus) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_debug("tpg->tpg_nexus already exists\n");
-               return -EEXIST;
-       }
-       se_tpg = &tpg->se_tpg;
-       tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
-       if (!tv_nexus) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to allocate struct vhost_scsi_nexus\n");
-               return -ENOMEM;
-       }
-       /*
-        *  Initialize the struct se_session pointer and setup tagpool
-        *  for struct vhost_scsi_cmd descriptors
-        */
-       tv_nexus->tvn_se_sess = transport_init_session_tags(
-                                       VHOST_SCSI_DEFAULT_TAGS,
-                                       sizeof(struct vhost_scsi_cmd),
-                                       TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
-       if (IS_ERR(tv_nexus->tvn_se_sess)) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
-               kfree(tv_nexus);
-               return -ENOMEM;
-       }
-       se_sess = tv_nexus->tvn_se_sess;
        for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
                tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
  
                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
                                        VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_sgl) {
-                       mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
                        goto out;
                }
  
                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                                       VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
+                               VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
-                       mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
                        goto out;
                }
  
                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
+                               VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_prot_sgl) {
-                       mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
                        goto out;
                }
        }
+       return 0;
+ out:
+       vhost_scsi_free_cmd_map_res(se_sess);
+       return -ENOMEM;
+ }
+ static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
+                               const char *name)
+ {
+       struct se_portal_group *se_tpg;
+       struct vhost_scsi_nexus *tv_nexus;
+       mutex_lock(&tpg->tv_tpg_mutex);
+       if (tpg->tpg_nexus) {
+               mutex_unlock(&tpg->tv_tpg_mutex);
+               pr_debug("tpg->tpg_nexus already exists\n");
+               return -EEXIST;
+       }
+       se_tpg = &tpg->se_tpg;
+       tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
+       if (!tv_nexus) {
+               mutex_unlock(&tpg->tv_tpg_mutex);
+               pr_err("Unable to allocate struct vhost_scsi_nexus\n");
+               return -ENOMEM;
+       }
        /*
         * Since we are running in 'demo mode' this call with generate a
         * struct se_node_acl for the vhost_scsi struct se_portal_group with
         * the SCSI Initiator port name of the passed configfs group 'name'.
         */
-       tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                               se_tpg, (unsigned char *)name);
-       if (!tv_nexus->tvn_se_sess->se_node_acl) {
+       tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+                                       VHOST_SCSI_DEFAULT_TAGS,
+                                       sizeof(struct vhost_scsi_cmd),
+                                       TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
+                                       (unsigned char *)name, tv_nexus,
+                                       vhost_scsi_nexus_cb);
+       if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_debug("core_tpg_check_initiator_node_acl() failed"
-                               " for %s\n", name);
-               goto out;
+               kfree(tv_nexus);
+               return -ENOMEM;
        }
-       /*
-        * Now register the TCM vhost virtual I_T Nexus as active.
-        */
-       transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-                       tv_nexus->tvn_se_sess, tv_nexus);
        tpg->tpg_nexus = tv_nexus;
  
        mutex_unlock(&tpg->tv_tpg_mutex);
        return 0;
- out:
-       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
-       transport_free_session(se_sess);
-       kfree(tv_nexus);
-       return -ENOMEM;
  }
  
  static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
                " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
  
-       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(se_sess);
        /*
         * Release the SCSI I_T Nexus to the emulated vhost Target Port
         */
index c46ee189466f81ab584282651494fad765a38417,652c8cec800f189579995daffa296b613f9b89d6..ff932624eaad885c9cc02c311c6bb2f90f99a54e
@@@ -141,6 -141,8 +141,8 @@@ struct scsiback_tmr 
        wait_queue_head_t tmr_wait;
  };
  
+ #define VSCSI_DEFAULT_SESSION_TAGS    128
  struct scsiback_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
@@@ -190,7 -192,6 +192,6 @@@ module_param_named(max_buffer_pages, sc
  MODULE_PARM_DESC(max_buffer_pages,
  "Maximum number of free pages to keep in backend buffer");
  
- static struct kmem_cache *scsiback_cachep;
  static DEFINE_SPINLOCK(free_pages_lock);
  static int free_pages_num;
  static LIST_HEAD(scsiback_free_pages);
@@@ -321,11 -322,11 +322,11 @@@ static void scsiback_free_translation_e
        kfree(entry);
  }
  
- static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
-                       uint32_t resid, struct vscsibk_pend *pending_req)
+ static void scsiback_send_response(struct vscsibk_info *info,
+                       char *sense_buffer, int32_t result, uint32_t resid,
+                       uint16_t rqid)
  {
        struct vscsiif_response *ring_res;
-       struct vscsibk_info *info = pending_req->info;
        int notify;
        struct scsi_sense_hdr sshdr;
        unsigned long flags;
        info->ring.rsp_prod_pvt++;
  
        ring_res->rslt   = result;
-       ring_res->rqid   = pending_req->rqid;
+       ring_res->rqid   = rqid;
  
        if (sense_buffer != NULL &&
            scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
  
        if (notify)
                notify_remote_via_irq(info->irq);
+ }
+ static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
+                       uint32_t resid, struct vscsibk_pend *pending_req)
+ {
+       scsiback_send_response(pending_req->info, sense_buffer, result,
+                              resid, pending_req->rqid);
  
        if (pending_req->v2p)
                kref_put(&pending_req->v2p->kref,
@@@ -380,6 -388,12 +388,12 @@@ static void scsiback_cmd_done(struct vs
        scsiback_fast_flush_area(pending_req);
        scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
        scsiback_put(info);
+       /*
+        * Drop the extra KREF_ACK reference taken by target_submit_cmd_map_sgls()
+        * ahead of scsiback_check_stop_free() ->  transport_generic_free_cmd()
+        * final se_cmd->cmd_kref put.
+        */
+       target_put_sess_cmd(&pending_req->se_cmd);
  }
  
  static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
        struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
        int rc;
  
-       memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
-       memset(se_cmd, 0, sizeof(*se_cmd));
        scsiback_get(pending_req->info);
        se_cmd->tag = pending_req->rqid;
        rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
                        pending_req->sense_buffer, pending_req->v2p->lun,
                        pending_req->data_len, 0,
-                       pending_req->sc_data_direction, 0,
+                       pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
                        pending_req->sgl, pending_req->n_sg,
                        NULL, 0, NULL, 0);
        if (rc < 0) {
@@@ -586,45 -596,40 +596,40 @@@ static void scsiback_disconnect(struct 
  static void scsiback_device_action(struct vscsibk_pend *pending_req,
        enum tcm_tmreq_table act, int tag)
  {
-       int rc, err = FAILED;
        struct scsiback_tpg *tpg = pending_req->v2p->tpg;
+       struct scsiback_nexus *nexus = tpg->tpg_nexus;
        struct se_cmd *se_cmd = &pending_req->se_cmd;
        struct scsiback_tmr *tmr;
+       u64 unpacked_lun = pending_req->v2p->lun;
+       int rc, err = FAILED;
  
        tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
-       if (!tmr)
-               goto out;
+       if (!tmr) {
+               target_put_sess_cmd(se_cmd);
+               goto err;
+       }
  
        init_waitqueue_head(&tmr->tmr_wait);
  
-       transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
-               tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
-               &pending_req->sense_buffer[0]);
-       rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
-       if (rc < 0)
-               goto out;
-       se_cmd->se_tmr_req->ref_task_tag = tag;
-       if (transport_lookup_tmr_lun(se_cmd, pending_req->v2p->lun) < 0)
-               goto out;
+       rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
+                              &pending_req->sense_buffer[0],
+                              unpacked_lun, tmr, act, GFP_KERNEL,
+                              tag, TARGET_SCF_ACK_KREF);
+       if (rc)
+               goto err;
  
-       transport_generic_handle_tmr(se_cmd);
        wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
  
        err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
                SUCCESS : FAILED;
  
- out:
-       if (tmr) {
-               transport_generic_free_cmd(&pending_req->se_cmd, 1);
+       scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
+       transport_generic_free_cmd(&pending_req->se_cmd, 1);
+       return;
+ err:
+       if (tmr)
                kfree(tmr);
-       }
        scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
-       kmem_cache_free(scsiback_cachep, pending_req);
  }
  
  /*
@@@ -653,15 -658,53 +658,53 @@@ out
        return entry;
  }
  
- static int prepare_pending_reqs(struct vscsibk_info *info,
-                               struct vscsiif_request *ring_req,
-                               struct vscsibk_pend *pending_req)
+ static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
+                               struct v2p_entry *v2p)
+ {
+       struct scsiback_tpg *tpg = v2p->tpg;
+       struct scsiback_nexus *nexus = tpg->tpg_nexus;
+       struct se_session *se_sess = nexus->tvn_se_sess;
+       struct vscsibk_pend *req;
+       int tag, i;
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+       if (tag < 0) {
+               pr_err("Unable to obtain tag for vscsiif_request\n");
+               return ERR_PTR(-ENOMEM);
+       }
+       req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
+       memset(req, 0, sizeof(*req));
+       req->se_cmd.map_tag = tag;
+       for (i = 0; i < VSCSI_MAX_GRANTS; i++)
+               req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
+       return req;
+ }
+ static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
+                               struct vscsiif_back_ring *ring,
+                               struct vscsiif_request *ring_req)
  {
+       struct vscsibk_pend *pending_req;
        struct v2p_entry *v2p;
        struct ids_tuple vir;
  
-       pending_req->rqid       = ring_req->rqid;
-       pending_req->info       = info;
+       /* request range check from frontend */
+       if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
+               (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
+               (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
+               (ring_req->sc_data_direction != DMA_NONE)) {
+               pr_debug("invalid parameter data_dir = %d\n",
+                       ring_req->sc_data_direction);
+               return ERR_PTR(-EINVAL);
+       }
+       if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
+               pr_debug("invalid parameter cmd_len = %d\n",
+                       ring_req->cmd_len);
+               return ERR_PTR(-EINVAL);
+       }
  
        vir.chn = ring_req->channel;
        vir.tgt = ring_req->id;
  
        v2p = scsiback_do_translation(info, &vir);
        if (!v2p) {
-               pending_req->v2p = NULL;
                pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
-                       vir.chn, vir.tgt, vir.lun);
-               return -ENODEV;
+                        vir.chn, vir.tgt, vir.lun);
+               return ERR_PTR(-ENODEV);
        }
-       pending_req->v2p = v2p;
  
-       /* request range check from frontend */
-       pending_req->sc_data_direction = ring_req->sc_data_direction;
-       if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
-               (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
-               (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
-               (pending_req->sc_data_direction != DMA_NONE)) {
-               pr_debug("invalid parameter data_dir = %d\n",
-                       pending_req->sc_data_direction);
-               return -EINVAL;
+       pending_req = scsiback_get_pend_req(ring, v2p);
+       if (IS_ERR(pending_req)) {
+               kref_put(&v2p->kref, scsiback_free_translation_entry);
+               return ERR_PTR(-ENOMEM);
        }
+       pending_req->rqid = ring_req->rqid;
+       pending_req->info = info;
+       pending_req->v2p = v2p;
+       pending_req->sc_data_direction = ring_req->sc_data_direction;
        pending_req->cmd_len = ring_req->cmd_len;
-       if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
-               pr_debug("invalid parameter cmd_len = %d\n",
-                       pending_req->cmd_len);
-               return -EINVAL;
-       }
        memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
  
-       return 0;
+       return pending_req;
  }
  
  static int scsiback_do_cmd_fn(struct vscsibk_info *info)
        struct vscsiif_request ring_req;
        struct vscsibk_pend *pending_req;
        RING_IDX rc, rp;
-       int err, more_to_do;
+       int more_to_do;
        uint32_t result;
  
        rc = ring->req_cons;
        while ((rc != rp)) {
                if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
                        break;
-               pending_req = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
-               if (!pending_req)
-                       return 1;
  
                RING_COPY_REQUEST(ring, rc, &ring_req);
                ring->req_cons = ++rc;
  
-               err = prepare_pending_reqs(info, &ring_req, pending_req);
-               if (err) {
-                       switch (err) {
+               pending_req = prepare_pending_reqs(info, ring, &ring_req);
+               if (IS_ERR(pending_req)) {
+                       switch (PTR_ERR(pending_req)) {
                        case -ENODEV:
                                result = DID_NO_CONNECT;
                                break;
                                result = DRIVER_ERROR;
                                break;
                        }
-                       scsiback_do_resp_with_sense(NULL, result << 24, 0,
-                                                   pending_req);
-                       kmem_cache_free(scsiback_cachep, pending_req);
+                       scsiback_send_response(info, NULL, result << 24, 0,
+                                              ring_req.rqid);
                        return 1;
                }
  
                        if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
                                scsiback_fast_flush_area(pending_req);
                                scsiback_do_resp_with_sense(NULL,
-                                       DRIVER_ERROR << 24, 0, pending_req);
-                               kmem_cache_free(scsiback_cachep, pending_req);
+                                               DRIVER_ERROR << 24, 0, pending_req);
+                               transport_generic_free_cmd(&pending_req->se_cmd, 0);
                        } else {
                                scsiback_cmd_exec(pending_req);
                        }
                        break;
                default:
                        pr_err_ratelimited("invalid request\n");
-                       scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24,
-                                                   0, pending_req);
-                       kmem_cache_free(scsiback_cachep, pending_req);
+                       scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
+                                                   pending_req);
+                       transport_generic_free_cmd(&pending_req->se_cmd, 0);
                        break;
                }
  
@@@ -848,24 -878,6 +878,24 @@@ static int scsiback_map(struct vscsibk_
        return scsiback_init_sring(info, ring_ref, evtchn);
  }
  
 +/*
 +  Check for a translation entry being present
 +*/
 +static struct v2p_entry *scsiback_chk_translation_entry(
 +      struct vscsibk_info *info, struct ids_tuple *v)
 +{
 +      struct list_head *head = &(info->v2p_entry_lists);
 +      struct v2p_entry *entry;
 +
 +      list_for_each_entry(entry, head, l)
 +              if ((entry->v.chn == v->chn) &&
 +                  (entry->v.tgt == v->tgt) &&
 +                  (entry->v.lun == v->lun))
 +                      return entry;
 +
 +      return NULL;
 +}
 +
  /*
    Add a new translation entry
  */
@@@ -873,7 -885,9 +903,7 @@@ static int scsiback_add_translation_ent
                                          char *phy, struct ids_tuple *v)
  {
        int err = 0;
 -      struct v2p_entry *entry;
        struct v2p_entry *new;
 -      struct list_head *head = &(info->v2p_entry_lists);
        unsigned long flags;
        char *lunp;
        unsigned long long unpacked_lun;
        spin_lock_irqsave(&info->v2p_lock, flags);
  
        /* Check double assignment to identical virtual ID */
 -      list_for_each_entry(entry, head, l) {
 -              if ((entry->v.chn == v->chn) &&
 -                  (entry->v.tgt == v->tgt) &&
 -                  (entry->v.lun == v->lun)) {
 -                      pr_warn("Virtual ID is already used. Assignment was not performed.\n");
 -                      err = -EEXIST;
 -                      goto out;
 -              }
 -
 +      if (scsiback_chk_translation_entry(info, v)) {
 +              pr_warn("Virtual ID is already used. Assignment was not performed.\n");
 +              err = -EEXIST;
 +              goto out;
        }
  
        /* Create a new translation entry and add to the list */
        new->v = *v;
        new->tpg = tpg;
        new->lun = unpacked_lun;
 -      list_add_tail(&new->l, head);
 +      list_add_tail(&new->l, &info->v2p_entry_lists);
  
  out:
        spin_unlock_irqrestore(&info->v2p_lock, flags);
  
  out_free:
 -      mutex_lock(&tpg->tv_tpg_mutex);
 -      tpg->tv_tpg_fe_count--;
 -      mutex_unlock(&tpg->tv_tpg_mutex);
 -
 -      if (err)
 +      if (err) {
 +              mutex_lock(&tpg->tv_tpg_mutex);
 +              tpg->tv_tpg_fe_count--;
 +              mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(new);
 +      }
  
        return err;
  }
@@@ -967,40 -986,39 +997,40 @@@ static void __scsiback_del_translation_
  }
  
  /*
 -  Delete the translation entry specfied
 +  Delete the translation entry specified
  */
  static int scsiback_del_translation_entry(struct vscsibk_info *info,
                                          struct ids_tuple *v)
  {
        struct v2p_entry *entry;
 -      struct list_head *head = &(info->v2p_entry_lists);
        unsigned long flags;
 +      int ret = 0;
  
        spin_lock_irqsave(&info->v2p_lock, flags);
        /* Find out the translation entry specified */
 -      list_for_each_entry(entry, head, l) {
 -              if ((entry->v.chn == v->chn) &&
 -                  (entry->v.tgt == v->tgt) &&
 -                  (entry->v.lun == v->lun)) {
 -                      goto found;
 -              }
 -      }
 -
 -      spin_unlock_irqrestore(&info->v2p_lock, flags);
 -      return 1;
 -
 -found:
 -      /* Delete the translation entry specfied */
 -      __scsiback_del_translation_entry(entry);
 +      entry = scsiback_chk_translation_entry(info, v);
 +      if (entry)
 +              __scsiback_del_translation_entry(entry);
 +      else
 +              ret = -ENOENT;
  
        spin_unlock_irqrestore(&info->v2p_lock, flags);
 -      return 0;
 +      return ret;
  }
  
  static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
                                char *phy, struct ids_tuple *vir, int try)
  {
 +      struct v2p_entry *entry;
 +      unsigned long flags;
 +
 +      if (try) {
 +              spin_lock_irqsave(&info->v2p_lock, flags);
 +              entry = scsiback_chk_translation_entry(info, vir);
 +              spin_unlock_irqrestore(&info->v2p_lock, flags);
 +              if (entry)
 +                      return;
 +      }
        if (!scsiback_add_translation_entry(info, phy, vir)) {
                if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
                                  "%d", XenbusStateInitialised)) {
@@@ -1353,24 -1371,20 +1383,20 @@@ static u32 scsiback_tpg_get_inst_index(
  
  static int scsiback_check_stop_free(struct se_cmd *se_cmd)
  {
-       /*
-        * Do not release struct se_cmd's containing a valid TMR pointer.
-        * These will be released directly in scsiback_device_action()
-        * with transport_generic_free_cmd().
-        */
-       if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-               return 0;
-       transport_generic_free_cmd(se_cmd, 0);
-       return 1;
+       return transport_generic_free_cmd(se_cmd, 0);
  }
  
  static void scsiback_release_cmd(struct se_cmd *se_cmd)
  {
-       struct vscsibk_pend *pending_req = container_of(se_cmd,
-                               struct vscsibk_pend, se_cmd);
+       struct se_session *se_sess = se_cmd->se_sess;
+       struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+       if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
+               struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+               kfree(tmr);
+       }
  
-       kmem_cache_free(scsiback_cachep, pending_req);
+       percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
  }
  
  static int scsiback_shutdown_session(struct se_session *se_sess)
@@@ -1494,61 -1508,49 +1520,49 @@@ static struct configfs_attribute *scsib
        NULL,
  };
  
+ static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
+                                 struct se_session *se_sess, void *p)
+ {
+       struct scsiback_tpg *tpg = container_of(se_tpg,
+                               struct scsiback_tpg, se_tpg);
+       tpg->tpg_nexus = p;
+       return 0;
+ }
  static int scsiback_make_nexus(struct scsiback_tpg *tpg,
                                const char *name)
  {
-       struct se_portal_group *se_tpg;
-       struct se_session *se_sess;
        struct scsiback_nexus *tv_nexus;
+       int ret = 0;
  
        mutex_lock(&tpg->tv_tpg_mutex);
        if (tpg->tpg_nexus) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
                pr_debug("tpg->tpg_nexus already exists\n");
-               return -EEXIST;
+               ret = -EEXIST;
+               goto out_unlock;
        }
-       se_tpg = &tpg->se_tpg;
  
        tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
        if (!tv_nexus) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unlock;
        }
-       /*
-        * Initialize the struct se_session pointer
-        */
-       tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
+       tv_nexus->tvn_se_sess = target_alloc_session(&tpg->se_tpg,
+                                                    VSCSI_DEFAULT_SESSION_TAGS,
+                                                    sizeof(struct vscsibk_pend),
+                                                    TARGET_PROT_NORMAL, name,
+                                                    tv_nexus, scsiback_alloc_sess_cb);
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(tv_nexus);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_unlock;
        }
-       se_sess = tv_nexus->tvn_se_sess;
-       /*
-        * Since we are running in 'demo mode' this call with generate a
-        * struct se_node_acl for the scsiback struct se_portal_group with
-        * the SCSI Initiator port name of the passed configfs group 'name'.
-        */
-       tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
-                               se_tpg, (unsigned char *)name);
-       if (!tv_nexus->tvn_se_sess->se_node_acl) {
-               mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_debug("core_tpg_check_initiator_node_acl() failed for %s\n",
-                        name);
-               goto out;
-       }
-       /* Now register the TCM pvscsi virtual I_T Nexus as active. */
-       transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
-                       tv_nexus->tvn_se_sess, tv_nexus);
-       tpg->tpg_nexus = tv_nexus;
  
+ out_unlock:
        mutex_unlock(&tpg->tv_tpg_mutex);
-       return 0;
- out:
-       transport_free_session(se_sess);
-       kfree(tv_nexus);
-       return -ENOMEM;
+       return ret;
  }
  
  static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
@@@ -1866,16 -1868,6 +1880,6 @@@ static struct xenbus_driver scsiback_dr
        .otherend_changed       = scsiback_frontend_changed
  };
  
- static void scsiback_init_pend(void *p)
- {
-       struct vscsibk_pend *pend = p;
-       int i;
-       memset(pend, 0, sizeof(*pend));
-       for (i = 0; i < VSCSI_MAX_GRANTS; i++)
-               pend->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
- }
  static int __init scsiback_init(void)
  {
        int ret;
        pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
                 VSCSI_VERSION, utsname()->sysname, utsname()->machine);
  
-       scsiback_cachep = kmem_cache_create("vscsiif_cache",
-               sizeof(struct vscsibk_pend), 0, 0, scsiback_init_pend);
-       if (!scsiback_cachep)
-               return -ENOMEM;
        ret = xenbus_register_backend(&scsiback_driver);
        if (ret)
-               goto out_cache_destroy;
+               goto out;
  
        ret = target_register_template(&scsiback_ops);
        if (ret)
  
  out_unregister_xenbus:
        xenbus_unregister_driver(&scsiback_driver);
- out_cache_destroy:
-       kmem_cache_destroy(scsiback_cachep);
+ out:
        pr_err("%s: error %d\n", __func__, ret);
        return ret;
  }
@@@ -1920,7 -1906,6 +1918,6 @@@ static void __exit scsiback_exit(void
        }
        target_unregister_template(&scsiback_ops);
        xenbus_unregister_driver(&scsiback_driver);
-       kmem_cache_destroy(scsiback_cachep);
  }
  
  module_init(scsiback_init);
index 1b09cac065085cbae02f718bd63edd7d266048bc,fca03b993bec083b2e5d36efd08dd19bf3433004..3e0dd86360a24bd45889d57c6609ba40ac2681f9
@@@ -144,12 -144,6 +144,6 @@@ enum se_cmd_flags_table 
        SCF_USE_CPUID                   = 0x00800000,
  };
  
- /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
- enum transport_lunflags_table {
-       TRANSPORT_LUNFLAGS_READ_ONLY            = 0x01,
-       TRANSPORT_LUNFLAGS_READ_WRITE           = 0x02,
- };
  /*
   * Used by transport_send_check_condition_and_sense()
   * to signal which ASC/ASCQ sense payload should be built.
@@@ -560,6 -554,7 +554,6 @@@ struct se_node_acl 
        struct config_group     acl_auth_group;
        struct config_group     acl_param_group;
        struct config_group     acl_fabric_stat_group;
 -      struct config_group     *acl_default_groups[5];
        struct list_head        acl_list;
        struct list_head        acl_sess_list;
        struct completion       acl_free_comp;
@@@ -633,11 -628,10 +627,10 @@@ struct se_lun_acl 
  };
  
  struct se_dev_entry {
-       /* See transport_lunflags_table */
        u64                     mapped_lun;
        u64                     pr_res_key;
        u64                     creation_time;
-       u32                     lun_flags;
+       bool                    lun_access_ro;
        u32                     attach_count;
        atomic_long_t           total_cmds;
        atomic_long_t           read_bytes;
@@@ -711,7 -705,7 +704,7 @@@ struct se_lun 
        u64                     unpacked_lun;
  #define SE_LUN_LINK_MAGIC                     0xffff7771
        u32                     lun_link_magic;
-       u32                     lun_access;
+       bool                    lun_access_ro;
        u32                     lun_index;
  
        /* RELATIVE TARGET PORT IDENTIFER */
@@@ -886,6 -880,7 +879,6 @@@ struct se_portal_group 
        const struct target_core_fabric_ops *se_tpg_tfo;
        struct se_wwn           *se_tpg_wwn;
        struct config_group     tpg_group;
 -      struct config_group     *tpg_default_groups[7];
        struct config_group     tpg_lun_group;
        struct config_group     tpg_np_group;
        struct config_group     tpg_acl_group;
@@@ -921,6 -916,7 +914,6 @@@ static inline struct se_portal_group *p
  struct se_wwn {
        struct target_fabric_configfs *wwn_tf;
        struct config_group     wwn_group;
 -      struct config_group     *wwn_default_groups[2];
        struct config_group     fabric_stat_group;
  };