/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_BITS (256 * 1024)
#define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE)
#define DATA_BLOCK_INIT_BITS 128

/* The total size of the ring is 8M + 256K * PAGE_SIZE */
#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

/* Default maximum of the global data blocks (512K * PAGE_SIZE) */
#define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024)

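/*
 * For orientation, a sketch derived from the constants above and from
 * tcmu_configure_device() below (not a normative layout definition):
 *
 *   mb_addr                                 data_off = CMDR_SIZE
 *   |                                       |
 *   v                                       v
 *   +--------------+------------------------+-------------------------+
 *   | tcmu_mailbox | command ring           | data area               |
 *   | (CMDR_OFF)   | (CMDR_SIZE - CMDR_OFF) | (DATA_BLOCK_BITS blocks |
 *   |              |                        |  of DATA_BLOCK_SIZE)    |
 *   +--------------+------------------------+-------------------------+
 *
 * The whole region is TCMU_RING_SIZE bytes and is what userspace mmap()s
 * through UIO.
 */
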
static u8 tcmu_kern_cmd_reply_supported;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;

	wait_queue_head_t wait_cmdr;
	struct mutex cmdr_lock;

	bool waiting_global;
	uint32_t dbi_max;
	uint32_t dbi_thresh;
	DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS);
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list timeout;
	unsigned int cmd_time_out;

	struct list_head timedout_entry;

	spinlock_t nl_cmd_lock;
	struct tcmu_nl_cmd curr_nl_cmd;
	/* wake up threads waiting on curr_nl_cmd */
	wait_queue_head_t nl_cmd_wq;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct work_struct tcmu_unmap_work;

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

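/*
 * Userspace acks ADDED/REMOVED/RECONFIG netlink events with the *_DONE
 * commands handled below. The reply is matched against the device's
 * curr_nl_cmd and wakes whoever is blocked in tcmu_wait_genl_cmd_reply().
 */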
static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct se_device *dev;
	struct tcmu_dev *udev;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;
	bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE);

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	dev = target_find_device(dev_id, !is_removed);
	if (!dev) {
		printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		return -ENODEV;
	}
	udev = TCMU_DEV(dev);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd = &udev->curr_nl_cmd;

	pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id,
		 nl_cmd->cmd, completed_cmd, rc);

	if (nl_cmd->cmd != completed_cmd) {
		printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n",
		       completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
	} else {
		nl_cmd->status = rc;
	}

	spin_unlock(&udev->nl_cmd_lock);
	if (!is_removed)
		target_undepend_item(&dev->dev_group.cg_item);
	if (!ret)
		complete(&nl_cmd->complete);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd	= TCMU_CMD_SET_FEATURES,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_set_features,
	},
	{
		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_add_dev_done,
	},
	{
		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_rm_dev_done,
	},
	{
		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
		.flags	= GENL_ADMIN_PERM,
		.policy	= tcmu_attr_policy,
		.doit	= tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

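/*
 * dbi ("data block index") bookkeeping: tcmu_cmd->dbi[] records which
 * data-area blocks a command owns, and dbi_cur is a cursor into that
 * array. The macros above advance the cursor, so walkers must call
 * tcmu_cmd_reset_dbi_cur() before iterating a command's blocks again.
 */
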
static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
					TCMU_GLOBAL_MAX_BLOCKS) {
			atomic_dec(&global_db_count);
			return false;
		}

		/* try to get new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	udev->waiting_global = false;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			goto err;
	}
	return true;

err:
	udev->waiting_global = true;
	schedule_work(&tcmu_unmap_work);
	return false;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(virt_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
			   struct tcmu_dev *udev)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
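/*
 * Example: with size = 8 and head = 6, UPDATE_HEAD(head, 4, 8) stores
 * (6 + 4) % 8 = 2, i.e. the head wraps without requiring a power-of-2
 * ring size. The smp_store_release() is expected to pair with an
 * acquire-load on the other side, so entry contents written beforehand
 * are visible once the new head value is observed.
 */
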
/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
		int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

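/*
 * Copy an sg list into data-area blocks (or merely describe them, when
 * copy_data is false), building the iovec array that userspace will see.
 * Consecutive blocks whose user offsets abut are merged into one iovec.
 */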
static void scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				(*iov)->iov_len += copy_bytes;
			} else {
				new_iov(iov, iov_cnt, udev);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}
			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}
	if (to)
		kunmap_atomic(to);
}

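/*
 * Inverse of scatter_data_area(): copy data-area blocks back into the
 * command's sg list once userspace has filled them, e.g. for a READ or
 * for the Data-In half of a BIDI command.
 */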
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			offset = DATA_BLOCK_SIZE - block_remaining;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to + sg->length - sg_remaining, from + offset,
					copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
	}
	if (from)
		kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh));
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
		size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < data_needed) {
		unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh;
		unsigned long grow;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
					blocks_left * DATA_BLOCK_SIZE,
					data_needed);
			return false;
		}

		/* Try to expand the thresh */
		if (!udev->dbi_thresh) {
			/* From idle state */
			uint32_t init_thresh = DATA_BLOCK_INIT_BITS;

			udev->dbi_thresh = max(blocks_needed, init_thresh);
		} else {
			/*
			 * Grow the data area by max(blocks needed,
			 * dbi_thresh / 2), but limited to the max
			 * DATA_BLOCK_BITS size.
			 */
			grow = max(blocks_needed, udev->dbi_thresh / 2);
			udev->dbi_thresh += grow;
			if (udev->dbi_thresh > DATA_BLOCK_BITS)
				udev->dbi_thresh = DATA_BLOCK_BITS;
		}
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

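/* Allocate a cmd id and, if a timeout is configured, (re)arm the device timer. */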
static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned long tmo = udev->cmd_time_out;
	int cmd_id;

	if (tcmu_cmd->cmd_id)
		return 0;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");
		return cmd_id;
	}
	tcmu_cmd->cmd_id = cmd_id;

	if (!tmo)
		return 0;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	mod_timer(&udev->timeout, tcmu_cmd->deadline);
	return 0;
}

static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are free in
	 * the bitmap & global data pool; the size calculated here is
	 * only used for the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	mutex_lock(&udev->cmdr_lock);

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_INVALID_CDB_FIELD;
	}

	while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		int ret;
		DEFINE_WAIT(__wait);

		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);

		prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);

		pr_debug("sleeping for ring space\n");
		mutex_unlock(&udev->cmdr_lock);
		if (udev->cmd_time_out)
			ret = schedule_timeout(
					msecs_to_jiffies(udev->cmd_time_out));
		else
			ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
		finish_wait(&udev->wait_cmdr, &__wait);
		if (!ret) {
			pr_warn("tcmu: command timed out\n");
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		mutex_lock(&udev->cmdr_lock);

		/* We dropped cmdr_lock, cmd_head is stale */
		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(*entry));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
			  se_cmd->t_data_nents, &iov, &iov_cnt,
			  copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
				  false);
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	ret = tcmu_setup_cmd_timer(tcmu_cmd);
	if (ret) {
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		mutex_unlock(&udev->cmdr_lock);
		return TCM_OUT_OF_RESOURCES;
	}
	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, sizeof(*entry));

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	mutex_unlock(&udev->cmdr_lock);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	if (udev->cmd_time_out)
		mod_timer(&udev->timeout, round_jiffies_up(jiffies +
			  msecs_to_jiffies(udev->cmd_time_out)));

	return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret != TCM_NO_SENSE) {
		pr_err("TCMU: Could not queue command\n");

		tcmu_free_cmd(tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

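/*
 * Walk the ring from cmdr_last_cleaned to the cmd_tail userspace last
 * published, completing each finished entry. Called with cmdr_lock held
 * (from tcmu_irqcontrol() and the unmap worker).
 */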
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	return 0;
}

static void tcmu_device_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, timeout);

	pr_debug("%s cmd timeout has expired\n", udev->name);

	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_work(&tcmu_unmap_work);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;

	init_waitqueue_head(&udev->wait_cmdr);
	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->timedout_entry);
	idr_init(&udev->commands);

	timer_setup(&udev->timeout, tcmu_device_timedout, 0);

	init_waitqueue_head(&udev->nl_cmd_wq);
	spin_lock_init(&udev->nl_cmd_lock);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&tcmu_dev->cmdr_lock);
	tcmu_handle_completions(tcmu_dev);
	mutex_unlock(&tcmu_dev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

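/*
 * Look up (or, for stray userspace accesses, allocate) the page backing
 * data block @dbi on behalf of the fault handler below.
 */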
static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;
	int ret;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Normally we shouldn't get here: it happens only when userspace
	 * has touched blocks outside the tcmu_cmd's data iov[]. We then
	 * return zeroed + unmapped pages.
	 */
	pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
	pr_warn("This is most likely a userspace bug; please check it!\n");

	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
		udev->dbi_thresh = dbi + 1;
		udev->dbi_max = dbi;
	}

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page) {
			mutex_unlock(&udev->cmdr_lock);
			return NULL;
		}

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret) {
			mutex_unlock(&udev->cmdr_lock);
			__free_page(page);
			return NULL;
		}

		/*
		 * Since this case is rare in page fault routine, here we
		 * will allow the global_db_count >= TCMU_GLOBAL_MAX_BLOCKS
		 * to reduce possible page fault call trace.
		 */
		atomic_inc(&global_db_count);
	}
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_NOPAGE;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
				int start, int end)
{
	int i;
	struct page *page;

	for (i = start; i < end; i++) {
		page = radix_tree_delete(blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	idr_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	mutex_unlock(&udev->cmdr_lock);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");
	/* release ref from open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
	return 0;
}

static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

relock:
	spin_lock(&udev->nl_cmd_lock);

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		spin_unlock(&udev->nl_cmd_lock);
		pr_debug("sleeping for open nl cmd\n");
		wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC));
		goto relock;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	init_completion(&nl_cmd->complete);

	spin_unlock(&udev->nl_cmd_lock);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;
	DEFINE_WAIT(__wait);

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	spin_lock(&udev->nl_cmd_lock);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	nl_cmd->status = 0;
	spin_unlock(&udev->nl_cmd_lock);

	wake_up_all(&udev->nl_cmd_wq);

	return ret;
}

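/*
 * Broadcast a config event to the "config" multicast group and, when the
 * daemon advertises reply support, block until it acks via the *_DONE
 * netlink commands handled near the top of this file.
 */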
static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd,
			      int reconfig_attr, const void *reconfig_data)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	if (cmd == TCMU_CMD_RECONFIG_DEVICE) {
		switch (reconfig_attr) {
		case TCMU_ATTR_DEV_CFG:
			ret = nla_put_string(skb, reconfig_attr, reconfig_data);
			break;
		case TCMU_ATTR_DEV_SIZE:
			ret = nla_put_u64_64bit(skb, reconfig_attr,
						*((u64 *)reconfig_data),
						TCMU_ATTR_PAD);
			break;
		case TCMU_ATTR_WRITECACHE:
			ret = nla_put_u8(skb, reconfig_attr,
					 *((u8 *)reconfig_data));
			break;
		default:
			BUG();
		}

		if (ret < 0)
			goto free_skb;
	}

	genlmsg_end(skb, msg_header);

	tcmu_init_genl_cmd_reply(udev, cmd);

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				TCMU_MCGRP_CONFIG, GFP_KERNEL);
	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;
	if (!ret)
		ret = tcmu_wait_genl_cmd_reply(udev);

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	size_t size, used;
	char *str;

	info = &udev->uio_info;
	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = DATA_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */
	udev->waiting_global = false;

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* The user can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
	return udev->uio_info.uio_dev ? true : false;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->timeout);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_hw_max_sectors, "hw_max_sectors=%u"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_err, NULL}
};

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	unsigned long tmp_ul;
	char *arg_p;
	int ret;

	arg_p = match_strdup(arg);
	if (!arg_p)
		return -ENOMEM;

	ret = kstrtoul(arg_p, 0, &tmp_ul);
	kfree(arg_p);
	if (ret < 0) {
		pr_err("kstrtoul() failed for dev attrib\n");
		return ret;
	}
	if (!tmp_ul) {
		pr_err("dev attrib must be nonzero\n");
		return -EINVAL;
	}
	*dev_attrib = tmp_ul;
	return 0;
}

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoint() failed for nl_reply_supported=\n");
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

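/*
 * For illustration only (a hypothetical configfs session, not part of
 * this file): the options above are parsed from writes such as
 *
 *   echo "dev_config=foo/bar,dev_size=1073741824" > \
 *       /sys/kernel/config/target/core/user_0/mydev/control
 */
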
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_CFG, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_DEV_SIZE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE,
					 TCMU_ATTR_WRITECACHE, &val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);

static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

static struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= NULL,
};

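/*
 * Shrink each device's data area: complete finished commands, drop block
 * pages above the highest set bit in data_bitmap, and unmap that range
 * from userspace so stale mappings cannot touch freed pages.
 */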
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	uint32_t start, end, block;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		/* Try to complete the finished commands first */
		tcmu_handle_completions(udev);

		/* Skip udevs that are waiting for the global pool or are idle */
		if (udev->waiting_global || !udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so there is
			 * no need to shrink any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the data area from off */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);
	}
	mutex_unlock(&root_udev_mutex);
}

static void run_cmdr_queues(void)
{
	struct tcmu_dev *udev;

	/*
	 * Try to wake up the udevs who are waiting
	 * for the global data block pool.
	 */
	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);
		if (!udev->waiting_global) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}
		mutex_unlock(&udev->cmdr_lock);

		wake_up(&udev->wait_cmdr);
	}
	mutex_unlock(&root_udev_mutex);
}

static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);
		idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}

static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
	run_cmdr_queues();
}

static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
		tcmu_attrs[i] = tcmu_attrib_attrs[k];
		i++;
	}
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	cancel_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);