/*******************************************************************************
 * Filename:  target_core_configfs.c
 *
 * This file contains ConfigFS logic for the Generic Target Engine project.
 *
 * (c) Copyright 2008-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/spinlock.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
#include "target_core_xcopy.h"
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}

#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
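
/*
 * For illustration only (hand-expanded here, not part of the build): an
 * invocation such as TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL) further
 * down expands to roughly:
 *
 *	static void target_core_setup_dev_attrib_cit(struct target_backend *tb)
 *	{
 *		struct config_item_type *cit = &tb->tb_dev_attrib_cit;
 *
 *		cit->ct_item_ops = NULL;
 *		cit->ct_group_ops = NULL;
 *		cit->ct_attrs = tb->ops->tb_dev_attrib_attrs;
 *		cit->ct_owner = tb->ops->owner;
 *		pr_debug("Setup generic %s\n", "dev_attrib");
 *	}
 */
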
extern struct t10_alua_lu_gp *default_lu_gp;

static LIST_HEAD(g_tf_list);
static DEFINE_MUTEX(g_tf_lock);

static struct config_group target_core_hbagroup;
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;

static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
 * Attributes for /sys/kernel/config/target/
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
static char db_root_stage[DB_ROOT_LEN];

static ssize_t target_core_item_dbroot_show(struct config_item *item,
					    char *page)
{
	return sprintf(page, "%s\n", db_root);
}

static ssize_t target_core_item_dbroot_store(struct config_item *item,
					const char *page, size_t count)
{
	ssize_t read_bytes;
	struct file *fp;

	mutex_lock(&g_tf_lock);
	if (!list_empty(&g_tf_list)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot be changed: target drivers registered");
		return -EINVAL;
	}

	if (count > (DB_ROOT_LEN - 1)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
		       (int)count, DB_ROOT_LEN - 1);
		return -EINVAL;
	}

	read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
	if (!read_bytes) {
		mutex_unlock(&g_tf_lock);
		return -EINVAL;
	}
	if (db_root_stage[read_bytes - 1] == '\n')
		db_root_stage[read_bytes - 1] = '\0';

	/* validate new db root before accepting it */
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return -EINVAL;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		mutex_unlock(&g_tf_lock);
		pr_err("db_root: not a directory: %s\n", db_root_stage);
		return -EINVAL;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, read_bytes);

	mutex_unlock(&g_tf_lock);

	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);

	return read_bytes;
}

CONFIGFS_ATTR(target_core_item_, dbroot);
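
/*
 * Illustrative usage from userspace, assuming the default configfs mount
 * point; the new path must name an existing directory and must be written
 * before any fabric driver has registered itself:
 *
 *	echo /etc/target > /sys/kernel/config/target/dbroot
 */
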
static struct target_fabric_configfs *target_core_get_fabric(
	const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return NULL;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(tf, &g_tf_list, tf_list) {
		if (!strcmp(tf->tf_ops->name, name)) {
			atomic_inc(&tf->tf_access_cnt);
			mutex_unlock(&g_tf_lock);
			return tf;
		}
	}
	mutex_unlock(&g_tf_lock);

	return NULL;
}
/*
 * Called from struct target_core_group_ops->make_group()
 */
static struct config_group *target_core_register_fabric(
	struct config_group *group,
	const char *name)
{
	struct target_fabric_configfs *tf;
	int ret;

	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
			" %s\n", group, name);

	tf = target_core_get_fabric(name);
	if (!tf) {
		pr_debug("target_core_register_fabric() trying autoload for %s\n",
			 name);

		/*
		 * Below are some hardcoded request_module() calls to
		 * automatically load fabric modules when the following is
		 * called:
		 *
		 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
		 *
		 * Note that this does not limit which TCM fabric module can be
		 * registered, but simply provides auto loading logic for modules
		 * with mkdir(2) system calls with known TCM fabric modules.
		 */

		if (!strncmp(name, "iscsi", 5)) {
			/*
			 * Automatically load the LIO Target fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/iscsi
			 */
			ret = request_module("iscsi_target_mod");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " iscsi_target_mod.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		} else if (!strncmp(name, "loopback", 8)) {
			/*
			 * Automatically load the tcm_loop fabric module when the
			 * following is called:
			 *
			 * mkdir -p $CONFIGFS/target/loopback
			 */
			ret = request_module("tcm_loop");
			if (ret < 0) {
				pr_debug("request_module() failed for"
					 " tcm_loop.ko: %d\n", ret);
				return ERR_PTR(-EINVAL);
			}
		}

		tf = target_core_get_fabric(name);
	}

	if (!tf) {
		pr_debug("target_core_get_fabric() failed for %s\n",
			 name);
		return ERR_PTR(-EINVAL);
	}
	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
			" %s\n", tf->tf_ops->name);
	/*
	 * On a successful target_core_get_fabric() look up, the returned
	 * struct target_fabric_configfs *tf will contain a usage reference.
	 */
	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
			&tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);

	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
			&tf->tf_discovery_cit);
	configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);

	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
		 config_item_name(&tf->tf_group.cg_item));
	return &tf->tf_group;
}
/*
 * Called from struct target_core_group_ops->drop_item()
 */
static void target_core_deregister_fabric(
	struct config_group *group,
	struct config_item *item)
{
	struct target_fabric_configfs *tf = container_of(
		to_config_group(item), struct target_fabric_configfs, tf_group);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
		" tf list\n", config_item_name(item));

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
			" %s\n", tf->tf_ops->name);
	atomic_dec(&tf->tf_access_cnt);

	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
			" %s\n", config_item_name(item));

	configfs_remove_default_groups(&tf->tf_group);
	config_item_put(item);
}

static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};

int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);

void target_undepend_item(struct config_item *item)
{
	return configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/

static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
{
	if (!tfo->name) {
		pr_err("Missing tfo->name\n");
		return -EINVAL;
	}
	if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
		pr_err("Passed name: %s exceeds TARGET_FABRIC"
			"_NAME_SIZE\n", tfo->name);
		return -EINVAL;
	}
	if (!tfo->fabric_name) {
		pr_err("Missing tfo->fabric_name\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_wwn) {
		pr_err("Missing tfo->tpg_get_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_tag) {
		pr_err("Missing tfo->tpg_get_tag()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode) {
		pr_err("Missing tfo->tpg_check_demo_mode()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_cache) {
		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_demo_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_check_prod_mode_write_protect) {
		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
		return -EINVAL;
	}
	if (!tfo->tpg_get_inst_index) {
		pr_err("Missing tfo->tpg_get_inst_index()\n");
		return -EINVAL;
	}
	if (!tfo->release_cmd) {
		pr_err("Missing tfo->release_cmd()\n");
		return -EINVAL;
	}
	if (!tfo->sess_get_index) {
		pr_err("Missing tfo->sess_get_index()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending) {
		pr_err("Missing tfo->write_pending()\n");
		return -EINVAL;
	}
	if (!tfo->write_pending_status) {
		pr_err("Missing tfo->write_pending_status()\n");
		return -EINVAL;
	}
	if (!tfo->set_default_node_attributes) {
		pr_err("Missing tfo->set_default_node_attributes()\n");
		return -EINVAL;
	}
	if (!tfo->get_cmd_state) {
		pr_err("Missing tfo->get_cmd_state()\n");
		return -EINVAL;
	}
	if (!tfo->queue_data_in) {
		pr_err("Missing tfo->queue_data_in()\n");
		return -EINVAL;
	}
	if (!tfo->queue_status) {
		pr_err("Missing tfo->queue_status()\n");
		return -EINVAL;
	}
	if (!tfo->queue_tm_rsp) {
		pr_err("Missing tfo->queue_tm_rsp()\n");
		return -EINVAL;
	}
	if (!tfo->aborted_task) {
		pr_err("Missing tfo->aborted_task()\n");
		return -EINVAL;
	}
	if (!tfo->check_stop_free) {
		pr_err("Missing tfo->check_stop_free()\n");
		return -EINVAL;
	}
	/*
	 * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
	 * target_core_fabric_configfs.c WWN+TPG group context code.
	 */
	if (!tfo->fabric_make_wwn) {
		pr_err("Missing tfo->fabric_make_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_wwn) {
		pr_err("Missing tfo->fabric_drop_wwn()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_make_tpg) {
		pr_err("Missing tfo->fabric_make_tpg()\n");
		return -EINVAL;
	}
	if (!tfo->fabric_drop_tpg) {
		pr_err("Missing tfo->fabric_drop_tpg()\n");
		return -EINVAL;
	}

	return 0;
}
int target_register_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *tf;
	int ret;

	ret = target_fabric_tf_ops_check(fo);
	if (ret)
		return ret;

	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
	if (!tf) {
		pr_err("%s: could not allocate memory!\n", __func__);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&tf->tf_list);
	atomic_set(&tf->tf_access_cnt, 0);
	tf->tf_ops = fo;
	target_fabric_setup_cits(tf);

	mutex_lock(&g_tf_lock);
	list_add_tail(&tf->tf_list, &g_tf_list);
	mutex_unlock(&g_tf_lock);

	return 0;
}
EXPORT_SYMBOL(target_register_template);

void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/

static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}

/* Start functions for struct config_item_type tb_dev_attrib_cit */

#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = val;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page, \
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
									\
	da->_name = flag;						\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
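
/*
 * For illustration only: DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw)
 * above stamps out, roughly, the following store handler (hand-expanded
 * here, not part of the build):
 *
 *	static ssize_t emulate_caw_store(struct config_item *item,
 *			const char *page, size_t count)
 *	{
 *		struct se_dev_attrib *da = to_attrib(item);
 *		bool flag;
 *		int ret;
 *
 *		ret = strtobool(page, &flag);
 *		if (ret < 0)
 *			return ret;
 *		da->emulate_caw = flag;
 *		return count;
 *	}
 */
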
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

static ssize_t emulate_model_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	da->emulate_model_alias = flag;
	return count;
}
static ssize_t emulate_write_cache_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag && da->da_dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	da->emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			da->da_dev, flag);
	return count;
}

static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 0 && val != 1 && val != 2) {
		pr_err("Illegal value %d\n", val);
		return -EINVAL;
	}

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_ua_intlck_ctrl = val;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		da->da_dev, val);
	return count;
}
static ssize_t emulate_tas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	da->emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		da->da_dev, flag ? "Enabled" : "Disabled");

	return count;
}

static ssize_t emulate_tpu_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		da->da_dev, flag);
	return count;
}

static ssize_t emulate_tpws_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	da->emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
static ssize_t pi_prot_format_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag)
		return count;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	ret = dev->transport->format_prot(dev);
	if (ret)
		return ret;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
	return count;
}

static ssize_t pi_prot_verify_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (!flag) {
		da->pi_prot_verify = flag;
		return count;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!da->pi_prot_type) {
		pr_warn("DIF protection not supported by backend, ignoring\n");
		return count;
	}
	da->pi_prot_verify = flag;

	return count;
}
static ssize_t force_pr_aptpl_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;
	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	da->force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
	return count;
}

static ssize_t emulate_rest_reord_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", da->da_dev);
		return -ENOSYS;
	}
	da->emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
		da->da_dev, flag);
	return count;
}
static ssize_t unmap_zeroes_data_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	bool flag;
	int ret;

	ret = strtobool(page, &flag);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
		       " unmap_zeroes_data while export_count is %d\n",
		       da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_configure_device().
	 */
	if (flag && !da->max_unmap_block_desc_count) {
		pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
		       " because max_unmap_block_desc_count is zero\n",
		       da->da_dev);
		return -ENOSYS;
	}
	da->unmap_zeroes_data = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
		 da->da_dev, flag);
	return count;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
static ssize_t queue_depth_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	struct se_device *dev = da->da_dev;
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!val) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (val > dev->dev_attrib.queue_depth) {
		if (val > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, val,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	da->queue_depth = dev->queue_depth = val;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
	return count;
}
static ssize_t optimal_sectors_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}
	if (val > da->hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n",
			da->da_dev, val, da->hw_max_sectors);
		return -EINVAL;
	}

	da->optimal_sectors = val;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			da->da_dev, val);
	return count;
}

static ssize_t block_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	u32 val;
	int ret;

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (da->da_dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			da->da_dev, da->da_dev->export_count);
		return -EINVAL;
	}

	if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			da->da_dev, val);
		return -EINVAL;
	}

	da->block_size = val;
	if (da->max_bytes_per_io)
		da->hw_max_sectors = da->max_bytes_per_io / val;

	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			da->da_dev, val);
	return count;
}
static ssize_t alua_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
}

static ssize_t pgr_support_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = to_attrib(item);
	u8 flags = da->da_dev->transport->transport_flags;

	return snprintf(page, PAGE_SIZE, "%d\n",
			flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
}
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR_WO(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR_RO(, alua_support);
CONFIGFS_ATTR_RO(, pgr_support);
/*
 * dev_attrib attributes for devices using the target core SBC/SPC
 * interpreter.  Any backend using spc_parse_cdb should be using
 * these.
 */
struct configfs_attribute *sbc_attrib_attrs[] = {
	&attr_emulate_model_alias,
	&attr_emulate_dpo,
	&attr_emulate_fua_write,
	&attr_emulate_fua_read,
	&attr_emulate_write_cache,
	&attr_emulate_ua_intlck_ctrl,
	&attr_emulate_tas,
	&attr_emulate_tpu,
	&attr_emulate_tpws,
	&attr_emulate_caw,
	&attr_emulate_3pc,
	&attr_emulate_pr,
	&attr_pi_prot_type,
	&attr_hw_pi_prot_type,
	&attr_pi_prot_format,
	&attr_pi_prot_verify,
	&attr_enforce_pr_isids,
	&attr_is_nonrot,
	&attr_emulate_rest_reord,
	&attr_force_pr_aptpl,
	&attr_hw_block_size,
	&attr_block_size,
	&attr_hw_max_sectors,
	&attr_optimal_sectors,
	&attr_hw_queue_depth,
	&attr_queue_depth,
	&attr_max_unmap_lba_count,
	&attr_max_unmap_block_desc_count,
	&attr_unmap_granularity,
	&attr_unmap_granularity_alignment,
	&attr_unmap_zeroes_data,
	&attr_max_write_same_len,
	&attr_alua_support,
	&attr_pgr_support,
	NULL,
};
EXPORT_SYMBOL(sbc_attrib_attrs);
/*
 * Minimal dev_attrib attributes for devices passing through CDBs.
 * In this case we only provide a few read-only attributes for
 * backwards compatibility.
 */
struct configfs_attribute *passthrough_attrib_attrs[] = {
	&attr_hw_pi_prot_type,
	&attr_hw_block_size,
	&attr_hw_max_sectors,
	&attr_hw_queue_depth,
	&attr_emulate_pr,
	NULL,
};
EXPORT_SYMBOL(passthrough_attrib_attrs);
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);

/* End functions for struct config_item_type tb_dev_attrib_cit */

/* Start functions for struct config_item_type tb_dev_wwn_cit */

static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
/*
 * VPD page 0x80 Unit serial
 */
static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
		&to_t10_wwn(item)->unit_serial[0]);
}

static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct se_device *dev = t10_wwn->t10_dev;
	unsigned char buf[INQUIRY_VPD_SERIAL_LEN];

	/*
	 * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
	 * from the struct scsi_device level firmware, do not allow
	 * VPD Unit Serial to be emulated.
	 *
	 * Note this struct scsi_device could also be emulating VPD
	 * information from its drivers/scsi LLD.  But for now we assume
	 * it is doing 'the right thing' wrt a world wide unique
	 * VPD Unit Serial Number that OS dependent multipath can depend on.
	 */
	if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
		pr_err("Underlying SCSI device firmware provided VPD"
			" Unit Serial, ignoring request\n");
		return -EOPNOTSUPP;
	}

	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
		pr_err("Emulated VPD Unit Serial exceeds"
		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
		return -EOVERFLOW;
	}
	/*
	 * Check to see if any active $FABRIC_MOD exports exist.  If they
	 * do exist, fail here as changing this information on the fly
	 * (underneath the initiator side OS dependent multipath code)
	 * could cause negative effects.
	 */
	if (dev->export_count) {
		pr_err("Unable to set VPD Unit Serial while"
			" active %d $FABRIC_MOD exports exist\n",
			dev->export_count);
		return -EINVAL;
	}

	/*
	 * This currently assumes ASCII encoding for emulated VPD Unit Serial.
	 *
	 * Also, strip any newline added from the userspace
	 * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
	 */
	memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
	snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
	snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
			"%s", strstrip(buf));
	dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;

	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
			" %s\n", dev->t10_wwn.unit_serial);

	return count;
}
/*
 * VPD page 0x83 Protocol Identifier
 */
static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
		char *page)
{
	struct t10_wwn *t10_wwn = to_t10_wwn(item);
	struct t10_vpd *vpd;
	unsigned char buf[VPD_TMP_BUF_SIZE];
	ssize_t len = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	spin_lock(&t10_wwn->t10_vpd_lock);
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
		if (!vpd->protocol_identifier_set)
			continue;

		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
	}
	spin_unlock(&t10_wwn->t10_vpd_lock);

	return len;
}
/*
 * Generic wrapper for dumping VPD identifiers by association.
 */
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)				\
static ssize_t target_wwn_##_name##_show(struct config_item *item,	\
		char *page)						\
{									\
	struct t10_wwn *t10_wwn = to_t10_wwn(item);			\
	struct t10_vpd *vpd;						\
	unsigned char buf[VPD_TMP_BUF_SIZE];				\
	ssize_t len = 0;						\
									\
	spin_lock(&t10_wwn->t10_vpd_lock);				\
	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {	\
		if (vpd->association != _assoc)				\
			continue;					\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
									\
		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE);	\
		if (len + strlen(buf) >= PAGE_SIZE)			\
			break;						\
		len += sprintf(page+len, "%s", buf);			\
	}								\
	spin_unlock(&t10_wwn->t10_vpd_lock);				\
									\
	return len;							\
}

/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);

/* End functions for struct config_item_type tb_dev_wwn_cit */

/* Start functions for struct config_item_type tb_dev_pr_cit */

static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}

static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	ssize_t len;

	se_nacl = dev->dev_reserved_node_acl;
	if (se_nacl) {
		len = sprintf(page,
			      "SPC-2 Reservation: %s Initiator: %s\n",
			      se_nacl->se_tpg->se_tpg_tfo->fabric_name,
			      se_nacl->initiatorname);
	} else {
		len = sprintf(page, "No SPC-2 Reservation holder\n");
	}
	return len;
}
static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	int ret;

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "Passthrough\n");

	spin_lock(&dev->dev_reservation_lock);
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		ret = target_core_dev_pr_show_spc2_res(dev, page);
	else
		ret = target_core_dev_pr_show_spc3_res(dev, page);
	spin_unlock(&dev->dev_reservation_lock);
	return ret;
}

static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	if (!dev->dev_pr_res_holder) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	} else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
		len = sprintf(page, "SPC-3 Reservation: All Target"
			" Ports registration\n");
	} else {
		len = sprintf(page, "SPC-3 Reservation: Single"
			" Target Port registration\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	const struct target_core_fabric_ops *tfo;
	struct t10_pr_registration *pr_reg;
	unsigned char buf[384];
	char i_buf[PR_REG_ISID_ID_LEN];
	ssize_t len = 0;
	int reg_count = 0;

	len += sprintf(page+len, "SPC-3 PR Registrations:\n");

	spin_lock(&dev->t10_pr.registration_lock);
	list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
			pr_reg_list) {

		memset(buf, 0, 384);
		memset(i_buf, 0, PR_REG_ISID_ID_LEN);
		tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
		core_pr_dump_initiator_port(pr_reg, i_buf,
					PR_REG_ISID_ID_LEN);
		sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
			tfo->fabric_name,
			pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
			pr_reg->pr_res_generation);

		if (len + strlen(buf) >= PAGE_SIZE)
			break;

		len += sprintf(page+len, "%s", buf);
		reg_count++;
	}
	spin_unlock(&dev->t10_pr.registration_lock);

	if (!reg_count)
		len += sprintf(page+len, "None\n");

	return len;
}
static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct t10_pr_registration *pr_reg;
	ssize_t len = 0;

	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (pr_reg) {
		len = sprintf(page, "SPC-3 Reservation Type: %s\n",
			core_scsi3_pr_dump_type(pr_reg->pr_res_type));
	} else {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
	}

	spin_unlock(&dev->dev_reservation_lock);
	return len;
}

static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr)
		return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
		return sprintf(page, "SPC_PASSTHROUGH\n");
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return sprintf(page, "SPC2_RESERVATIONS\n");

	return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
}
static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "APTPL Bit Status: %s\n",
		(dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}

static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return 0;

	return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};

static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%u"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%u"},
	{Opt_err, NULL}
};
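
/*
 * Illustrative res_aptpl_metadata input matching the token table above.
 * Keys are separated by ',' or '\n'; all values below are made-up
 * examples.  initiator_node, target_node and a non-zero sa_res_key are
 * mandatory, as enforced at the end of the store handler below:
 *
 *	initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:client,
 *	sa_res_key=0x12ad,res_holder=1,res_type=3,mapped_lun=0,
 *	target_fabric=iSCSI,target_node=iqn.2003-01.com.example:target,
 *	tpgt=1,target_lun=0
 */
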
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;

	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = match_u64(args, &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
		}
	}

	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(t_fabric);
	kfree(isid);
	kfree(i_port);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);

static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_pr_attr_res_holder,
	&target_pr_attr_res_pr_all_tgt_pts,
	&target_pr_attr_res_pr_generation,
	&target_pr_attr_res_pr_holder_tg_port,
	&target_pr_attr_res_pr_registered_i_pts,
	&target_pr_attr_res_pr_type,
	&target_pr_attr_res_type,
	&target_pr_attr_res_aptpl_active,
	&target_pr_attr_res_aptpl_metadata,
	NULL,
};

TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
/* End functions for struct config_item_type tb_dev_pr_cit */

/* Start functions for struct config_item_type tb_dev_cit */

static inline struct se_device *to_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device, dev_group);
}

static ssize_t target_dev_info_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	int bl = 0;
	ssize_t read_bytes = 0;

	transport_dump_dev_state(dev, page, &bl);
	read_bytes = bl;
	read_bytes += dev->transport->show_configfs_dev_params(dev,
			page+read_bytes);
	return read_bytes;
}

static ssize_t target_dev_control_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);

	return dev->transport->set_configfs_dev_params(dev, page, count);
}

static ssize_t target_dev_alias_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_ALIAS))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
}
static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}

static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	if (!(dev->dev_flags & DF_USING_UDEV_PATH))
		return 0;

	return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
}

static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}
static ssize_t target_dev_enable_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);

	return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
}

static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}
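
/*
 * Illustrative usage (path layout assumed from the usual configfs
 * hierarchy): a backstore is brought online by writing "1" to its
 * enable attribute once its control parameters have been set:
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 *
 * Any value that does not contain "1" is rejected above.
 */
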
static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct config_item *lu_ci;
	struct t10_alua_lu_gp *lu_gp;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return 0;

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		lu_ci = &lu_gp->lu_gp_group.cg_item;
		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
			config_item_name(lu_ci), lu_gp->lu_gp_id);
	}
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	return len;
}
static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	unsigned char buf[LU_GROUP_NAME_BUF];
	int move = 0;

	lu_gp_mem = dev->dev_alua_lu_gp_mem;
	if (!lu_gp_mem)
		return count;

	if (count > LU_GROUP_NAME_BUF) {
		pr_err("ALUA LU Group Alias too large!\n");
		return -EINVAL;
	}
	memset(buf, 0, LU_GROUP_NAME_BUF);
	memcpy(buf, page, count);
	/*
	 * Any ALUA logical unit alias besides "NULL" means we will be
	 * making a new group association.
	 */
	if (strcmp(strstrip(buf), "NULL")) {
		/*
		 * core_alua_get_lu_gp_by_name() will increment reference to
		 * struct t10_alua_lu_gp.  This reference is released with
		 * core_alua_put_lu_gp_from_name() below.
		 */
		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
		if (!lu_gp_new)
			return -ENODEV;
	}

	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
	lu_gp = lu_gp_mem->lu_gp;
	if (lu_gp) {
		/*
		 * Clearing an existing lu_gp association, and replacing
		 * with NULL
		 */
		if (!lu_gp_new) {
			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
				" %hu\n",
				config_item_name(&hba->hba_group.cg_item),
				config_item_name(&dev->dev_group.cg_item),
				config_item_name(&lu_gp->lu_gp_group.cg_item),
				lu_gp->lu_gp_id);

			__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

			return count;
		}
		/*
		 * Removing existing association of lu_gp_mem with lu_gp
		 */
		__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
		move = 1;
	}
	/*
	 * Associate lu_gp_mem with lu_gp_new.
	 */
	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
		" core/alua/lu_gps/%s, ID: %hu\n",
		(move) ? "Moving" : "Adding",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		config_item_name(&lu_gp_new->lu_gp_group.cg_item),
		lu_gp_new->lu_gp_id);

	core_alua_put_lu_gp_from_name(lu_gp_new);
	return count;
}
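
/*
 * Illustrative usage (group name is an example only): associate this
 * device with an existing ALUA LU group by alias, or write "NULL" to
 * drop the association:
 *
 *	echo some_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 *	echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 */
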
static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *map;
	struct t10_alua_lba_map_member *mem;
	char *b = page;
	int bl = 0;
	char state;

	spin_lock(&dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		bl += sprintf(b + bl, "%u %u\n",
			      dev->t10_alua.lba_map_segment_size,
			      dev->t10_alua.lba_map_segment_multiplier);
	list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
		bl += sprintf(b + bl, "%llu %llu",
			      map->lba_map_first_lba, map->lba_map_last_lba);
		list_for_each_entry(mem, &map->lba_map_mem_list,
				    lba_map_mem_list) {
			switch (mem->lba_map_mem_alua_state) {
			case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
				state = 'O';
				break;
			case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
				state = 'A';
				break;
			case ALUA_ACCESS_STATE_STANDBY:
				state = 'S';
				break;
			case ALUA_ACCESS_STATE_UNAVAILABLE:
				state = 'U';
				break;
			default:
				state = '.';
				break;
			}
			bl += sprintf(b + bl, " %d:%c",
				      mem->lba_map_mem_alua_pg_id, state);
		}
		bl += sprintf(b + bl, "\n");
	}
	spin_unlock(&dev->t10_alua.lba_map_lock);
	return bl;
}
static ssize_t target_dev_lba_map_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct t10_alua_lba_map *lba_map = NULL;
	struct list_head lba_list;
	char *map_entries, *orig, *ptr;
	char state;
	int pg_num = -1, pg;
	int ret = 0, num = 0, pg_id, alua_state;
	unsigned long start_lba = -1, end_lba = -1;
	unsigned long segment_size = -1, segment_mult = -1;

	orig = map_entries = kstrdup(page, GFP_KERNEL);
	if (!map_entries)
		return -ENOMEM;

	INIT_LIST_HEAD(&lba_list);
	while ((ptr = strsep(&map_entries, "\n")) != NULL) {
		if (!*ptr)
			continue;

		if (num == 0) {
			if (sscanf(ptr, "%lu %lu\n",
				   &segment_size, &segment_mult) != 2) {
				pr_err("Invalid line %d\n", num);
				ret = -EINVAL;
				break;
			}
			num++;
			continue;
		}
		if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
			pr_err("Invalid line %d\n", num);
			ret = -EINVAL;
			break;
		}
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing end lba\n", num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		ptr = strchr(ptr, ' ');
		if (!ptr) {
			pr_err("Invalid line %d, missing state definitions\n",
			       num);
			ret = -EINVAL;
			break;
		}
		ptr++;
		lba_map = core_alua_allocate_lba_map(&lba_list,
						     start_lba, end_lba);
		if (IS_ERR(lba_map)) {
			ret = PTR_ERR(lba_map);
			break;
		}
		pg = 0;
		while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
			switch (state) {
			case 'O':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
				break;
			case 'A':
				alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
				break;
			case 'S':
				alua_state = ALUA_ACCESS_STATE_STANDBY;
				break;
			case 'U':
				alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
				break;
			default:
				pr_err("Invalid ALUA state '%c'\n", state);
				ret = -EINVAL;
				goto out;
			}

			ret = core_alua_allocate_lba_map_mem(lba_map,
							     pg_id, alua_state);
			if (ret) {
				pr_err("Invalid target descriptor %d:%c "
				       "at line %d\n",
				       pg_id, state, num);
				break;
			}
			pg++;
			ptr = strchr(ptr, ' ');
			if (ptr)
				ptr++;
			else
				break;
		}
		if (pg_num == -1)
			pg_num = pg;
		else if (pg != pg_num) {
			pr_err("Only %d from %d port groups definitions "
			       "at line %d\n", pg, pg_num, num);
			ret = -EINVAL;
			break;
		}
		num++;
	}
out:
	if (ret) {
		core_alua_free_lba_map(&lba_list);
		count = ret;
	} else
		core_alua_set_lba_map(dev, &lba_list,
				      segment_size, segment_mult);
	kfree(orig);
	return count;
}
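
/*
 * The parser above expects input of the form (values illustrative):
 *
 *	<segment_size> <segment_multiplier>
 *	<first_lba> <last_lba> <pg_id>:<state> [<pg_id>:<state> ...]
 *
 * for example:
 *
 *	512 2
 *	0 1023 0:O 1:A
 *	1024 2047 0:A 1:O
 *
 * where <state> is 'O' (active/optimized), 'A' (active/non-optimized),
 * 'S' (standby) or 'U' (unavailable), mirroring what
 * target_dev_lba_map_show() emits.
 */
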
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);

static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_dev_attr_info,
	&target_dev_attr_control,
	&target_dev_attr_alias,
	&target_dev_attr_udev_path,
	&target_dev_attr_enable,
	&target_dev_attr_alua_lu_gp,
	&target_dev_attr_lba_map,
	NULL,
};
static void target_core_dev_release(struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);

	target_free_device(dev);
}

/*
 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
 * within target_fabric_port_link()
 */
struct configfs_item_operations target_core_dev_item_ops = {
	.release		= target_core_dev_release,
};

TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);

/* End functions for struct config_item_type tb_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */

static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_lu_gp,
			lu_gp_group);
}

static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);

	if (!lu_gp->lu_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}

static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}
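/*
 * members yields one "$HBA/$DEV" line per associated device, e.g. the
 * hypothetical "iblock_0/mydev" from the earlier example.
 */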
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_lu_gp_attr_lu_gp_id,
	&target_lu_gp_attr_members,
	NULL,
};

static void target_core_alua_lu_gp_release(struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	core_alua_free_lu_gp(lu_gp);
}

static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
};

static const struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */

static struct config_group *target_core_alua_create_lu_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_lu_gp *lu_gp;
	struct config_group *alua_lu_gp_cg = NULL;
	struct config_item *alua_lu_gp_ci = NULL;

	lu_gp = core_alua_allocate_lu_gp(name, 0);
	if (IS_ERR(lu_gp))
		return NULL;

	alua_lu_gp_cg = &lu_gp->lu_gp_group;
	alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;

	config_group_init_type_name(alua_lu_gp_cg, name,
			&target_core_alua_lu_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s\n",
		config_item_name(alua_lu_gp_ci));

	return alua_lu_gp_cg;
}

static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

static const struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */

static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
			tg_pt_gp_group);
}

static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "%d\n",
		to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
}
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on invalid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!target_dev_configured(dev)) {
		pr_err("Unable to set alua_access_state while device is"
		       " not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
			" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}
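/*
 * Illustrative only: with TPGS_IMPLICIT_ALUA enabled, userspace can request
 * a transition to the SPC-4 active/optimized state (value 0) for a
 * hypothetical per-device group with
 *
 *	echo 0 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/alua_access_state
 */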
static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	return sprintf(page, "%s\n",
		core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}

static ssize_t target_tg_pt_gp_alua_access_status_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int new_status, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to set ALUA access status on invalid"
			" tg_pt_gp ID: %hu\n",
			tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access status"
			" from %s\n", page);
		return ret;
	}
	new_status = (int)tmp;

	if ((new_status != ALUA_STATUS_NONE) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
		pr_err("Illegal ALUA access status: 0x%02x\n",
			new_status);
		return -EINVAL;
	}

	tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
	return count;
}
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
		char *page)
{
	return core_alua_show_access_type(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
		struct config_item *item, char *p)			\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	return sprintf(p, "%d\n",					\
		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
}									\
									\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
		struct config_item *item, const char *p, size_t c)	\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		pr_err("Unable to set " #_name " ALUA state on"	\
			" invalid tg_pt_gp ID: %hu\n",			\
			t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return -EINVAL;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->tg_pt_gp_alua_supported_states |= _bit;		\
	else								\
		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
									\
	return c;							\
}

ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
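/*
 * Each instantiation above expands to a show/store pair that toggles one
 * bit in tg_pt_gp_alua_supported_states, e.g. (illustrative):
 *
 *	echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/alua/my_tg_pt_gp/alua_support_standby
 */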
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
		struct config_item *item, char *page)
{
	return sprintf(page, "%d\n",
		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}

static ssize_t target_tg_pt_gp_alua_write_metadata_store(
		struct config_item *item, const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	unsigned long tmp;
	int ret;

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract alua_write_metadata\n");
		return ret;
	}

	if ((tmp != 0) && (tmp != 1)) {
		pr_err("Illegal value for alua_write_metadata:"
			" %lu\n", tmp);
		return -EINVAL;
	}
	tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;

	return count;
}
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
		struct config_item *item, char *page)
{
	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
		struct config_item *item, const char *page, size_t count)
{
	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
		char *page)
{
	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);

	if (!tg_pt_gp->tg_pt_gp_valid_id)
		return 0;
	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}

static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
		       page);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
		       tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
		char *page)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_lun *lun;
	ssize_t len = 0, cur_len;
	unsigned char buf[TG_PT_GROUP_NAME_BUF];

	memset(buf, 0, TG_PT_GROUP_NAME_BUF);

	spin_lock(&tg_pt_gp->tg_pt_gp_lock);
	list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
			lun_tg_pt_gp_link) {
		struct se_portal_group *tpg = lun->lun_tpg;

		cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
			"/%s\n", tpg->se_tpg_tfo->fabric_name,
			tpg->se_tpg_tfo->tpg_get_wwn(tpg),
			tpg->se_tpg_tfo->tpg_get_tag(tpg),
			config_item_name(&lun->lun_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of tg_pt_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

	return len;
}
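/*
 * members yields one "$FABRIC/$WWN/tpgt_$TAG/$LUN" line per mapped LUN,
 * e.g. (hypothetical): "iscsi/iqn.2003-01.org.example:t1/tpgt_1/lun_0".
 */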
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};
static void target_core_alua_tg_pt_gp_release(struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	core_alua_free_tg_pt_gp(tg_pt_gp);
}

static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */

static struct config_group *target_core_alua_create_tg_pt_gp(
	struct config_group *group,
	const char *name)
{
	struct t10_alua *alua = container_of(group, struct t10_alua,
					alua_tg_pt_gps_group);
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_group *alua_tg_pt_gp_cg = NULL;
	struct config_item *alua_tg_pt_gp_ci = NULL;

	tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
	if (!tg_pt_gp)
		return NULL;

	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;

	config_group_init_type_name(alua_tg_pt_gp_cg, name,
			&target_core_alua_tg_pt_gp_cit);

	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
		" Group: alua/tg_pt_gps/%s\n",
		config_item_name(alua_tg_pt_gp_ci));

	return alua_tg_pt_gp_cg;
}

static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);

/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */

/*
 * target_core_alua_cit is a ConfigFS group that lives under
 * /sys/kernel/config/target/core/alua. Its default group, core/alua/lu_gps,
 * is attached to target_core_alua_cit in target_core_init_configfs() below;
 * target port groups are created per device under core/$HBA/$DEV/alua/.
 */
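/*
 * Sketch of the resulting tree (lu_gps only at this level):
 *
 *	/sys/kernel/config/target/core/alua/
 *		lu_gps/
 *			default_lu_gp/
 */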
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type tb_dev_stat_cit */

static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}

static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
	return;
}

static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);

/* End functions for struct config_item_type tb_dev_stat_cit */
/* Start functions for struct config_item_type target_core_hba_cit */

static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
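/*
 * A successful mkdir of core/$HBA/$DEV therefore materializes (sketch):
 *
 *	$DEV/action/  $DEV/attrib/  $DEV/pr/  $DEV/wwn/
 *	$DEV/alua/default_tg_pt_gp/  $DEV/statistics/
 */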
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * default_tg_pt_gp is released via core_alua_free_tg_pt_gp() from
	 * target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}

static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};
static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}

static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}

static ssize_t target_hba_mode_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);
	int hba_mode = 0;

	if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
		hba_mode = 1;

	return sprintf(page, "%d\n", hba_mode);
}

static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
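/*
 * Illustrative only, for a backend that implements pmode_enable_hba
 * (pSCSI): "echo 1 > /sys/kernel/config/target/core/pscsi_0/hba_mode"
 * toggles HBA_FLAGS_PSCSI_MODE for the whole HBA, and is rejected while
 * the HBA still has configured devices.
 */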
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};

static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
	struct config_group *group,
	const char *name)
{
	char *se_plugin_str, *str, *str2;
	struct se_hba *hba;
	char buf[TARGET_CORE_NAME_MAX_LEN];
	unsigned long plugin_dep_id = 0;
	int ret;

	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
		pr_err("Passed *name strlen(): %d exceeds"
			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
			TARGET_CORE_NAME_MAX_LEN);
		return ERR_PTR(-ENAMETOOLONG);
	}
	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);

	str = strstr(buf, "_");
	if (!str) {
		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
		return ERR_PTR(-EINVAL);
	}
	se_plugin_str = buf;
	/*
	 * Special case for subsystem plugins that have "_" in their names.
	 * Namely rd_direct and rd_mcp..
	 */
	str2 = strstr(str+1, "_");
	if (str2) {
		*str2 = '\0'; /* Terminate for *se_plugin_str */
		str2++; /* Skip to start of plugin dependent ID */
		str = str2;
	} else {
		*str = '\0'; /* Terminate for *se_plugin_str */
		str++; /* Skip to start of plugin dependent ID */
	}

	ret = kstrtoul(str, 0, &plugin_dep_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" plugin_dep_id\n", ret);
		return ERR_PTR(ret);
	}
	/*
	 * Load up TCM subsystem plugins if they have not already been loaded.
	 */
	transport_subsystem_check_init();

	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
	if (IS_ERR(hba))
		return ERR_CAST(hba);

	config_group_init_type_name(&hba->hba_group, name,
			&target_core_hba_cit);

	return &hba->hba_group;
}
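/*
 * Name parsing examples (illustrative): "iblock_0" splits into plugin
 * "iblock" with plugin_dep_id 0, while "rd_mcp_1" takes the double
 * underscore special case and yields plugin "rd_mcp" with plugin_dep_id 1.
 */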
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	/*
	 * core_delete_hba() is called from target_core_hba_item_ops->release()
	 * -> target_core_hba_release()
	 */
	config_item_put(item);
}

static struct configfs_group_operations target_core_group_ops = {
	.make_group	= target_core_call_addhbatotarget,
	.drop_item	= target_core_call_delhbafromtarget,
};

static const struct config_item_type target_core_cit = {
	.ct_item_ops	= NULL,
	.ct_group_ops	= &target_core_group_ops,
	.ct_attrs	= NULL,
	.ct_owner	= THIS_MODULE,
};

/* End functions for struct config_item_type target_core_hba_cit */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
static void target_init_dbroot(void)
{
	struct file *fp;

	snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
	fp = filp_open(db_root_stage, O_RDONLY, 0);
	if (IS_ERR(fp)) {
		pr_err("db_root: cannot open: %s\n", db_root_stage);
		return;
	}
	if (!S_ISDIR(file_inode(fp)->i_mode)) {
		filp_close(fp, NULL);
		pr_err("db_root: not a valid directory: %s\n", db_root_stage);
		return;
	}
	filp_close(fp, NULL);

	strncpy(db_root, db_root_stage, DB_ROOT_LEN);
	pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
}
static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add the ALUA Logical Unit Group ConfigFS group under
	 * /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}
	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
			&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	target_init_dbroot();

	return 0;

out:
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
static void __exit target_core_exit_configfs(void)
{
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic.
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}

MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);