]> asedeno.scripts.mit.edu Git - linux.git/blob - drivers/target/target_core_configfs.c
scsi: target: drop unnecessary get_fabric_name() accessor from fabric_ops
[linux.git] / drivers / target / target_core_configfs.c
1 /*******************************************************************************
2  * Filename:  target_core_configfs.c
3  *
4  * This file contains ConfigFS logic for the Generic Target Engine project.
5  *
6  * (c) Copyright 2008-2013 Datera, Inc.
7  *
8  * Nicholas A. Bellinger <nab@kernel.org>
9  *
10  * based on configfs Copyright (C) 2005 Oracle.  All rights reserved.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation; either version 2 of the License, or
15  * (at your option) any later version.
16  *
17  * This program is distributed in the hope that it will be useful,
18  * but WITHOUT ANY WARRANTY; without even the implied warranty of
19  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
20  * GNU General Public License for more details.
21  ****************************************************************************/
22
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <generated/utsrelease.h>
26 #include <linux/utsname.h>
27 #include <linux/init.h>
28 #include <linux/fs.h>
29 #include <linux/namei.h>
30 #include <linux/slab.h>
31 #include <linux/types.h>
32 #include <linux/delay.h>
33 #include <linux/unistd.h>
34 #include <linux/string.h>
35 #include <linux/parser.h>
36 #include <linux/syscalls.h>
37 #include <linux/configfs.h>
38 #include <linux/spinlock.h>
39
40 #include <target/target_core_base.h>
41 #include <target/target_core_backend.h>
42 #include <target/target_core_fabric.h>
43
44 #include "target_core_internal.h"
45 #include "target_core_alua.h"
46 #include "target_core_pr.h"
47 #include "target_core_rd.h"
48 #include "target_core_xcopy.h"
49
/*
 * TB_CIT_SETUP - generate a setup helper for one backend config_item_type.
 *
 * Expands to target_core_setup_<_name>_cit(), which wires the given item
 * ops, group ops and a fixed attribute array into tb->tb_<_name>_cit and
 * records the backend module as owner so configfs pins it while in use.
 */
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)		\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = _attrs;						\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
61
/*
 * TB_CIT_SETUP_DRV - like TB_CIT_SETUP, but the attribute array comes from
 * the backend driver itself (tb->ops->tb_<_name>_attrs) instead of a fixed
 * list supplied at expansion time.
 */
#define TB_CIT_SETUP_DRV(_name, _item_ops, _group_ops)			\
static void target_core_setup_##_name##_cit(struct target_backend *tb)	\
{									\
	struct config_item_type *cit = &tb->tb_##_name##_cit;		\
									\
	cit->ct_item_ops = _item_ops;					\
	cit->ct_group_ops = _group_ops;					\
	cit->ct_attrs = tb->ops->tb_##_name##_attrs;			\
	cit->ct_owner = tb->ops->owner;					\
	pr_debug("Setup generic %s\n", __stringify(_name));		\
}
73
74 extern struct t10_alua_lu_gp *default_lu_gp;
75
76 static LIST_HEAD(g_tf_list);
77 static DEFINE_MUTEX(g_tf_lock);
78
79 static struct config_group target_core_hbagroup;
80 static struct config_group alua_group;
81 static struct config_group alua_lu_gps_group;
82
/* Map a configfs item back to its containing struct se_hba. */
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
88
/*
 * Attributes for /sys/kernel/config/target/
 */

/*
 * "version" (read-only): report the core version string, kernel release
 * and machine architecture.
 */
static ssize_t target_core_item_version_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
		" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_VERSION,
		utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(target_core_item_, version);
101
/* Current metadata root directory; non-static, referenced by other TUs. */
char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
/* Staging buffer so a failed validation never clobbers db_root. */
static char db_root_stage[DB_ROOT_LEN];

/* "dbroot" show: print the currently configured metadata root. */
static ssize_t target_core_item_dbroot_show(struct config_item *item,
					    char *page)
{
	return sprintf(page, "%s\n", db_root);
}
110
111 static ssize_t target_core_item_dbroot_store(struct config_item *item,
112                                         const char *page, size_t count)
113 {
114         ssize_t read_bytes;
115         struct file *fp;
116
117         mutex_lock(&g_tf_lock);
118         if (!list_empty(&g_tf_list)) {
119                 mutex_unlock(&g_tf_lock);
120                 pr_err("db_root: cannot be changed: target drivers registered");
121                 return -EINVAL;
122         }
123
124         if (count > (DB_ROOT_LEN - 1)) {
125                 mutex_unlock(&g_tf_lock);
126                 pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
127                        (int)count, DB_ROOT_LEN - 1);
128                 return -EINVAL;
129         }
130
131         read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
132         if (!read_bytes) {
133                 mutex_unlock(&g_tf_lock);
134                 return -EINVAL;
135         }
136         if (db_root_stage[read_bytes - 1] == '\n')
137                 db_root_stage[read_bytes - 1] = '\0';
138
139         /* validate new db root before accepting it */
140         fp = filp_open(db_root_stage, O_RDONLY, 0);
141         if (IS_ERR(fp)) {
142                 mutex_unlock(&g_tf_lock);
143                 pr_err("db_root: cannot open: %s\n", db_root_stage);
144                 return -EINVAL;
145         }
146         if (!S_ISDIR(file_inode(fp)->i_mode)) {
147                 filp_close(fp, NULL);
148                 mutex_unlock(&g_tf_lock);
149                 pr_err("db_root: not a directory: %s\n", db_root_stage);
150                 return -EINVAL;
151         }
152         filp_close(fp, NULL);
153
154         strncpy(db_root, db_root_stage, read_bytes);
155
156         mutex_unlock(&g_tf_lock);
157
158         pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
159
160         return read_bytes;
161 }
162
163 CONFIGFS_ATTR(target_core_item_, dbroot);
164
165 static struct target_fabric_configfs *target_core_get_fabric(
166         const char *name)
167 {
168         struct target_fabric_configfs *tf;
169
170         if (!name)
171                 return NULL;
172
173         mutex_lock(&g_tf_lock);
174         list_for_each_entry(tf, &g_tf_list, tf_list) {
175                 if (!strcmp(tf->tf_ops->name, name)) {
176                         atomic_inc(&tf->tf_access_cnt);
177                         mutex_unlock(&g_tf_lock);
178                         return tf;
179                 }
180         }
181         mutex_unlock(&g_tf_lock);
182
183         return NULL;
184 }
185
186 /*
187  * Called from struct target_core_group_ops->make_group()
188  */
189 static struct config_group *target_core_register_fabric(
190         struct config_group *group,
191         const char *name)
192 {
193         struct target_fabric_configfs *tf;
194         int ret;
195
196         pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
197                         " %s\n", group, name);
198
199         tf = target_core_get_fabric(name);
200         if (!tf) {
201                 pr_debug("target_core_register_fabric() trying autoload for %s\n",
202                          name);
203
204                 /*
205                  * Below are some hardcoded request_module() calls to automatically
206                  * local fabric modules when the following is called:
207                  *
208                  * mkdir -p /sys/kernel/config/target/$MODULE_NAME
209                  *
210                  * Note that this does not limit which TCM fabric module can be
211                  * registered, but simply provids auto loading logic for modules with
212                  * mkdir(2) system calls with known TCM fabric modules.
213                  */
214
215                 if (!strncmp(name, "iscsi", 5)) {
216                         /*
217                          * Automatically load the LIO Target fabric module when the
218                          * following is called:
219                          *
220                          * mkdir -p $CONFIGFS/target/iscsi
221                          */
222                         ret = request_module("iscsi_target_mod");
223                         if (ret < 0) {
224                                 pr_debug("request_module() failed for"
225                                          " iscsi_target_mod.ko: %d\n", ret);
226                                 return ERR_PTR(-EINVAL);
227                         }
228                 } else if (!strncmp(name, "loopback", 8)) {
229                         /*
230                          * Automatically load the tcm_loop fabric module when the
231                          * following is called:
232                          *
233                          * mkdir -p $CONFIGFS/target/loopback
234                          */
235                         ret = request_module("tcm_loop");
236                         if (ret < 0) {
237                                 pr_debug("request_module() failed for"
238                                          " tcm_loop.ko: %d\n", ret);
239                                 return ERR_PTR(-EINVAL);
240                         }
241                 }
242
243                 tf = target_core_get_fabric(name);
244         }
245
246         if (!tf) {
247                 pr_debug("target_core_get_fabric() failed for %s\n",
248                          name);
249                 return ERR_PTR(-EINVAL);
250         }
251         pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
252                         " %s\n", tf->tf_ops->name);
253         /*
254          * On a successful target_core_get_fabric() look, the returned
255          * struct target_fabric_configfs *tf will contain a usage reference.
256          */
257         pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
258                         &tf->tf_wwn_cit);
259
260         config_group_init_type_name(&tf->tf_group, name, &tf->tf_wwn_cit);
261
262         config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
263                         &tf->tf_discovery_cit);
264         configfs_add_default_group(&tf->tf_disc_group, &tf->tf_group);
265
266         pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n",
267                  config_item_name(&tf->tf_group.cg_item));
268         return &tf->tf_group;
269 }
270
271 /*
272  * Called from struct target_core_group_ops->drop_item()
273  */
274 static void target_core_deregister_fabric(
275         struct config_group *group,
276         struct config_item *item)
277 {
278         struct target_fabric_configfs *tf = container_of(
279                 to_config_group(item), struct target_fabric_configfs, tf_group);
280
281         pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
282                 " tf list\n", config_item_name(item));
283
284         pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
285                         " %s\n", tf->tf_ops->name);
286         atomic_dec(&tf->tf_access_cnt);
287
288         pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
289                         " %s\n", config_item_name(item));
290
291         configfs_remove_default_groups(&tf->tf_group);
292         config_item_put(item);
293 }
294
/* mkdir/rmdir handlers for fabric directories under .../target/. */
static struct configfs_group_operations target_core_fabric_group_ops = {
	.make_group	= &target_core_register_fabric,
	.drop_item	= &target_core_deregister_fabric,
};

/*
 * All item attributes appearing in /sys/kernel/target/ appear here.
 */
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
	&target_core_item_attr_version,
	&target_core_item_attr_dbroot,
	NULL,
};

/*
 * Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
 */
static const struct config_item_type target_core_fabrics_item = {
	.ct_group_ops	= &target_core_fabric_group_ops,
	.ct_attrs	= target_core_fabric_item_attrs,
	.ct_owner	= THIS_MODULE,
};

/* The configfs subsystem rooted at /sys/kernel/config/target/. */
static struct configfs_subsystem target_core_fabrics = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "target",
			.ci_type = &target_core_fabrics_item,
		},
	},
};
326
/*
 * Take a configfs dependency on @item so userspace cannot rmdir it while
 * the target core still references it.  Paired with target_undepend_item().
 */
int target_depend_item(struct config_item *item)
{
	return configfs_depend_item(&target_core_fabrics, item);
}
EXPORT_SYMBOL(target_depend_item);
332
/* Release a dependency previously taken via target_depend_item(). */
void target_undepend_item(struct config_item *item)
{
	configfs_undepend_item(item);
}
EXPORT_SYMBOL(target_undepend_item);
338
339 /*##############################################################################
340 // Start functions called by external Target Fabrics Modules
341 //############################################################################*/
342
343 static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
344 {
345         if (!tfo->name) {
346                 pr_err("Missing tfo->name\n");
347                 return -EINVAL;
348         }
349         if (strlen(tfo->name) >= TARGET_FABRIC_NAME_SIZE) {
350                 pr_err("Passed name: %s exceeds TARGET_FABRIC"
351                         "_NAME_SIZE\n", tfo->name);
352                 return -EINVAL;
353         }
354         if (!tfo->fabric_name) {
355                 pr_err("Missing tfo->fabric_name\n");
356                 return -EINVAL;
357         }
358         if (!tfo->tpg_get_wwn) {
359                 pr_err("Missing tfo->tpg_get_wwn()\n");
360                 return -EINVAL;
361         }
362         if (!tfo->tpg_get_tag) {
363                 pr_err("Missing tfo->tpg_get_tag()\n");
364                 return -EINVAL;
365         }
366         if (!tfo->tpg_check_demo_mode) {
367                 pr_err("Missing tfo->tpg_check_demo_mode()\n");
368                 return -EINVAL;
369         }
370         if (!tfo->tpg_check_demo_mode_cache) {
371                 pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
372                 return -EINVAL;
373         }
374         if (!tfo->tpg_check_demo_mode_write_protect) {
375                 pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
376                 return -EINVAL;
377         }
378         if (!tfo->tpg_check_prod_mode_write_protect) {
379                 pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
380                 return -EINVAL;
381         }
382         if (!tfo->tpg_get_inst_index) {
383                 pr_err("Missing tfo->tpg_get_inst_index()\n");
384                 return -EINVAL;
385         }
386         if (!tfo->release_cmd) {
387                 pr_err("Missing tfo->release_cmd()\n");
388                 return -EINVAL;
389         }
390         if (!tfo->sess_get_index) {
391                 pr_err("Missing tfo->sess_get_index()\n");
392                 return -EINVAL;
393         }
394         if (!tfo->write_pending) {
395                 pr_err("Missing tfo->write_pending()\n");
396                 return -EINVAL;
397         }
398         if (!tfo->write_pending_status) {
399                 pr_err("Missing tfo->write_pending_status()\n");
400                 return -EINVAL;
401         }
402         if (!tfo->set_default_node_attributes) {
403                 pr_err("Missing tfo->set_default_node_attributes()\n");
404                 return -EINVAL;
405         }
406         if (!tfo->get_cmd_state) {
407                 pr_err("Missing tfo->get_cmd_state()\n");
408                 return -EINVAL;
409         }
410         if (!tfo->queue_data_in) {
411                 pr_err("Missing tfo->queue_data_in()\n");
412                 return -EINVAL;
413         }
414         if (!tfo->queue_status) {
415                 pr_err("Missing tfo->queue_status()\n");
416                 return -EINVAL;
417         }
418         if (!tfo->queue_tm_rsp) {
419                 pr_err("Missing tfo->queue_tm_rsp()\n");
420                 return -EINVAL;
421         }
422         if (!tfo->aborted_task) {
423                 pr_err("Missing tfo->aborted_task()\n");
424                 return -EINVAL;
425         }
426         if (!tfo->check_stop_free) {
427                 pr_err("Missing tfo->check_stop_free()\n");
428                 return -EINVAL;
429         }
430         /*
431          * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
432          * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
433          * target_core_fabric_configfs.c WWN+TPG group context code.
434          */
435         if (!tfo->fabric_make_wwn) {
436                 pr_err("Missing tfo->fabric_make_wwn()\n");
437                 return -EINVAL;
438         }
439         if (!tfo->fabric_drop_wwn) {
440                 pr_err("Missing tfo->fabric_drop_wwn()\n");
441                 return -EINVAL;
442         }
443         if (!tfo->fabric_make_tpg) {
444                 pr_err("Missing tfo->fabric_make_tpg()\n");
445                 return -EINVAL;
446         }
447         if (!tfo->fabric_drop_tpg) {
448                 pr_err("Missing tfo->fabric_drop_tpg()\n");
449                 return -EINVAL;
450         }
451
452         return 0;
453 }
454
455 int target_register_template(const struct target_core_fabric_ops *fo)
456 {
457         struct target_fabric_configfs *tf;
458         int ret;
459
460         ret = target_fabric_tf_ops_check(fo);
461         if (ret)
462                 return ret;
463
464         tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
465         if (!tf) {
466                 pr_err("%s: could not allocate memory!\n", __func__);
467                 return -ENOMEM;
468         }
469
470         INIT_LIST_HEAD(&tf->tf_list);
471         atomic_set(&tf->tf_access_cnt, 0);
472         tf->tf_ops = fo;
473         target_fabric_setup_cits(tf);
474
475         mutex_lock(&g_tf_lock);
476         list_add_tail(&tf->tf_list, &g_tf_list);
477         mutex_unlock(&g_tf_lock);
478
479         return 0;
480 }
481 EXPORT_SYMBOL(target_register_template);
482
/*
 * Unregister a fabric template previously added by target_register_template().
 *
 * Walks g_tf_list under g_tf_lock; on a name match the entry is unlinked,
 * the lock dropped, and the template freed.  A non-zero tf_access_cnt here
 * means a configfs group still holds a usage reference -- a caller bug,
 * hence the BUG_ON().  Silently returns if no template matches.
 */
void target_unregister_template(const struct target_core_fabric_ops *fo)
{
	struct target_fabric_configfs *t;

	mutex_lock(&g_tf_lock);
	list_for_each_entry(t, &g_tf_list, tf_list) {
		if (!strcmp(t->tf_ops->name, fo->name)) {
			BUG_ON(atomic_read(&t->tf_access_cnt));
			/* unlink before dropping the lock; then safe to free */
			list_del(&t->tf_list);
			mutex_unlock(&g_tf_lock);
			/*
			 * Wait for any outstanding fabric se_deve_entry->rcu_head
			 * callbacks to complete post kfree_rcu(), before allowing
			 * fabric driver unload of TFO->module to proceed.
			 */
			rcu_barrier();
			kfree(t);
			return;
		}
	}
	mutex_unlock(&g_tf_lock);
}
EXPORT_SYMBOL(target_unregister_template);
506
507 /*##############################################################################
508 // Stop functions called by external Target Fabrics Modules
509 //############################################################################*/
510
/* Map a configfs item back to its containing struct se_dev_attrib. */
static inline struct se_dev_attrib *to_attrib(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_dev_attrib,
			da_group);
}
516
/* Start functions for struct config_item_type tb_dev_attrib_cit */

/*
 * DEF_CONFIGFS_ATTRIB_SHOW - generate a _show() handler printing the named
 * se_dev_attrib field as an unsigned decimal.
 */
#define DEF_CONFIGFS_ATTRIB_SHOW(_name)					\
static ssize_t _name##_show(struct config_item *item, char *page)	\
{									\
	return snprintf(page, PAGE_SIZE, "%u\n", to_attrib(item)->_name); \
}

/* Generated read handlers for every generic device attribute. */
DEF_CONFIGFS_ATTRIB_SHOW(emulate_model_alias);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_dpo);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_fua_read);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_write_cache);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_ua_intlck_ctrl);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tas);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpu);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_tpws);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_caw);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_3pc);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_pr);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(hw_pi_prot_type);
DEF_CONFIGFS_ATTRIB_SHOW(pi_prot_verify);
DEF_CONFIGFS_ATTRIB_SHOW(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_SHOW(is_nonrot);
DEF_CONFIGFS_ATTRIB_SHOW(emulate_rest_reord);
DEF_CONFIGFS_ATTRIB_SHOW(force_pr_aptpl);
DEF_CONFIGFS_ATTRIB_SHOW(hw_block_size);
DEF_CONFIGFS_ATTRIB_SHOW(block_size);
DEF_CONFIGFS_ATTRIB_SHOW(hw_max_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(optimal_sectors);
DEF_CONFIGFS_ATTRIB_SHOW(hw_queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(queue_depth);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_SHOW(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_SHOW(unmap_zeroes_data);
DEF_CONFIGFS_ATTRIB_SHOW(max_write_same_len);
555
/*
 * DEF_CONFIGFS_ATTRIB_STORE_U32 - generate a _store() handler that parses
 * the input with kstrtou32() and assigns it, unvalidated, to the named
 * se_dev_attrib field.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_U32(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	u32 val;							\
	int ret;							\
									\
	ret = kstrtou32(page, 0, &val);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = val;						\
	return count;							\
}

/* u32 attributes that accept any value without range checking. */
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_lba_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_unmap_block_desc_count);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity);
DEF_CONFIGFS_ATTRIB_STORE_U32(unmap_granularity_alignment);
DEF_CONFIGFS_ATTRIB_STORE_U32(max_write_same_len);
576
/*
 * DEF_CONFIGFS_ATTRIB_STORE_BOOL - generate a _store() handler that parses
 * the input with strtobool() and assigns it to the named se_dev_attrib
 * field with no further constraints.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_BOOL(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,	\
		size_t count)						\
{									\
	struct se_dev_attrib *da = to_attrib(item);			\
	bool flag;							\
	int ret;							\
									\
	ret = strtobool(page, &flag);					\
	if (ret < 0)							\
		return ret;						\
	da->_name = flag;						\
	return count;							\
}

/* Boolean attributes that can be toggled freely. */
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_fua_write);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_caw);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_3pc);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(emulate_pr);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(enforce_pr_isids);
DEF_CONFIGFS_ATTRIB_STORE_BOOL(is_nonrot);
598
/*
 * DEF_CONFIGFS_ATTRIB_STORE_STUB - generate a _store() handler for a
 * deprecated attribute: the write is accepted (returns count) but ignored,
 * with a once-only warning.
 */
#define DEF_CONFIGFS_ATTRIB_STORE_STUB(_name)				\
static ssize_t _name##_store(struct config_item *item, const char *page,\
		size_t count)						\
{									\
	printk_once(KERN_WARNING					\
		"ignoring deprecated %s attribute\n",			\
		__stringify(_name));					\
	return count;							\
}

/* Deprecated attributes kept writable for old userspace tooling. */
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_dpo);
DEF_CONFIGFS_ATTRIB_STORE_STUB(emulate_fua_read);
611
/*
 * Copy the backstore's configfs directory name into the device's INQUIRY
 * model field.
 *
 * NOTE(review): snprintf() with size 16 keeps at most 15 characters plus
 * the NUL terminator, so names of 15 or more characters are truncated;
 * the warning's "truncating to 16 bytes" counts the NUL as one of them.
 */
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}
624
625 static ssize_t emulate_model_alias_store(struct config_item *item,
626                 const char *page, size_t count)
627 {
628         struct se_dev_attrib *da = to_attrib(item);
629         struct se_device *dev = da->da_dev;
630         bool flag;
631         int ret;
632
633         if (dev->export_count) {
634                 pr_err("dev[%p]: Unable to change model alias"
635                         " while export_count is %d\n",
636                         dev, dev->export_count);
637                 return -EINVAL;
638         }
639
640         ret = strtobool(page, &flag);
641         if (ret < 0)
642                 return ret;
643
644         if (flag) {
645                 dev_set_t10_wwn_model_alias(dev);
646         } else {
647                 strncpy(&dev->t10_wwn.model[0],
648                         dev->transport->inquiry_prod, 16);
649         }
650         da->emulate_model_alias = flag;
651         return count;
652 }
653
654 static ssize_t emulate_write_cache_store(struct config_item *item,
655                 const char *page, size_t count)
656 {
657         struct se_dev_attrib *da = to_attrib(item);
658         bool flag;
659         int ret;
660
661         ret = strtobool(page, &flag);
662         if (ret < 0)
663                 return ret;
664
665         if (flag && da->da_dev->transport->get_write_cache) {
666                 pr_err("emulate_write_cache not supported for this device\n");
667                 return -EINVAL;
668         }
669
670         da->emulate_write_cache = flag;
671         pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
672                         da->da_dev, flag);
673         return count;
674 }
675
676 static ssize_t emulate_ua_intlck_ctrl_store(struct config_item *item,
677                 const char *page, size_t count)
678 {
679         struct se_dev_attrib *da = to_attrib(item);
680         u32 val;
681         int ret;
682
683         ret = kstrtou32(page, 0, &val);
684         if (ret < 0)
685                 return ret;
686
687         if (val != 0 && val != 1 && val != 2) {
688                 pr_err("Illegal value %d\n", val);
689                 return -EINVAL;
690         }
691
692         if (da->da_dev->export_count) {
693                 pr_err("dev[%p]: Unable to change SE Device"
694                         " UA_INTRLCK_CTRL while export_count is %d\n",
695                         da->da_dev, da->da_dev->export_count);
696                 return -EINVAL;
697         }
698         da->emulate_ua_intlck_ctrl = val;
699         pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
700                 da->da_dev, val);
701         return count;
702 }
703
704 static ssize_t emulate_tas_store(struct config_item *item,
705                 const char *page, size_t count)
706 {
707         struct se_dev_attrib *da = to_attrib(item);
708         bool flag;
709         int ret;
710
711         ret = strtobool(page, &flag);
712         if (ret < 0)
713                 return ret;
714
715         if (da->da_dev->export_count) {
716                 pr_err("dev[%p]: Unable to change SE Device TAS while"
717                         " export_count is %d\n",
718                         da->da_dev, da->da_dev->export_count);
719                 return -EINVAL;
720         }
721         da->emulate_tas = flag;
722         pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
723                 da->da_dev, flag ? "Enabled" : "Disabled");
724
725         return count;
726 }
727
728 static ssize_t emulate_tpu_store(struct config_item *item,
729                 const char *page, size_t count)
730 {
731         struct se_dev_attrib *da = to_attrib(item);
732         bool flag;
733         int ret;
734
735         ret = strtobool(page, &flag);
736         if (ret < 0)
737                 return ret;
738
739         /*
740          * We expect this value to be non-zero when generic Block Layer
741          * Discard supported is detected iblock_create_virtdevice().
742          */
743         if (flag && !da->max_unmap_block_desc_count) {
744                 pr_err("Generic Block Discard not supported\n");
745                 return -ENOSYS;
746         }
747
748         da->emulate_tpu = flag;
749         pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
750                 da->da_dev, flag);
751         return count;
752 }
753
754 static ssize_t emulate_tpws_store(struct config_item *item,
755                 const char *page, size_t count)
756 {
757         struct se_dev_attrib *da = to_attrib(item);
758         bool flag;
759         int ret;
760
761         ret = strtobool(page, &flag);
762         if (ret < 0)
763                 return ret;
764
765         /*
766          * We expect this value to be non-zero when generic Block Layer
767          * Discard supported is detected iblock_create_virtdevice().
768          */
769         if (flag && !da->max_unmap_block_desc_count) {
770                 pr_err("Generic Block Discard not supported\n");
771                 return -ENOSYS;
772         }
773
774         da->emulate_tpws = flag;
775         pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
776                                 da->da_dev, flag);
777         return count;
778 }
779
/*
 * "pi_prot_type" store: select the DIF protection type (0, 1 or 3; TYPE2
 * is rejected as unsupported) for a configured, non-exported device.
 *
 * With hardware PI active the write is accepted but ignored.  A 0 ->
 * non-zero transition calls transport->init_prot(); non-zero -> 0 calls
 * transport->free_prot().  pi_prot_verify mirrors whether any protection
 * type is currently active.
 */
static ssize_t pi_prot_type_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_dev_attrib *da = to_attrib(item);
	int old_prot = da->pi_prot_type, ret;
	struct se_device *dev = da->da_dev;
	u32 flag;

	ret = kstrtou32(page, 0, &flag);
	if (ret < 0)
		return ret;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (da->hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return count;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return count;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!target_dev_configured(dev)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	da->pi_prot_type = flag;

	if (flag && !old_prot) {
		ret = dev->transport->init_prot(dev);
		if (ret) {
			/* roll back to the previous type on init failure */
			da->pi_prot_type = old_prot;
			da->pi_prot_verify = (bool) da->pi_prot_type;
			return ret;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}

	da->pi_prot_verify = (bool) da->pi_prot_type;
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
	return count;
}
842
843 static ssize_t pi_prot_format_store(struct config_item *item,
844                 const char *page, size_t count)
845 {
846         struct se_dev_attrib *da = to_attrib(item);
847         struct se_device *dev = da->da_dev;
848         bool flag;
849         int ret;
850
851         ret = strtobool(page, &flag);
852         if (ret < 0)
853                 return ret;
854
855         if (!flag)
856                 return count;
857
858         if (!dev->transport->format_prot) {
859                 pr_err("DIF protection format not supported by backend %s\n",
860                        dev->transport->name);
861                 return -ENOSYS;
862         }
863         if (!target_dev_configured(dev)) {
864                 pr_err("DIF protection format requires device to be configured\n");
865                 return -ENODEV;
866         }
867         if (dev->export_count) {
868                 pr_err("dev[%p]: Unable to format SE Device PROT type while"
869                        " export_count is %d\n", dev, dev->export_count);
870                 return -EINVAL;
871         }
872
873         ret = dev->transport->format_prot(dev);
874         if (ret)
875                 return ret;
876
877         pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
878         return count;
879 }
880
881 static ssize_t pi_prot_verify_store(struct config_item *item,
882                 const char *page, size_t count)
883 {
884         struct se_dev_attrib *da = to_attrib(item);
885         bool flag;
886         int ret;
887
888         ret = strtobool(page, &flag);
889         if (ret < 0)
890                 return ret;
891
892         if (!flag) {
893                 da->pi_prot_verify = flag;
894                 return count;
895         }
896         if (da->hw_pi_prot_type) {
897                 pr_warn("DIF protection enabled on underlying hardware,"
898                         " ignoring\n");
899                 return count;
900         }
901         if (!da->pi_prot_type) {
902                 pr_warn("DIF protection not supported by backend, ignoring\n");
903                 return count;
904         }
905         da->pi_prot_verify = flag;
906
907         return count;
908 }
909
910 static ssize_t force_pr_aptpl_store(struct config_item *item,
911                 const char *page, size_t count)
912 {
913         struct se_dev_attrib *da = to_attrib(item);
914         bool flag;
915         int ret;
916
917         ret = strtobool(page, &flag);
918         if (ret < 0)
919                 return ret;
920         if (da->da_dev->export_count) {
921                 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
922                        " export_count is %d\n",
923                        da->da_dev, da->da_dev->export_count);
924                 return -EINVAL;
925         }
926
927         da->force_pr_aptpl = flag;
928         pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, flag);
929         return count;
930 }
931
932 static ssize_t emulate_rest_reord_store(struct config_item *item,
933                 const char *page, size_t count)
934 {
935         struct se_dev_attrib *da = to_attrib(item);
936         bool flag;
937         int ret;
938
939         ret = strtobool(page, &flag);
940         if (ret < 0)
941                 return ret;
942
943         if (flag != 0) {
944                 printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
945                         " reordering not implemented\n", da->da_dev);
946                 return -ENOSYS;
947         }
948         da->emulate_rest_reord = flag;
949         pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n",
950                 da->da_dev, flag);
951         return count;
952 }
953
954 static ssize_t unmap_zeroes_data_store(struct config_item *item,
955                 const char *page, size_t count)
956 {
957         struct se_dev_attrib *da = to_attrib(item);
958         bool flag;
959         int ret;
960
961         ret = strtobool(page, &flag);
962         if (ret < 0)
963                 return ret;
964
965         if (da->da_dev->export_count) {
966                 pr_err("dev[%p]: Unable to change SE Device"
967                        " unmap_zeroes_data while export_count is %d\n",
968                        da->da_dev, da->da_dev->export_count);
969                 return -EINVAL;
970         }
971         /*
972          * We expect this value to be non-zero when generic Block Layer
973          * Discard supported is detected iblock_configure_device().
974          */
975         if (flag && !da->max_unmap_block_desc_count) {
976                 pr_err("dev[%p]: Thin Provisioning LBPRZ will not be set"
977                        " because max_unmap_block_desc_count is zero\n",
978                        da->da_dev);
979                 return -ENOSYS;
980         }
981         da->unmap_zeroes_data = flag;
982         pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
983                  da->da_dev, flag);
984         return count;
985 }
986
987 /*
988  * Note, this can only be called on unexported SE Device Object.
989  */
990 static ssize_t queue_depth_store(struct config_item *item,
991                 const char *page, size_t count)
992 {
993         struct se_dev_attrib *da = to_attrib(item);
994         struct se_device *dev = da->da_dev;
995         u32 val;
996         int ret;
997
998         ret = kstrtou32(page, 0, &val);
999         if (ret < 0)
1000                 return ret;
1001
1002         if (dev->export_count) {
1003                 pr_err("dev[%p]: Unable to change SE Device TCQ while"
1004                         " export_count is %d\n",
1005                         dev, dev->export_count);
1006                 return -EINVAL;
1007         }
1008         if (!val) {
1009                 pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
1010                 return -EINVAL;
1011         }
1012
1013         if (val > dev->dev_attrib.queue_depth) {
1014                 if (val > dev->dev_attrib.hw_queue_depth) {
1015                         pr_err("dev[%p]: Passed queue_depth:"
1016                                 " %u exceeds TCM/SE_Device MAX"
1017                                 " TCQ: %u\n", dev, val,
1018                                 dev->dev_attrib.hw_queue_depth);
1019                         return -EINVAL;
1020                 }
1021         }
1022         da->queue_depth = dev->queue_depth = val;
1023         pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val);
1024         return count;
1025 }
1026
1027 static ssize_t optimal_sectors_store(struct config_item *item,
1028                 const char *page, size_t count)
1029 {
1030         struct se_dev_attrib *da = to_attrib(item);
1031         u32 val;
1032         int ret;
1033
1034         ret = kstrtou32(page, 0, &val);
1035         if (ret < 0)
1036                 return ret;
1037
1038         if (da->da_dev->export_count) {
1039                 pr_err("dev[%p]: Unable to change SE Device"
1040                         " optimal_sectors while export_count is %d\n",
1041                         da->da_dev, da->da_dev->export_count);
1042                 return -EINVAL;
1043         }
1044         if (val > da->hw_max_sectors) {
1045                 pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
1046                         " greater than hw_max_sectors: %u\n",
1047                         da->da_dev, val, da->hw_max_sectors);
1048                 return -EINVAL;
1049         }
1050
1051         da->optimal_sectors = val;
1052         pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
1053                         da->da_dev, val);
1054         return count;
1055 }
1056
1057 static ssize_t block_size_store(struct config_item *item,
1058                 const char *page, size_t count)
1059 {
1060         struct se_dev_attrib *da = to_attrib(item);
1061         u32 val;
1062         int ret;
1063
1064         ret = kstrtou32(page, 0, &val);
1065         if (ret < 0)
1066                 return ret;
1067
1068         if (da->da_dev->export_count) {
1069                 pr_err("dev[%p]: Unable to change SE Device block_size"
1070                         " while export_count is %d\n",
1071                         da->da_dev, da->da_dev->export_count);
1072                 return -EINVAL;
1073         }
1074
1075         if (val != 512 && val != 1024 && val != 2048 && val != 4096) {
1076                 pr_err("dev[%p]: Illegal value for block_device: %u"
1077                         " for SE device, must be 512, 1024, 2048 or 4096\n",
1078                         da->da_dev, val);
1079                 return -EINVAL;
1080         }
1081
1082         da->block_size = val;
1083         if (da->max_bytes_per_io)
1084                 da->hw_max_sectors = da->max_bytes_per_io / val;
1085
1086         pr_debug("dev[%p]: SE Device block_size changed to %u\n",
1087                         da->da_dev, val);
1088         return count;
1089 }
1090
1091 static ssize_t alua_support_show(struct config_item *item, char *page)
1092 {
1093         struct se_dev_attrib *da = to_attrib(item);
1094         u8 flags = da->da_dev->transport->transport_flags;
1095
1096         return snprintf(page, PAGE_SIZE, "%d\n",
1097                         flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1);
1098 }
1099
1100 static ssize_t pgr_support_show(struct config_item *item, char *page)
1101 {
1102         struct se_dev_attrib *da = to_attrib(item);
1103         u8 flags = da->da_dev->transport->transport_flags;
1104
1105         return snprintf(page, PAGE_SIZE, "%d\n",
1106                         flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 0 : 1);
1107 }
1108
/*
 * Generate the configfs attribute definitions for the dev_attrib group,
 * wiring each name to the matching *_show()/*_store() helpers above.
 * CONFIGFS_ATTR() creates read-write attributes, CONFIGFS_ATTR_RO()
 * read-only, and CONFIGFS_ATTR_WO() write-only.
 */
CONFIGFS_ATTR(, emulate_model_alias);
CONFIGFS_ATTR(, emulate_dpo);
CONFIGFS_ATTR(, emulate_fua_write);
CONFIGFS_ATTR(, emulate_fua_read);
CONFIGFS_ATTR(, emulate_write_cache);
CONFIGFS_ATTR(, emulate_ua_intlck_ctrl);
CONFIGFS_ATTR(, emulate_tas);
CONFIGFS_ATTR(, emulate_tpu);
CONFIGFS_ATTR(, emulate_tpws);
CONFIGFS_ATTR(, emulate_caw);
CONFIGFS_ATTR(, emulate_3pc);
CONFIGFS_ATTR(, emulate_pr);
CONFIGFS_ATTR(, pi_prot_type);
CONFIGFS_ATTR_RO(, hw_pi_prot_type);
CONFIGFS_ATTR_WO(, pi_prot_format);
CONFIGFS_ATTR(, pi_prot_verify);
CONFIGFS_ATTR(, enforce_pr_isids);
CONFIGFS_ATTR(, is_nonrot);
CONFIGFS_ATTR(, emulate_rest_reord);
CONFIGFS_ATTR(, force_pr_aptpl);
CONFIGFS_ATTR_RO(, hw_block_size);
CONFIGFS_ATTR(, block_size);
CONFIGFS_ATTR_RO(, hw_max_sectors);
CONFIGFS_ATTR(, optimal_sectors);
CONFIGFS_ATTR_RO(, hw_queue_depth);
CONFIGFS_ATTR(, queue_depth);
CONFIGFS_ATTR(, max_unmap_lba_count);
CONFIGFS_ATTR(, max_unmap_block_desc_count);
CONFIGFS_ATTR(, unmap_granularity);
CONFIGFS_ATTR(, unmap_granularity_alignment);
CONFIGFS_ATTR(, unmap_zeroes_data);
CONFIGFS_ATTR(, max_write_same_len);
CONFIGFS_ATTR_RO(, alua_support);
CONFIGFS_ATTR_RO(, pgr_support);
1143
1144 /*
1145  * dev_attrib attributes for devices using the target core SBC/SPC
1146  * interpreter.  Any backend using spc_parse_cdb should be using
1147  * these.
1148  */
1149 struct configfs_attribute *sbc_attrib_attrs[] = {
1150         &attr_emulate_model_alias,
1151         &attr_emulate_dpo,
1152         &attr_emulate_fua_write,
1153         &attr_emulate_fua_read,
1154         &attr_emulate_write_cache,
1155         &attr_emulate_ua_intlck_ctrl,
1156         &attr_emulate_tas,
1157         &attr_emulate_tpu,
1158         &attr_emulate_tpws,
1159         &attr_emulate_caw,
1160         &attr_emulate_3pc,
1161         &attr_emulate_pr,
1162         &attr_pi_prot_type,
1163         &attr_hw_pi_prot_type,
1164         &attr_pi_prot_format,
1165         &attr_pi_prot_verify,
1166         &attr_enforce_pr_isids,
1167         &attr_is_nonrot,
1168         &attr_emulate_rest_reord,
1169         &attr_force_pr_aptpl,
1170         &attr_hw_block_size,
1171         &attr_block_size,
1172         &attr_hw_max_sectors,
1173         &attr_optimal_sectors,
1174         &attr_hw_queue_depth,
1175         &attr_queue_depth,
1176         &attr_max_unmap_lba_count,
1177         &attr_max_unmap_block_desc_count,
1178         &attr_unmap_granularity,
1179         &attr_unmap_granularity_alignment,
1180         &attr_unmap_zeroes_data,
1181         &attr_max_write_same_len,
1182         &attr_alua_support,
1183         &attr_pgr_support,
1184         NULL,
1185 };
1186 EXPORT_SYMBOL(sbc_attrib_attrs);
1187
1188 /*
1189  * Minimal dev_attrib attributes for devices passing through CDBs.
1190  * In this case we only provide a few read-only attributes for
1191  * backwards compatibility.
1192  */
1193 struct configfs_attribute *passthrough_attrib_attrs[] = {
1194         &attr_hw_pi_prot_type,
1195         &attr_hw_block_size,
1196         &attr_hw_max_sectors,
1197         &attr_hw_queue_depth,
1198         &attr_alua_support,
1199         &attr_pgr_support,
1200         NULL,
1201 };
1202 EXPORT_SYMBOL(passthrough_attrib_attrs);
1203
/*
 * Instantiate the config_item_type setup for the dev_attrib and
 * dev_action groups via the TB_CIT_SETUP_DRV() helper macro; the
 * attribute tables are supplied per-backend at registration time.
 */
TB_CIT_SETUP_DRV(dev_attrib, NULL, NULL);
TB_CIT_SETUP_DRV(dev_action, NULL, NULL);
1206
1207 /* End functions for struct config_item_type tb_dev_attrib_cit */
1208
1209 /*  Start functions for struct config_item_type tb_dev_wwn_cit */
1210
/* Map a configfs item in the wwn/ group back to its owning t10_wwn. */
static struct t10_wwn *to_t10_wwn(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_wwn, t10_wwn_group);
}
1215
1216 /*
1217  * VPD page 0x80 Unit serial
1218  */
1219 static ssize_t target_wwn_vpd_unit_serial_show(struct config_item *item,
1220                 char *page)
1221 {
1222         return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
1223                 &to_t10_wwn(item)->unit_serial[0]);
1224 }
1225
1226 static ssize_t target_wwn_vpd_unit_serial_store(struct config_item *item,
1227                 const char *page, size_t count)
1228 {
1229         struct t10_wwn *t10_wwn = to_t10_wwn(item);
1230         struct se_device *dev = t10_wwn->t10_dev;
1231         unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
1232
1233         /*
1234          * If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
1235          * from the struct scsi_device level firmware, do not allow
1236          * VPD Unit Serial to be emulated.
1237          *
1238          * Note this struct scsi_device could also be emulating VPD
1239          * information from its drivers/scsi LLD.  But for now we assume
1240          * it is doing 'the right thing' wrt a world wide unique
1241          * VPD Unit Serial Number that OS dependent multipath can depend on.
1242          */
1243         if (dev->dev_flags & DF_FIRMWARE_VPD_UNIT_SERIAL) {
1244                 pr_err("Underlying SCSI device firmware provided VPD"
1245                         " Unit Serial, ignoring request\n");
1246                 return -EOPNOTSUPP;
1247         }
1248
1249         if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
1250                 pr_err("Emulated VPD Unit Serial exceeds"
1251                 " INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
1252                 return -EOVERFLOW;
1253         }
1254         /*
1255          * Check to see if any active $FABRIC_MOD exports exist.  If they
1256          * do exist, fail here as changing this information on the fly
1257          * (underneath the initiator side OS dependent multipath code)
1258          * could cause negative effects.
1259          */
1260         if (dev->export_count) {
1261                 pr_err("Unable to set VPD Unit Serial while"
1262                         " active %d $FABRIC_MOD exports exist\n",
1263                         dev->export_count);
1264                 return -EINVAL;
1265         }
1266
1267         /*
1268          * This currently assumes ASCII encoding for emulated VPD Unit Serial.
1269          *
1270          * Also, strip any newline added from the userspace
1271          * echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
1272          */
1273         memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
1274         snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
1275         snprintf(dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
1276                         "%s", strstrip(buf));
1277         dev->dev_flags |= DF_EMULATED_VPD_UNIT_SERIAL;
1278
1279         pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
1280                         " %s\n", dev->t10_wwn.unit_serial);
1281
1282         return count;
1283 }
1284
1285 /*
1286  * VPD page 0x83 Protocol Identifier
1287  */
1288 static ssize_t target_wwn_vpd_protocol_identifier_show(struct config_item *item,
1289                 char *page)
1290 {
1291         struct t10_wwn *t10_wwn = to_t10_wwn(item);
1292         struct t10_vpd *vpd;
1293         unsigned char buf[VPD_TMP_BUF_SIZE];
1294         ssize_t len = 0;
1295
1296         memset(buf, 0, VPD_TMP_BUF_SIZE);
1297
1298         spin_lock(&t10_wwn->t10_vpd_lock);
1299         list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
1300                 if (!vpd->protocol_identifier_set)
1301                         continue;
1302
1303                 transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
1304
1305                 if (len + strlen(buf) >= PAGE_SIZE)
1306                         break;
1307
1308                 len += sprintf(page+len, "%s", buf);
1309         }
1310         spin_unlock(&t10_wwn->t10_vpd_lock);
1311
1312         return len;
1313 }
1314
1315 /*
1316  * Generic wrapper for dumping VPD identifiers by association.
1317  */
1318 #define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc)                           \
1319 static ssize_t target_wwn_##_name##_show(struct config_item *item,      \
1320                 char *page)                                             \
1321 {                                                                       \
1322         struct t10_wwn *t10_wwn = to_t10_wwn(item);                     \
1323         struct t10_vpd *vpd;                                            \
1324         unsigned char buf[VPD_TMP_BUF_SIZE];                            \
1325         ssize_t len = 0;                                                \
1326                                                                         \
1327         spin_lock(&t10_wwn->t10_vpd_lock);                              \
1328         list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {    \
1329                 if (vpd->association != _assoc)                         \
1330                         continue;                                       \
1331                                                                         \
1332                 memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
1333                 transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);   \
1334                 if (len + strlen(buf) >= PAGE_SIZE)                     \
1335                         break;                                          \
1336                 len += sprintf(page+len, "%s", buf);                    \
1337                                                                         \
1338                 memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
1339                 transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
1340                 if (len + strlen(buf) >= PAGE_SIZE)                     \
1341                         break;                                          \
1342                 len += sprintf(page+len, "%s", buf);                    \
1343                                                                         \
1344                 memset(buf, 0, VPD_TMP_BUF_SIZE);                       \
1345                 transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
1346                 if (len + strlen(buf) >= PAGE_SIZE)                     \
1347                         break;                                          \
1348                 len += sprintf(page+len, "%s", buf);                    \
1349         }                                                               \
1350         spin_unlock(&t10_wwn->t10_vpd_lock);                            \
1351                                                                         \
1352         return len;                                                     \
1353 }
1354
/* Instantiate one show function per SPC-4 association value. */
/* VPD page 0x83 Association: Logical Unit */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
/* VPD page 0x83 Association: Target Port */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
/* VPD page 0x83 Association: SCSI Target Device */
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);

CONFIGFS_ATTR(target_wwn_, vpd_unit_serial);
CONFIGFS_ATTR_RO(target_wwn_, vpd_protocol_identifier);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_logical_unit);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_target_port);
CONFIGFS_ATTR_RO(target_wwn_, vpd_assoc_scsi_target_device);
1367
/* Attributes exposed in the per-device wwn/ configfs group. */
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
	&target_wwn_attr_vpd_unit_serial,
	&target_wwn_attr_vpd_protocol_identifier,
	&target_wwn_attr_vpd_assoc_logical_unit,
	&target_wwn_attr_vpd_assoc_target_port,
	&target_wwn_attr_vpd_assoc_scsi_target_device,
	NULL,
};

TB_CIT_SETUP(dev_wwn, NULL, NULL, target_core_dev_wwn_attrs);
1378
1379 /*  End functions for struct config_item_type tb_dev_wwn_cit */
1380
1381 /*  Start functions for struct config_item_type tb_dev_pr_cit */
1382
/* Map a configfs item in the pr/ group back to its owning se_device. */
static struct se_device *pr_to_dev(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device,
			dev_pr_group);
}
1388
/*
 * Dump the current SPC-3 Persistent Reservation holder, if any.
 *
 * Caller must hold dev->dev_reservation_lock (see
 * target_pr_res_holder_show()), which protects dev_pr_res_holder.
 */
static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev,
		char *page)
{
	struct se_node_acl *se_nacl;
	struct t10_pr_registration *pr_reg;
	char i_buf[PR_REG_ISID_ID_LEN];

	memset(i_buf, 0, PR_REG_ISID_ID_LEN);

	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg)
		return sprintf(page, "No SPC-3 Reservation holder\n");

	se_nacl = pr_reg->pr_reg_nacl;
	core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);

	return sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n",
		se_nacl->se_tpg->se_tpg_tfo->fabric_name,
		se_nacl->initiatorname, i_buf);
}
1409
1410 static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev,
1411                 char *page)
1412 {
1413         struct se_node_acl *se_nacl;
1414         ssize_t len;
1415
1416         se_nacl = dev->dev_reserved_node_acl;
1417         if (se_nacl) {
1418                 len = sprintf(page,
1419                               "SPC-2 Reservation: %s Initiator: %s\n",
1420                               se_nacl->se_tpg->se_tpg_tfo->fabric_name,
1421                               se_nacl->initiatorname);
1422         } else {
1423                 len = sprintf(page, "No SPC-2 Reservation holder\n");
1424         }
1425         return len;
1426 }
1427
1428 static ssize_t target_pr_res_holder_show(struct config_item *item, char *page)
1429 {
1430         struct se_device *dev = pr_to_dev(item);
1431         int ret;
1432
1433         if (!dev->dev_attrib.emulate_pr)
1434                 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1435
1436         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1437                 return sprintf(page, "Passthrough\n");
1438
1439         spin_lock(&dev->dev_reservation_lock);
1440         if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1441                 ret = target_core_dev_pr_show_spc2_res(dev, page);
1442         else
1443                 ret = target_core_dev_pr_show_spc3_res(dev, page);
1444         spin_unlock(&dev->dev_reservation_lock);
1445         return ret;
1446 }
1447
1448 static ssize_t target_pr_res_pr_all_tgt_pts_show(struct config_item *item,
1449                 char *page)
1450 {
1451         struct se_device *dev = pr_to_dev(item);
1452         ssize_t len = 0;
1453
1454         spin_lock(&dev->dev_reservation_lock);
1455         if (!dev->dev_pr_res_holder) {
1456                 len = sprintf(page, "No SPC-3 Reservation holder\n");
1457         } else if (dev->dev_pr_res_holder->pr_reg_all_tg_pt) {
1458                 len = sprintf(page, "SPC-3 Reservation: All Target"
1459                         " Ports registration\n");
1460         } else {
1461                 len = sprintf(page, "SPC-3 Reservation: Single"
1462                         " Target Port registration\n");
1463         }
1464
1465         spin_unlock(&dev->dev_reservation_lock);
1466         return len;
1467 }
1468
/* Report the current PRGeneration counter as a 32-bit hex string. */
static ssize_t target_pr_res_pr_generation_show(struct config_item *item,
		char *page)
{
	return sprintf(page, "0x%08x\n", pr_to_dev(item)->t10_pr.pr_generation);
}
1474
1475
/*
 * Dump the fabric endpoint (target WWN, relative port, TPG tag and LUN)
 * through which the current SPC-3 reservation holder registered.
 */
static ssize_t target_pr_res_pr_holder_tg_port_show(struct config_item *item,
		char *page)
{
	struct se_device *dev = pr_to_dev(item);
	struct se_node_acl *se_nacl;
	struct se_portal_group *se_tpg;
	struct t10_pr_registration *pr_reg;
	const struct target_core_fabric_ops *tfo;
	ssize_t len = 0;

	/* dev_pr_res_holder is only stable under dev_reservation_lock */
	spin_lock(&dev->dev_reservation_lock);
	pr_reg = dev->dev_pr_res_holder;
	if (!pr_reg) {
		len = sprintf(page, "No SPC-3 Reservation holder\n");
		goto out_unlock;
	}

	se_nacl = pr_reg->pr_reg_nacl;
	se_tpg = se_nacl->se_tpg;
	tfo = se_tpg->se_tpg_tfo;

	len += sprintf(page+len, "SPC-3 Reservation: %s"
		" Target Node Endpoint: %s\n", tfo->fabric_name,
		tfo->tpg_get_wwn(se_tpg));
	len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
		" Identifier Tag: %hu %s Portal Group Tag: %hu"
		" %s Logical Unit: %llu\n", pr_reg->tg_pt_sep_rtpi,
		tfo->fabric_name, tfo->tpg_get_tag(se_tpg),
		tfo->fabric_name, pr_reg->pr_aptpl_target_lun);

out_unlock:
	spin_unlock(&dev->dev_reservation_lock);
	return len;
}
1510
1511
1512 static ssize_t target_pr_res_pr_registered_i_pts_show(struct config_item *item,
1513                 char *page)
1514 {
1515         struct se_device *dev = pr_to_dev(item);
1516         const struct target_core_fabric_ops *tfo;
1517         struct t10_pr_registration *pr_reg;
1518         unsigned char buf[384];
1519         char i_buf[PR_REG_ISID_ID_LEN];
1520         ssize_t len = 0;
1521         int reg_count = 0;
1522
1523         len += sprintf(page+len, "SPC-3 PR Registrations:\n");
1524
1525         spin_lock(&dev->t10_pr.registration_lock);
1526         list_for_each_entry(pr_reg, &dev->t10_pr.registration_list,
1527                         pr_reg_list) {
1528
1529                 memset(buf, 0, 384);
1530                 memset(i_buf, 0, PR_REG_ISID_ID_LEN);
1531                 tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
1532                 core_pr_dump_initiator_port(pr_reg, i_buf,
1533                                         PR_REG_ISID_ID_LEN);
1534                 sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
1535                         tfo->fabric_name,
1536                         pr_reg->pr_reg_nacl->initiatorname, i_buf, pr_reg->pr_res_key,
1537                         pr_reg->pr_res_generation);
1538
1539                 if (len + strlen(buf) >= PAGE_SIZE)
1540                         break;
1541
1542                 len += sprintf(page+len, "%s", buf);
1543                 reg_count++;
1544         }
1545         spin_unlock(&dev->t10_pr.registration_lock);
1546
1547         if (!reg_count)
1548                 len += sprintf(page+len, "None\n");
1549
1550         return len;
1551 }
1552
1553 static ssize_t target_pr_res_pr_type_show(struct config_item *item, char *page)
1554 {
1555         struct se_device *dev = pr_to_dev(item);
1556         struct t10_pr_registration *pr_reg;
1557         ssize_t len = 0;
1558
1559         spin_lock(&dev->dev_reservation_lock);
1560         pr_reg = dev->dev_pr_res_holder;
1561         if (pr_reg) {
1562                 len = sprintf(page, "SPC-3 Reservation Type: %s\n",
1563                         core_scsi3_pr_dump_type(pr_reg->pr_res_type));
1564         } else {
1565                 len = sprintf(page, "No SPC-3 Reservation holder\n");
1566         }
1567
1568         spin_unlock(&dev->dev_reservation_lock);
1569         return len;
1570 }
1571
1572 static ssize_t target_pr_res_type_show(struct config_item *item, char *page)
1573 {
1574         struct se_device *dev = pr_to_dev(item);
1575
1576         if (!dev->dev_attrib.emulate_pr)
1577                 return sprintf(page, "SPC_RESERVATIONS_DISABLED\n");
1578         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR)
1579                 return sprintf(page, "SPC_PASSTHROUGH\n");
1580         if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1581                 return sprintf(page, "SPC2_RESERVATIONS\n");
1582
1583         return sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
1584 }
1585
1586 static ssize_t target_pr_res_aptpl_active_show(struct config_item *item,
1587                 char *page)
1588 {
1589         struct se_device *dev = pr_to_dev(item);
1590
1591         if (!dev->dev_attrib.emulate_pr ||
1592             (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1593                 return 0;
1594
1595         return sprintf(page, "APTPL Bit Status: %s\n",
1596                 (dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
1597 }
1598
1599 static ssize_t target_pr_res_aptpl_metadata_show(struct config_item *item,
1600                 char *page)
1601 {
1602         struct se_device *dev = pr_to_dev(item);
1603
1604         if (!dev->dev_attrib.emulate_pr ||
1605             (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
1606                 return 0;
1607
1608         return sprintf(page, "Ready to process PR APTPL metadata..\n");
1609 }
1610
/* Token ids for the APTPL metadata key=value grammar parsed below. */
enum {
	Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
	Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
	Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
	Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
1617
/* key=value grammar accepted by target_pr_res_aptpl_metadata_store(). */
static match_table_t tokens = {
	{Opt_initiator_fabric, "initiator_fabric=%s"},
	{Opt_initiator_node, "initiator_node=%s"},
	{Opt_initiator_sid, "initiator_sid=%s"},
	{Opt_sa_res_key, "sa_res_key=%s"},
	{Opt_res_holder, "res_holder=%d"},
	{Opt_res_type, "res_type=%d"},
	{Opt_res_scope, "res_scope=%d"},
	{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
	{Opt_mapped_lun, "mapped_lun=%u"},
	{Opt_target_fabric, "target_fabric=%s"},
	{Opt_target_node, "target_node=%s"},
	{Opt_tpgt, "tpgt=%d"},
	{Opt_port_rtpi, "port_rtpi=%d"},
	{Opt_target_lun, "target_lun=%u"},
	{Opt_err, NULL}
};
1635
/*
 * Restore a single APTPL (Activate Persistence across Target Power Loss)
 * registration from the comma/newline separated key=value list written by
 * userspace when replaying saved PR metadata.
 *
 * Returns @count on success or a negative errno on failure.
 */
static ssize_t target_pr_res_aptpl_metadata_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = pr_to_dev(item);
	unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
	unsigned char *t_fabric = NULL, *t_port = NULL;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	unsigned long long tmp_ll;
	u64 sa_res_key = 0;
	u64 mapped_lun = 0, target_lun = 0;
	int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
	u16 tpgt = 0;
	u8 type = 0;

	/* Accept and ignore the write when the core does not emulate PR. */
	if (!dev->dev_attrib.emulate_pr ||
	    (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR))
		return count;
	if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
		return count;

	/* APTPL metadata may only be replayed before any fabric export. */
	if (dev->export_count) {
		pr_debug("Unable to process APTPL metadata while"
			" active fabric exports exist\n");
		return -EINVAL;
	}

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		/*
		 * NOTE(review): a key that appears twice leaks the first
		 * match_strdup() allocation, and the strlen() checks below
		 * only 'break' out of the switch, so a later option can
		 * overwrite ret = -EINVAL -- confirm callers only ever
		 * write well-formed, single-shot metadata.
		 */
		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_initiator_fabric:
			i_fabric = match_strdup(args);
			if (!i_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_initiator_node:
			i_port = match_strdup(args);
			if (!i_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
				pr_err("APTPL metadata initiator_node="
					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
					PR_APTPL_MAX_IPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_initiator_sid:
			isid = match_strdup(args);
			if (!isid) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(isid) >= PR_REG_ISID_LEN) {
				pr_err("APTPL metadata initiator_isid"
					"= exceeds PR_REG_ISID_LEN: %d\n",
					PR_REG_ISID_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_sa_res_key:
			ret = match_u64(args,  &tmp_ll);
			if (ret < 0) {
				pr_err("kstrtoull() failed for sa_res_key=\n");
				goto out;
			}
			sa_res_key = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Reservation
		 */
		case Opt_res_holder:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			res_holder = arg;
			break;
		case Opt_res_type:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			type = (u8)arg;
			break;
		case Opt_res_scope:
			/* Scope is parsed for validation but otherwise unused. */
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_res_all_tg_pt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			all_tg_pt = (int)arg;
			break;
		case Opt_mapped_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			mapped_lun = (u64)tmp_ll;
			break;
		/*
		 * PR APTPL Metadata for Target Port
		 */
		case Opt_target_fabric:
			t_fabric = match_strdup(args);
			if (!t_fabric) {
				ret = -ENOMEM;
				goto out;
			}
			break;
		case Opt_target_node:
			t_port = match_strdup(args);
			if (!t_port) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
				pr_err("APTPL metadata target_node="
					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
					PR_APTPL_MAX_TPORT_LEN);
				ret = -EINVAL;
				break;
			}
			break;
		case Opt_tpgt:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			tpgt = (u16)arg;
			break;
		case Opt_port_rtpi:
			/* Relative target port id is accepted but unused. */
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			break;
		case Opt_target_lun:
			ret = match_u64(args, &tmp_ll);
			if (ret)
				goto out;
			target_lun = (u64)tmp_ll;
			break;
		default:
			break;
		}
	}

	/* A registration needs both ports and a non-zero reservation key. */
	if (!i_port || !t_port || !sa_res_key) {
		pr_err("Illegal parameters for APTPL registration\n");
		ret = -EINVAL;
		goto out;
	}

	if (res_holder && !(type)) {
		pr_err("Illegal PR type: 0x%02x for reservation"
				" holder\n", type);
		ret = -EINVAL;
		goto out;
	}

	ret = core_scsi3_alloc_aptpl_registration(&dev->t10_pr, sa_res_key,
			i_port, isid, mapped_lun, t_port, tpgt, target_lun,
			res_holder, all_tg_pt, type);
out:
	kfree(i_fabric);
	kfree(i_port);
	kfree(isid);
	kfree(t_fabric);
	kfree(t_port);
	kfree(orig);
	return (ret == 0) ? count : ret;
}
1820
1821
CONFIGFS_ATTR_RO(target_pr_, res_holder);
CONFIGFS_ATTR_RO(target_pr_, res_pr_all_tgt_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_generation);
CONFIGFS_ATTR_RO(target_pr_, res_pr_holder_tg_port);
CONFIGFS_ATTR_RO(target_pr_, res_pr_registered_i_pts);
CONFIGFS_ATTR_RO(target_pr_, res_pr_type);
CONFIGFS_ATTR_RO(target_pr_, res_type);
CONFIGFS_ATTR_RO(target_pr_, res_aptpl_active);
CONFIGFS_ATTR(target_pr_, res_aptpl_metadata);

/* Attributes published under <dev>/pr/; only res_aptpl_metadata is writable. */
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
	&target_pr_attr_res_holder,
	&target_pr_attr_res_pr_all_tgt_pts,
	&target_pr_attr_res_pr_generation,
	&target_pr_attr_res_pr_holder_tg_port,
	&target_pr_attr_res_pr_registered_i_pts,
	&target_pr_attr_res_pr_type,
	&target_pr_attr_res_type,
	&target_pr_attr_res_aptpl_active,
	&target_pr_attr_res_aptpl_metadata,
	NULL,
};

/* Instantiate the config_item_type for the per-device pr/ group. */
TB_CIT_SETUP(dev_pr, NULL, NULL, target_core_dev_pr_attrs);
1846
1847 /*  End functions for struct config_item_type tb_dev_pr_cit */
1848
1849 /*  Start functions for struct config_item_type tb_dev_cit */
1850
/* Translate a configfs item back to its owning se_device. */
static inline struct se_device *to_device(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_device, dev_group);
}
1855
1856 static ssize_t target_dev_info_show(struct config_item *item, char *page)
1857 {
1858         struct se_device *dev = to_device(item);
1859         int bl = 0;
1860         ssize_t read_bytes = 0;
1861
1862         transport_dump_dev_state(dev, page, &bl);
1863         read_bytes += bl;
1864         read_bytes += dev->transport->show_configfs_dev_params(dev,
1865                         page+read_bytes);
1866         return read_bytes;
1867 }
1868
/* Pass backend control strings straight through to the backend driver. */
static ssize_t target_dev_control_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);

	return dev->transport->set_configfs_dev_params(dev, page, count);
}
1876
1877 static ssize_t target_dev_alias_show(struct config_item *item, char *page)
1878 {
1879         struct se_device *dev = to_device(item);
1880
1881         if (!(dev->dev_flags & DF_USING_ALIAS))
1882                 return 0;
1883
1884         return snprintf(page, PAGE_SIZE, "%s\n", dev->dev_alias);
1885 }
1886
/*
 * Store a human readable alias for the device (at most
 * SE_DEV_ALIAS_LEN - 1 bytes), stripping one trailing newline.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t target_dev_alias_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_DEV_ALIAS_LEN-1)) {
		pr_err("alias count: %d exceeds"
			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
			SE_DEV_ALIAS_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->dev_alias[0], SE_DEV_ALIAS_LEN, "%s", page);
	if (!read_bytes)
		return -EINVAL;
	/* Drop the trailing newline a shell echo will have appended. */
	if (dev->dev_alias[read_bytes - 1] == '\n')
		dev->dev_alias[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_ALIAS;

	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->dev_alias);

	return read_bytes;
}
1916
1917 static ssize_t target_dev_udev_path_show(struct config_item *item, char *page)
1918 {
1919         struct se_device *dev = to_device(item);
1920
1921         if (!(dev->dev_flags & DF_USING_UDEV_PATH))
1922                 return 0;
1923
1924         return snprintf(page, PAGE_SIZE, "%s\n", dev->udev_path);
1925 }
1926
/*
 * Store the udev path for the device (at most SE_UDEV_PATH_LEN - 1
 * bytes), stripping one trailing newline.  Returns the number of bytes
 * consumed or a negative errno.
 */
static ssize_t target_dev_udev_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	struct se_hba *hba = dev->se_hba;
	ssize_t read_bytes;

	if (count > (SE_UDEV_PATH_LEN-1)) {
		pr_err("udev_path count: %d exceeds"
			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
			SE_UDEV_PATH_LEN-1);
		return -EINVAL;
	}

	read_bytes = snprintf(&dev->udev_path[0], SE_UDEV_PATH_LEN,
			"%s", page);
	if (!read_bytes)
		return -EINVAL;
	/* Drop the trailing newline a shell echo will have appended. */
	if (dev->udev_path[read_bytes - 1] == '\n')
		dev->udev_path[read_bytes - 1] = '\0';

	dev->dev_flags |= DF_USING_UDEV_PATH;

	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
		config_item_name(&hba->hba_group.cg_item),
		config_item_name(&dev->dev_group.cg_item),
		dev->udev_path);

	return read_bytes;
}
1957
1958 static ssize_t target_dev_enable_show(struct config_item *item, char *page)
1959 {
1960         struct se_device *dev = to_device(item);
1961
1962         return snprintf(page, PAGE_SIZE, "%d\n", target_dev_configured(dev));
1963 }
1964
/*
 * Writing "1" configures the backend device and brings it online.
 *
 * NOTE(review): strstr() accepts any input that merely contains a '1'
 * (e.g. "10", "x1") -- confirm existing tooling before tightening the
 * parsing.
 */
static ssize_t target_dev_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_device *dev = to_device(item);
	char *ptr;
	int ret;

	ptr = strstr(page, "1");
	if (!ptr) {
		pr_err("For dev_enable ops, only valid value"
				" is \"1\"\n");
		return -EINVAL;
	}

	ret = target_configure_device(dev);
	if (ret)
		return ret;
	return count;
}
1984
1985 static ssize_t target_dev_alua_lu_gp_show(struct config_item *item, char *page)
1986 {
1987         struct se_device *dev = to_device(item);
1988         struct config_item *lu_ci;
1989         struct t10_alua_lu_gp *lu_gp;
1990         struct t10_alua_lu_gp_member *lu_gp_mem;
1991         ssize_t len = 0;
1992
1993         lu_gp_mem = dev->dev_alua_lu_gp_mem;
1994         if (!lu_gp_mem)
1995                 return 0;
1996
1997         spin_lock(&lu_gp_mem->lu_gp_mem_lock);
1998         lu_gp = lu_gp_mem->lu_gp;
1999         if (lu_gp) {
2000                 lu_ci = &lu_gp->lu_gp_group.cg_item;
2001                 len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
2002                         config_item_name(lu_ci), lu_gp->lu_gp_id);
2003         }
2004         spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2005
2006         return len;
2007 }
2008
2009 static ssize_t target_dev_alua_lu_gp_store(struct config_item *item,
2010                 const char *page, size_t count)
2011 {
2012         struct se_device *dev = to_device(item);
2013         struct se_hba *hba = dev->se_hba;
2014         struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
2015         struct t10_alua_lu_gp_member *lu_gp_mem;
2016         unsigned char buf[LU_GROUP_NAME_BUF];
2017         int move = 0;
2018
2019         lu_gp_mem = dev->dev_alua_lu_gp_mem;
2020         if (!lu_gp_mem)
2021                 return count;
2022
2023         if (count > LU_GROUP_NAME_BUF) {
2024                 pr_err("ALUA LU Group Alias too large!\n");
2025                 return -EINVAL;
2026         }
2027         memset(buf, 0, LU_GROUP_NAME_BUF);
2028         memcpy(buf, page, count);
2029         /*
2030          * Any ALUA logical unit alias besides "NULL" means we will be
2031          * making a new group association.
2032          */
2033         if (strcmp(strstrip(buf), "NULL")) {
2034                 /*
2035                  * core_alua_get_lu_gp_by_name() will increment reference to
2036                  * struct t10_alua_lu_gp.  This reference is released with
2037                  * core_alua_get_lu_gp_by_name below().
2038                  */
2039                 lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
2040                 if (!lu_gp_new)
2041                         return -ENODEV;
2042         }
2043
2044         spin_lock(&lu_gp_mem->lu_gp_mem_lock);
2045         lu_gp = lu_gp_mem->lu_gp;
2046         if (lu_gp) {
2047                 /*
2048                  * Clearing an existing lu_gp association, and replacing
2049                  * with NULL
2050                  */
2051                 if (!lu_gp_new) {
2052                         pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
2053                                 " from ALUA LU Group: core/alua/lu_gps/%s, ID:"
2054                                 " %hu\n",
2055                                 config_item_name(&hba->hba_group.cg_item),
2056                                 config_item_name(&dev->dev_group.cg_item),
2057                                 config_item_name(&lu_gp->lu_gp_group.cg_item),
2058                                 lu_gp->lu_gp_id);
2059
2060                         __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2061                         spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2062
2063                         return count;
2064                 }
2065                 /*
2066                  * Removing existing association of lu_gp_mem with lu_gp
2067                  */
2068                 __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
2069                 move = 1;
2070         }
2071         /*
2072          * Associate lu_gp_mem with lu_gp_new.
2073          */
2074         __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
2075         spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
2076
2077         pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
2078                 " core/alua/lu_gps/%s, ID: %hu\n",
2079                 (move) ? "Moving" : "Adding",
2080                 config_item_name(&hba->hba_group.cg_item),
2081                 config_item_name(&dev->dev_group.cg_item),
2082                 config_item_name(&lu_gp_new->lu_gp_group.cg_item),
2083                 lu_gp_new->lu_gp_id);
2084
2085         core_alua_put_lu_gp_from_name(lu_gp_new);
2086         return count;
2087 }
2088
2089 static ssize_t target_dev_lba_map_show(struct config_item *item, char *page)
2090 {
2091         struct se_device *dev = to_device(item);
2092         struct t10_alua_lba_map *map;
2093         struct t10_alua_lba_map_member *mem;
2094         char *b = page;
2095         int bl = 0;
2096         char state;
2097
2098         spin_lock(&dev->t10_alua.lba_map_lock);
2099         if (!list_empty(&dev->t10_alua.lba_map_list))
2100             bl += sprintf(b + bl, "%u %u\n",
2101                           dev->t10_alua.lba_map_segment_size,
2102                           dev->t10_alua.lba_map_segment_multiplier);
2103         list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) {
2104                 bl += sprintf(b + bl, "%llu %llu",
2105                               map->lba_map_first_lba, map->lba_map_last_lba);
2106                 list_for_each_entry(mem, &map->lba_map_mem_list,
2107                                     lba_map_mem_list) {
2108                         switch (mem->lba_map_mem_alua_state) {
2109                         case ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED:
2110                                 state = 'O';
2111                                 break;
2112                         case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
2113                                 state = 'A';
2114                                 break;
2115                         case ALUA_ACCESS_STATE_STANDBY:
2116                                 state = 'S';
2117                                 break;
2118                         case ALUA_ACCESS_STATE_UNAVAILABLE:
2119                                 state = 'U';
2120                                 break;
2121                         default:
2122                                 state = '.';
2123                                 break;
2124                         }
2125                         bl += sprintf(b + bl, " %d:%c",
2126                                       mem->lba_map_mem_alua_pg_id, state);
2127                 }
2128                 bl += sprintf(b + bl, "\n");
2129         }
2130         spin_unlock(&dev->t10_alua.lba_map_lock);
2131         return bl;
2132 }
2133
2134 static ssize_t target_dev_lba_map_store(struct config_item *item,
2135                 const char *page, size_t count)
2136 {
2137         struct se_device *dev = to_device(item);
2138         struct t10_alua_lba_map *lba_map = NULL;
2139         struct list_head lba_list;
2140         char *map_entries, *orig, *ptr;
2141         char state;
2142         int pg_num = -1, pg;
2143         int ret = 0, num = 0, pg_id, alua_state;
2144         unsigned long start_lba = -1, end_lba = -1;
2145         unsigned long segment_size = -1, segment_mult = -1;
2146
2147         orig = map_entries = kstrdup(page, GFP_KERNEL);
2148         if (!map_entries)
2149                 return -ENOMEM;
2150
2151         INIT_LIST_HEAD(&lba_list);
2152         while ((ptr = strsep(&map_entries, "\n")) != NULL) {
2153                 if (!*ptr)
2154                         continue;
2155
2156                 if (num == 0) {
2157                         if (sscanf(ptr, "%lu %lu\n",
2158                                    &segment_size, &segment_mult) != 2) {
2159                                 pr_err("Invalid line %d\n", num);
2160                                 ret = -EINVAL;
2161                                 break;
2162                         }
2163                         num++;
2164                         continue;
2165                 }
2166                 if (sscanf(ptr, "%lu %lu", &start_lba, &end_lba) != 2) {
2167                         pr_err("Invalid line %d\n", num);
2168                         ret = -EINVAL;
2169                         break;
2170                 }
2171                 ptr = strchr(ptr, ' ');
2172                 if (!ptr) {
2173                         pr_err("Invalid line %d, missing end lba\n", num);
2174                         ret = -EINVAL;
2175                         break;
2176                 }
2177                 ptr++;
2178                 ptr = strchr(ptr, ' ');
2179                 if (!ptr) {
2180                         pr_err("Invalid line %d, missing state definitions\n",
2181                                num);
2182                         ret = -EINVAL;
2183                         break;
2184                 }
2185                 ptr++;
2186                 lba_map = core_alua_allocate_lba_map(&lba_list,
2187                                                      start_lba, end_lba);
2188                 if (IS_ERR(lba_map)) {
2189                         ret = PTR_ERR(lba_map);
2190                         break;
2191                 }
2192                 pg = 0;
2193                 while (sscanf(ptr, "%d:%c", &pg_id, &state) == 2) {
2194                         switch (state) {
2195                         case 'O':
2196                                 alua_state = ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED;
2197                                 break;
2198                         case 'A':
2199                                 alua_state = ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED;
2200                                 break;
2201                         case 'S':
2202                                 alua_state = ALUA_ACCESS_STATE_STANDBY;
2203                                 break;
2204                         case 'U':
2205                                 alua_state = ALUA_ACCESS_STATE_UNAVAILABLE;
2206                                 break;
2207                         default:
2208                                 pr_err("Invalid ALUA state '%c'\n", state);
2209                                 ret = -EINVAL;
2210                                 goto out;
2211                         }
2212
2213                         ret = core_alua_allocate_lba_map_mem(lba_map,
2214                                                              pg_id, alua_state);
2215                         if (ret) {
2216                                 pr_err("Invalid target descriptor %d:%c "
2217                                        "at line %d\n",
2218                                        pg_id, state, num);
2219                                 break;
2220                         }
2221                         pg++;
2222                         ptr = strchr(ptr, ' ');
2223                         if (ptr)
2224                                 ptr++;
2225                         else
2226                                 break;
2227                 }
2228                 if (pg_num == -1)
2229                     pg_num = pg;
2230                 else if (pg != pg_num) {
2231                         pr_err("Only %d from %d port groups definitions "
2232                                "at line %d\n", pg, pg_num, num);
2233                         ret = -EINVAL;
2234                         break;
2235                 }
2236                 num++;
2237         }
2238 out:
2239         if (ret) {
2240                 core_alua_free_lba_map(&lba_list);
2241                 count = ret;
2242         } else
2243                 core_alua_set_lba_map(dev, &lba_list,
2244                                       segment_size, segment_mult);
2245         kfree(orig);
2246         return count;
2247 }
2248
CONFIGFS_ATTR_RO(target_dev_, info);
CONFIGFS_ATTR_WO(target_dev_, control);
CONFIGFS_ATTR(target_dev_, alias);
CONFIGFS_ATTR(target_dev_, udev_path);
CONFIGFS_ATTR(target_dev_, enable);
CONFIGFS_ATTR(target_dev_, alua_lu_gp);
CONFIGFS_ATTR(target_dev_, lba_map);

/* Attributes published at the top level of each backend device group. */
static struct configfs_attribute *target_core_dev_attrs[] = {
	&target_dev_attr_info,
	&target_dev_attr_control,
	&target_dev_attr_alias,
	&target_dev_attr_udev_path,
	&target_dev_attr_enable,
	&target_dev_attr_alua_lu_gp,
	&target_dev_attr_lba_map,
	NULL,
};
2267
2268 static void target_core_dev_release(struct config_item *item)
2269 {
2270         struct config_group *dev_cg = to_config_group(item);
2271         struct se_device *dev =
2272                 container_of(dev_cg, struct se_device, dev_group);
2273
2274         target_free_device(dev);
2275 }
2276
/*
 * Used in target_core_fabric_configfs.c to verify valid se_device symlink
 * within target_fabric_port_link()
 */
struct configfs_item_operations target_core_dev_item_ops = {
	.release                = target_core_dev_release,
};

/* Instantiate the config_item_type for a backend device group. */
TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
2286
2287 /* End functions for struct config_item_type tb_dev_cit */
2288
2289 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
2290
/* Translate a configfs item back to its owning ALUA LU group. */
static inline struct t10_alua_lu_gp *to_lu_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_lu_gp,
			lu_gp_group);
}
2296
2297 static ssize_t target_lu_gp_lu_gp_id_show(struct config_item *item, char *page)
2298 {
2299         struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
2300
2301         if (!lu_gp->lu_gp_valid_id)
2302                 return 0;
2303         return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
2304 }
2305
/*
 * Assign the ALUA LU group ID parsed from @page.  IDs are limited to
 * 16 bits.  Returns @count on success or a negative errno.
 */
static ssize_t target_lu_gp_lu_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
	unsigned long lu_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &lu_gp_id);
	if (ret < 0) {
		pr_err("kstrtoul() returned %d for"
			" lu_gp_id\n", ret);
		return ret;
	}
	if (lu_gp_id > 0x0000ffff) {
		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
			" 0x0000ffff\n", lu_gp_id);
		return -EINVAL;
	}

	/* NOTE(review): the real errno from the helper is folded to -EINVAL. */
	ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s to ID: %hu\n",
		config_item_name(&alua_lu_gp_cg->cg_item),
		lu_gp->lu_gp_id);

	return count;
}
2337
/*
 * List "<hba>/<device>" for every member of this LU group, one per line,
 * truncating when PAGE_SIZE would be exceeded.
 *
 * NOTE(review): cur_len is bumped to include the NUL terminator and that
 * byte is memcpy()'d and counted in the returned length, so the returned
 * buffer carries an embedded '\0' after each entry.
 */
static ssize_t target_lu_gp_members_show(struct config_item *item, char *page)
{
	struct t10_alua_lu_gp *lu_gp = to_lu_gp(item);
	struct se_device *dev;
	struct se_hba *hba;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	ssize_t len = 0, cur_len;
	unsigned char buf[LU_GROUP_NAME_BUF];

	memset(buf, 0, LU_GROUP_NAME_BUF);

	/* lu_gp_lock guards the group's member list during the walk. */
	spin_lock(&lu_gp->lu_gp_lock);
	list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
		dev = lu_gp_mem->lu_gp_mem_dev;
		hba = dev->se_hba;

		cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
			config_item_name(&hba->hba_group.cg_item),
			config_item_name(&dev->dev_group.cg_item));
		cur_len++; /* Extra byte for NULL terminator */

		if ((cur_len + len) > PAGE_SIZE) {
			pr_warn("Ran out of lu_gp_show_attr"
				"_members buffer\n");
			break;
		}
		memcpy(page+len, buf, cur_len);
		len += cur_len;
	}
	spin_unlock(&lu_gp->lu_gp_lock);

	return len;
}
2371
/* Attribute hookup for a lu_gp group: lu_gp_id is RW, members is RO. */
CONFIGFS_ATTR(target_lu_gp_, lu_gp_id);
CONFIGFS_ATTR_RO(target_lu_gp_, members);

static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
	&target_lu_gp_attr_lu_gp_id,
	&target_lu_gp_attr_members,
	NULL,
};
2380
2381 static void target_core_alua_lu_gp_release(struct config_item *item)
2382 {
2383         struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2384                         struct t10_alua_lu_gp, lu_gp_group);
2385
2386         core_alua_free_lu_gp(lu_gp);
2387 }
2388
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
	.release		= target_core_alua_lu_gp_release,
};

/* Item type for an individual core/alua/lu_gps/$NAME group. */
static const struct config_item_type target_core_alua_lu_gp_cit = {
	.ct_item_ops		= &target_core_alua_lu_gp_ops,
	.ct_attrs		= target_core_alua_lu_gp_attrs,
	.ct_owner		= THIS_MODULE,
};
2398
2399 /* End functions for struct config_item_type target_core_alua_lu_gp_cit */
2400
2401 /* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
2402
2403 static struct config_group *target_core_alua_create_lu_gp(
2404         struct config_group *group,
2405         const char *name)
2406 {
2407         struct t10_alua_lu_gp *lu_gp;
2408         struct config_group *alua_lu_gp_cg = NULL;
2409         struct config_item *alua_lu_gp_ci = NULL;
2410
2411         lu_gp = core_alua_allocate_lu_gp(name, 0);
2412         if (IS_ERR(lu_gp))
2413                 return NULL;
2414
2415         alua_lu_gp_cg = &lu_gp->lu_gp_group;
2416         alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
2417
2418         config_group_init_type_name(alua_lu_gp_cg, name,
2419                         &target_core_alua_lu_gp_cit);
2420
2421         pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
2422                 " Group: core/alua/lu_gps/%s\n",
2423                 config_item_name(alua_lu_gp_ci));
2424
2425         return alua_lu_gp_cg;
2426
2427 }
2428
/*
 * configfs ->drop_item() for rmdir(2) of a lu_gps/$NAME group.  The
 * final put triggers target_core_alua_lu_gp_release() below.
 */
static void target_core_alua_drop_lu_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
			struct t10_alua_lu_gp, lu_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
		" Group: core/alua/lu_gps/%s, ID: %hu\n",
		config_item_name(item), lu_gp->lu_gp_id);
	/*
	 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
	 * -> target_core_alua_lu_gp_release()
	 */
	config_item_put(item);
}
2445
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
	.make_group		= &target_core_alua_create_lu_gp,
	.drop_item		= &target_core_alua_drop_lu_gp,
};

/* Item type for the core/alua/lu_gps/ directory itself. */
static const struct config_item_type target_core_alua_lu_gps_cit = {
	.ct_item_ops		= NULL,
	.ct_group_ops		= &target_core_alua_lu_gps_group_ops,
	.ct_owner		= THIS_MODULE,
};
2456
2457 /* End functions for struct config_item_type target_core_alua_lu_gps_cit */
2458
2459 /* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2460
/* Map a configfs item back to its containing ALUA Target Port Group. */
static inline struct t10_alua_tg_pt_gp *to_tg_pt_gp(struct config_item *item)
{
	return container_of(to_config_group(item), struct t10_alua_tg_pt_gp,
			tg_pt_gp_group);
}
2466
2467 static ssize_t target_tg_pt_gp_alua_access_state_show(struct config_item *item,
2468                 char *page)
2469 {
2470         return sprintf(page, "%d\n",
2471                        to_tg_pt_gp(item)->tg_pt_gp_alua_access_state);
2472 }
2473
/*
 * Trigger an implicit ALUA access-state transition via configfs.
 * Requires a valid tg_pt_gp ID, a configured backing device, and
 * TPGS_IMPLICIT_ALUA enabled on the group.
 */
static ssize_t target_tg_pt_gp_alua_access_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
	unsigned long tmp;
	int new_state, ret;

	if (!tg_pt_gp->tg_pt_gp_valid_id) {
		pr_err("Unable to do implicit ALUA on non valid"
			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
		return -EINVAL;
	}
	if (!target_dev_configured(dev)) {
		pr_err("Unable to set alua_access_state while device is"
		       " not configured\n");
		return -ENODEV;
	}

	ret = kstrtoul(page, 0, &tmp);
	if (ret < 0) {
		pr_err("Unable to extract new ALUA access state from"
				" %s\n", page);
		return ret;
	}
	new_state = (int)tmp;

	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICIT_ALUA)) {
		pr_err("Unable to process implicit configfs ALUA"
			" transition while TPGS_IMPLICIT_ALUA is disabled\n");
		return -EINVAL;
	}
	if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA &&
	    new_state == ALUA_ACCESS_STATE_LBA_DEPENDENT) {
		/* LBA DEPENDENT is only allowed with implicit ALUA */
		pr_err("Unable to process implicit configfs ALUA transition"
		       " while explicit ALUA management is enabled\n");
		return -EINVAL;
	}

	ret = core_alua_do_port_transition(tg_pt_gp, dev,
					NULL, NULL, new_state, 0);
	return (!ret) ? count : -EINVAL;
}
2518
2519 static ssize_t target_tg_pt_gp_alua_access_status_show(struct config_item *item,
2520                 char *page)
2521 {
2522         struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2523         return sprintf(page, "%s\n",
2524                 core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
2525 }
2526
2527 static ssize_t target_tg_pt_gp_alua_access_status_store(
2528                 struct config_item *item, const char *page, size_t count)
2529 {
2530         struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2531         unsigned long tmp;
2532         int new_status, ret;
2533
2534         if (!tg_pt_gp->tg_pt_gp_valid_id) {
2535                 pr_err("Unable to do set ALUA access status on non"
2536                         " valid tg_pt_gp ID: %hu\n",
2537                         tg_pt_gp->tg_pt_gp_valid_id);
2538                 return -EINVAL;
2539         }
2540
2541         ret = kstrtoul(page, 0, &tmp);
2542         if (ret < 0) {
2543                 pr_err("Unable to extract new ALUA access status"
2544                                 " from %s\n", page);
2545                 return ret;
2546         }
2547         new_status = (int)tmp;
2548
2549         if ((new_status != ALUA_STATUS_NONE) &&
2550             (new_status != ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG) &&
2551             (new_status != ALUA_STATUS_ALTERED_BY_IMPLICIT_ALUA)) {
2552                 pr_err("Illegal ALUA access status: 0x%02x\n",
2553                                 new_status);
2554                 return -EINVAL;
2555         }
2556
2557         tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
2558         return count;
2559 }
2560
/* alua_access_type: parsing/formatting delegated to core ALUA helpers. */
static ssize_t target_tg_pt_gp_alua_access_type_show(struct config_item *item,
		char *page)
{
	return core_alua_show_access_type(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_alua_access_type_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_access_type(to_tg_pt_gp(item), page, count);
}
2572
/*
 * Generate show/store handlers for one "alua_support_$NAME" attribute
 * backed by bit @_bit of tg_pt_gp_alua_supported_states.  The store
 * side requires a valid tg_pt_gp ID and accepts only "0" or "1".
 */
#define ALUA_SUPPORTED_STATE_ATTR(_name, _bit)				\
static ssize_t target_tg_pt_gp_alua_support_##_name##_show(		\
		struct config_item *item, char *p)			\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	return sprintf(p, "%d\n",					\
		!!(t->tg_pt_gp_alua_supported_states & _bit));		\
}									\
									\
static ssize_t target_tg_pt_gp_alua_support_##_name##_store(		\
		struct config_item *item, const char *p, size_t c)	\
{									\
	struct t10_alua_tg_pt_gp *t = to_tg_pt_gp(item);		\
	unsigned long tmp;						\
	int ret;							\
									\
	if (!t->tg_pt_gp_valid_id) {					\
		pr_err("Unable to do set " #_name " ALUA state on non"	\
		       " valid tg_pt_gp ID: %hu\n",			\
		       t->tg_pt_gp_valid_id);				\
		return -EINVAL;						\
	}								\
									\
	ret = kstrtoul(p, 0, &tmp);					\
	if (ret < 0) {							\
		pr_err("Invalid value '%s', must be '0' or '1'\n", p);	\
		return -EINVAL;						\
	}								\
	if (tmp > 1) {							\
		pr_err("Invalid value '%ld', must be '0' or '1'\n", tmp); \
		return -EINVAL;						\
	}								\
	if (tmp)							\
		t->tg_pt_gp_alua_supported_states |= _bit;		\
	else								\
		t->tg_pt_gp_alua_supported_states &= ~_bit;		\
									\
	return c;							\
}

/* One attribute per ALUA target port asymmetric access state. */
ALUA_SUPPORTED_STATE_ATTR(transitioning, ALUA_T_SUP);
ALUA_SUPPORTED_STATE_ATTR(offline, ALUA_O_SUP);
ALUA_SUPPORTED_STATE_ATTR(lba_dependent, ALUA_LBD_SUP);
ALUA_SUPPORTED_STATE_ATTR(unavailable, ALUA_U_SUP);
ALUA_SUPPORTED_STATE_ATTR(standby, ALUA_S_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_optimized, ALUA_AO_SUP);
ALUA_SUPPORTED_STATE_ATTR(active_nonoptimized, ALUA_AN_SUP);
2620
/* Show the alua_write_metadata flag (0 or 1). */
static ssize_t target_tg_pt_gp_alua_write_metadata_show(
		struct config_item *item, char *page)
{
	return sprintf(page, "%d\n",
		to_tg_pt_gp(item)->tg_pt_gp_write_metadata);
}
2627
2628 static ssize_t target_tg_pt_gp_alua_write_metadata_store(
2629                 struct config_item *item, const char *page, size_t count)
2630 {
2631         struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2632         unsigned long tmp;
2633         int ret;
2634
2635         ret = kstrtoul(page, 0, &tmp);
2636         if (ret < 0) {
2637                 pr_err("Unable to extract alua_write_metadata\n");
2638                 return ret;
2639         }
2640
2641         if ((tmp != 0) && (tmp != 1)) {
2642                 pr_err("Illegal value for alua_write_metadata:"
2643                         " %lu\n", tmp);
2644                 return -EINVAL;
2645         }
2646         tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
2647
2648         return count;
2649 }
2650
/*
 * The remaining scalar tg_pt_gp attributes (nonop/trans delays,
 * implicit transition timeout, preferred bit) delegate parsing and
 * formatting to the core ALUA helpers.
 */
static ssize_t target_tg_pt_gp_nonop_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_nonop_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_nonop_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_nonop_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_show(struct config_item *item,
		char *page)
{
	return core_alua_show_trans_delay_msecs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_trans_delay_msecs_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_trans_delay_msecs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_show(
		struct config_item *item, char *page)
{
	return core_alua_show_implicit_trans_secs(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_implicit_trans_secs_store(
		struct config_item *item, const char *page, size_t count)
{
	return core_alua_store_implicit_trans_secs(to_tg_pt_gp(item), page,
			count);
}

static ssize_t target_tg_pt_gp_preferred_show(struct config_item *item,
		char *page)
{
	return core_alua_show_preferred_bit(to_tg_pt_gp(item), page);
}

static ssize_t target_tg_pt_gp_preferred_store(struct config_item *item,
		const char *page, size_t count)
{
	return core_alua_store_preferred_bit(to_tg_pt_gp(item), page, count);
}
2701
2702 static ssize_t target_tg_pt_gp_tg_pt_gp_id_show(struct config_item *item,
2703                 char *page)
2704 {
2705         struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2706
2707         if (!tg_pt_gp->tg_pt_gp_valid_id)
2708                 return 0;
2709         return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
2710 }
2711
/*
 * Assign a 16-bit ALUA Target Port Group ID.  Parsing and range
 * checking happen here; core_alua_set_tg_pt_gp_id() performs the
 * actual assignment.
 */
static ssize_t target_tg_pt_gp_tg_pt_gp_id_store(struct config_item *item,
		const char *page, size_t count)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
	struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
	unsigned long tg_pt_gp_id;
	int ret;

	ret = kstrtoul(page, 0, &tg_pt_gp_id);
	if (ret < 0) {
		pr_err("ALUA tg_pt_gp_id: invalid value '%s' for tg_pt_gp_id\n",
		       page);
		return ret;
	}
	if (tg_pt_gp_id > 0x0000ffff) {
		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n",
		       tg_pt_gp_id);
		return -EINVAL;
	}

	ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
	if (ret < 0)
		return -EINVAL;

	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
		"core/alua/tg_pt_gps/%s to ID: %hu\n",
		config_item_name(&alua_tg_pt_gp_cg->cg_item),
		tg_pt_gp->tg_pt_gp_id);

	return count;
}
2743
2744 static ssize_t target_tg_pt_gp_members_show(struct config_item *item,
2745                 char *page)
2746 {
2747         struct t10_alua_tg_pt_gp *tg_pt_gp = to_tg_pt_gp(item);
2748         struct se_lun *lun;
2749         ssize_t len = 0, cur_len;
2750         unsigned char buf[TG_PT_GROUP_NAME_BUF];
2751
2752         memset(buf, 0, TG_PT_GROUP_NAME_BUF);
2753
2754         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
2755         list_for_each_entry(lun, &tg_pt_gp->tg_pt_gp_lun_list,
2756                         lun_tg_pt_gp_link) {
2757                 struct se_portal_group *tpg = lun->lun_tpg;
2758
2759                 cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
2760                         "/%s\n", tpg->se_tpg_tfo->fabric_name,
2761                         tpg->se_tpg_tfo->tpg_get_wwn(tpg),
2762                         tpg->se_tpg_tfo->tpg_get_tag(tpg),
2763                         config_item_name(&lun->lun_group.cg_item));
2764                 cur_len++; /* Extra byte for NULL terminator */
2765
2766                 if ((cur_len + len) > PAGE_SIZE) {
2767                         pr_warn("Ran out of lu_gp_show_attr"
2768                                 "_members buffer\n");
2769                         break;
2770                 }
2771                 memcpy(page+len, buf, cur_len);
2772                 len += cur_len;
2773         }
2774         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
2775
2776         return len;
2777 }
2778
/* Attribute hookup for a tg_pt_gp group; only members is read-only. */
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_state);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_status);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_access_type);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_transitioning);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_offline);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_lba_dependent);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_unavailable);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_standby);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_optimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_support_active_nonoptimized);
CONFIGFS_ATTR(target_tg_pt_gp_, alua_write_metadata);
CONFIGFS_ATTR(target_tg_pt_gp_, nonop_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, trans_delay_msecs);
CONFIGFS_ATTR(target_tg_pt_gp_, implicit_trans_secs);
CONFIGFS_ATTR(target_tg_pt_gp_, preferred);
CONFIGFS_ATTR(target_tg_pt_gp_, tg_pt_gp_id);
CONFIGFS_ATTR_RO(target_tg_pt_gp_, members);

static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
	&target_tg_pt_gp_attr_alua_access_state,
	&target_tg_pt_gp_attr_alua_access_status,
	&target_tg_pt_gp_attr_alua_access_type,
	&target_tg_pt_gp_attr_alua_support_transitioning,
	&target_tg_pt_gp_attr_alua_support_offline,
	&target_tg_pt_gp_attr_alua_support_lba_dependent,
	&target_tg_pt_gp_attr_alua_support_unavailable,
	&target_tg_pt_gp_attr_alua_support_standby,
	&target_tg_pt_gp_attr_alua_support_active_nonoptimized,
	&target_tg_pt_gp_attr_alua_support_active_optimized,
	&target_tg_pt_gp_attr_alua_write_metadata,
	&target_tg_pt_gp_attr_nonop_delay_msecs,
	&target_tg_pt_gp_attr_trans_delay_msecs,
	&target_tg_pt_gp_attr_implicit_trans_secs,
	&target_tg_pt_gp_attr_preferred,
	&target_tg_pt_gp_attr_tg_pt_gp_id,
	&target_tg_pt_gp_attr_members,
	NULL,
};
2817
2818 static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2819 {
2820         struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2821                         struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2822
2823         core_alua_free_tg_pt_gp(tg_pt_gp);
2824 }
2825
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
	.release		= target_core_alua_tg_pt_gp_release,
};

/* Item type for an individual alua/tg_pt_gps/$NAME group. */
static const struct config_item_type target_core_alua_tg_pt_gp_cit = {
	.ct_item_ops		= &target_core_alua_tg_pt_gp_ops,
	.ct_attrs		= target_core_alua_tg_pt_gp_attrs,
	.ct_owner		= THIS_MODULE,
};
2835
2836 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
2837
2838 /* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2839
2840 static struct config_group *target_core_alua_create_tg_pt_gp(
2841         struct config_group *group,
2842         const char *name)
2843 {
2844         struct t10_alua *alua = container_of(group, struct t10_alua,
2845                                         alua_tg_pt_gps_group);
2846         struct t10_alua_tg_pt_gp *tg_pt_gp;
2847         struct config_group *alua_tg_pt_gp_cg = NULL;
2848         struct config_item *alua_tg_pt_gp_ci = NULL;
2849
2850         tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0);
2851         if (!tg_pt_gp)
2852                 return NULL;
2853
2854         alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
2855         alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
2856
2857         config_group_init_type_name(alua_tg_pt_gp_cg, name,
2858                         &target_core_alua_tg_pt_gp_cit);
2859
2860         pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
2861                 " Group: alua/tg_pt_gps/%s\n",
2862                 config_item_name(alua_tg_pt_gp_ci));
2863
2864         return alua_tg_pt_gp_cg;
2865 }
2866
/*
 * configfs ->drop_item() for rmdir(2) of a tg_pt_gps/$NAME group.  The
 * final put triggers target_core_alua_tg_pt_gp_release() above.
 */
static void target_core_alua_drop_tg_pt_gp(
	struct config_group *group,
	struct config_item *item)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
			struct t10_alua_tg_pt_gp, tg_pt_gp_group);

	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
	/*
	 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
	 * -> target_core_alua_tg_pt_gp_release().
	 */
	config_item_put(item);
}
2883
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
	.make_group		= &target_core_alua_create_tg_pt_gp,
	.drop_item		= &target_core_alua_drop_tg_pt_gp,
};

/* Generate tb_dev_alua_tg_pt_gps_cit for the per-device alua/ group. */
TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
2890
2891 /* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
2892
2893 /* Start functions for struct config_item_type target_core_alua_cit */
2894
2895 /*
2896  * target_core_alua_cit is a ConfigFS group that lives under
2897  * /sys/kernel/config/target/core/alua.  There are default groups
2898  * core/alua/lu_gps and core/alua/tg_pt_gps that are attached to
2899  * target_core_alua_cit in target_core_init_configfs() below.
2900  */
/* Bare container group; lu_gps/tg_pt_gps are attached as children. */
static const struct config_item_type target_core_alua_cit = {
	.ct_item_ops		= NULL,
	.ct_attrs		= NULL,
	.ct_owner		= THIS_MODULE,
};
2906
2907 /* End functions for struct config_item_type target_core_alua_cit */
2908
2909 /* Start functions for struct config_item_type tb_dev_stat_cit */
2910
/* mkdir(2) is not supported under the per-device statistics group. */
static struct config_group *target_core_stat_mkdir(
	struct config_group *group,
	const char *name)
{
	return ERR_PTR(-ENOSYS);
}
2917
/* Nothing to do: statistics groups are torn down with their device. */
static void target_core_stat_rmdir(
	struct config_group *group,
	struct config_item *item)
{
}
2924
static struct configfs_group_operations target_core_stat_group_ops = {
	.make_group		= &target_core_stat_mkdir,
	.drop_item		= &target_core_stat_rmdir,
};

/* Generate tb_dev_stat_cit for the per-device statistics/ group. */
TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
2931
2932 /* End functions for struct config_item_type tb_dev_stat_cit */
2933
2934 /* Start functions for struct config_item_type target_core_hba_cit */
2935
/*
 * configfs ->make_group() for mkdir(2) of core/$HBA/$DEV.  Allocates
 * the se_device and wires up its default configfs groups (action,
 * attrib, pr, wwn, alua, statistics) plus alua/default_tg_pt_gp.
 */
static struct config_group *target_core_make_subdev(
	struct config_group *group,
	const char *name)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct config_item *hba_ci = &group->cg_item;
	struct se_hba *hba = item_to_hba(hba_ci);
	struct target_backend *tb = hba->backend;
	struct se_device *dev;
	int errno = -ENOMEM, ret;

	/* Interruptible so a signal can abort a blocked mkdir(2) */
	ret = mutex_lock_interruptible(&hba->hba_access_mutex);
	if (ret)
		return ERR_PTR(ret);

	dev = target_alloc_device(hba, name);
	if (!dev)
		goto out_unlock;

	config_group_init_type_name(&dev->dev_group, name, &tb->tb_dev_cit);

	config_group_init_type_name(&dev->dev_action_group, "action",
			&tb->tb_dev_action_cit);
	configfs_add_default_group(&dev->dev_action_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
			&tb->tb_dev_attrib_cit);
	configfs_add_default_group(&dev->dev_attrib.da_group, &dev->dev_group);

	config_group_init_type_name(&dev->dev_pr_group, "pr",
			&tb->tb_dev_pr_cit);
	configfs_add_default_group(&dev->dev_pr_group, &dev->dev_group);

	config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
			&tb->tb_dev_wwn_cit);
	configfs_add_default_group(&dev->t10_wwn.t10_wwn_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
			"alua", &tb->tb_dev_alua_tg_pt_gps_cit);
	configfs_add_default_group(&dev->t10_alua.alua_tg_pt_gps_group,
			&dev->dev_group);

	config_group_init_type_name(&dev->dev_stat_grps.stat_group,
			"statistics", &tb->tb_dev_stat_cit);
	configfs_add_default_group(&dev->dev_stat_grps.stat_group,
			&dev->dev_group);

	/*
	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
	 */
	tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1);
	if (!tg_pt_gp)
		goto out_free_device;
	dev->t10_alua.default_tg_pt_gp = tg_pt_gp;

	config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
			"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
	configfs_add_default_group(&tg_pt_gp->tg_pt_gp_group,
			&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * Add core/$HBA/$DEV/statistics/ default groups
	 */
	target_stat_setup_dev_default_groups(dev);

	mutex_unlock(&hba->hba_access_mutex);
	return &dev->dev_group;

out_free_device:
	target_free_device(dev);
out_unlock:
	mutex_unlock(&hba->hba_access_mutex);
	return ERR_PTR(errno);
}
3011
/*
 * configfs ->drop_item() for rmdir(2) of core/$HBA/$DEV.  Unwinds the
 * default groups created in target_core_make_subdev() and drops the
 * final reference on the device item.
 */
static void target_core_drop_subdev(
	struct config_group *group,
	struct config_item *item)
{
	struct config_group *dev_cg = to_config_group(item);
	struct se_device *dev =
		container_of(dev_cg, struct se_device, dev_group);
	struct se_hba *hba;

	hba = item_to_hba(&dev->se_hba->hba_group.cg_item);

	mutex_lock(&hba->hba_access_mutex);

	configfs_remove_default_groups(&dev->dev_stat_grps.stat_group);
	configfs_remove_default_groups(&dev->t10_alua.alua_tg_pt_gps_group);

	/*
	 * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
	 * directly from target_core_alua_tg_pt_gp_release().
	 */
	dev->t10_alua.default_tg_pt_gp = NULL;

	configfs_remove_default_groups(dev_cg);

	/*
	 * se_dev is released from target_core_dev_item_ops->release()
	 */
	config_item_put(item);
	mutex_unlock(&hba->hba_access_mutex);
}
3042
static struct configfs_group_operations target_core_hba_group_ops = {
	.make_group		= target_core_make_subdev,
	.drop_item		= target_core_drop_subdev,
};


/* Map a configfs item back to its containing se_hba. */
static inline struct se_hba *to_hba(struct config_item *item)
{
	return container_of(to_config_group(item), struct se_hba, hba_group);
}
3053
/* Show the HBA index, backend plugin name, and target core version. */
static ssize_t target_hba_info_show(struct config_item *item, char *page)
{
	struct se_hba *hba = to_hba(item);

	return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
			hba->hba_id, hba->backend->ops->name,
			TARGET_CORE_VERSION);
}
3062
3063 static ssize_t target_hba_mode_show(struct config_item *item, char *page)
3064 {
3065         struct se_hba *hba = to_hba(item);
3066         int hba_mode = 0;
3067
3068         if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
3069                 hba_mode = 1;
3070
3071         return sprintf(page, "%d\n", hba_mode);
3072 }
3073
/*
 * Toggle pSCSI passthrough mode on an HBA whose backend supports it.
 * Rejected while any device is attached; the backend's return value
 * decides whether HBA_FLAGS_PSCSI_MODE ends up set or cleared.
 */
static ssize_t target_hba_mode_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_hba *hba = to_hba(item);
	unsigned long mode_flag;
	int ret;

	if (hba->backend->ops->pmode_enable_hba == NULL)
		return -EINVAL;

	ret = kstrtoul(page, 0, &mode_flag);
	if (ret < 0) {
		pr_err("Unable to extract hba mode flag: %d\n", ret);
		return ret;
	}

	if (hba->dev_count) {
		pr_err("Unable to set hba_mode with active devices\n");
		return -EINVAL;
	}

	ret = hba->backend->ops->pmode_enable_hba(hba, mode_flag);
	if (ret < 0)
		return -EINVAL;
	if (ret > 0)
		hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
	else if (ret == 0)
		hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;

	return count;
}
3105
CONFIGFS_ATTR_RO(target_, hba_info);
CONFIGFS_ATTR(target_, hba_mode);

/* configfs ->release(): tear down the HBA on final put. */
static void target_core_hba_release(struct config_item *item)
{
	struct se_hba *hba = container_of(to_config_group(item),
				struct se_hba, hba_group);
	core_delete_hba(hba);
}

static struct configfs_attribute *target_core_hba_attrs[] = {
	&target_attr_hba_info,
	&target_attr_hba_mode,
	NULL,
};
3121
static struct configfs_item_operations target_core_hba_item_ops = {
	.release		= target_core_hba_release,
};

/* Item type for a core/$HBA group; children are created via mkdir(2). */
static const struct config_item_type target_core_hba_cit = {
	.ct_item_ops		= &target_core_hba_item_ops,
	.ct_group_ops		= &target_core_hba_group_ops,
	.ct_attrs		= target_core_hba_attrs,
	.ct_owner		= THIS_MODULE,
};
3132
3133 static struct config_group *target_core_call_addhbatotarget(
3134         struct config_group *group,
3135         const char *name)
3136 {
3137         char *se_plugin_str, *str, *str2;
3138         struct se_hba *hba;
3139         char buf[TARGET_CORE_NAME_MAX_LEN];
3140         unsigned long plugin_dep_id = 0;
3141         int ret;
3142
3143         memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
3144         if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
3145                 pr_err("Passed *name strlen(): %d exceeds"
3146                         " TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
3147                         TARGET_CORE_NAME_MAX_LEN);
3148                 return ERR_PTR(-ENAMETOOLONG);
3149         }
3150         snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
3151
3152         str = strstr(buf, "_");
3153         if (!str) {
3154                 pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
3155                 return ERR_PTR(-EINVAL);
3156         }
3157         se_plugin_str = buf;
3158         /*
3159          * Special case for subsystem plugins that have "_" in their names.
3160          * Namely rd_direct and rd_mcp..
3161          */
3162         str2 = strstr(str+1, "_");
3163         if (str2) {
3164                 *str2 = '\0'; /* Terminate for *se_plugin_str */
3165                 str2++; /* Skip to start of plugin dependent ID */
3166                 str = str2;
3167         } else {
3168                 *str = '\0'; /* Terminate for *se_plugin_str */
3169                 str++; /* Skip to start of plugin dependent ID */
3170         }
3171
3172         ret = kstrtoul(str, 0, &plugin_dep_id);
3173         if (ret < 0) {
3174                 pr_err("kstrtoul() returned %d for"
3175                                 " plugin_dep_id\n", ret);
3176                 return ERR_PTR(ret);
3177         }
3178         /*
3179          * Load up TCM subsystem plugins if they have not already been loaded.
3180          */
3181         transport_subsystem_check_init();
3182
3183         hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
3184         if (IS_ERR(hba))
3185                 return ERR_CAST(hba);
3186
3187         config_group_init_type_name(&hba->hba_group, name,
3188                         &target_core_hba_cit);
3189
3190         return &hba->hba_group;
3191 }
3192
/*
 * configfs ->drop_item() callback for /sys/kernel/config/target/core/:
 * only drops the group's reference here; the actual HBA teardown runs
 * later from target_core_hba_release() -> core_delete_hba() once the
 * item refcount hits zero.
 */
static void target_core_call_delhbafromtarget(
	struct config_group *group,
	struct config_item *item)
{
	config_item_put(item);
}
3203
/* mkdir/rmdir handlers for HBA groups under /sys/kernel/config/target/core */
static struct configfs_group_operations target_core_group_ops = {
	.make_group     = target_core_call_addhbatotarget,
	.drop_item      = target_core_call_delhbafromtarget,
};
3208
3209 static const struct config_item_type target_core_cit = {
3210         .ct_item_ops    = NULL,
3211         .ct_group_ops   = &target_core_group_ops,
3212         .ct_attrs       = NULL,
3213         .ct_owner       = THIS_MODULE,
3214 };
3215
/* End of functions for struct config_item_type target_core_hba_cit */
3217
/*
 * Wire up all per-backend config_item_types (dev, dev action/attrib/pr/wwn,
 * ALUA tg_pt_gps, statistics) for a newly registered target backend.
 */
void target_setup_backend_cits(struct target_backend *tb)
{
	target_core_setup_dev_cit(tb);
	target_core_setup_dev_action_cit(tb);
	target_core_setup_dev_attrib_cit(tb);
	target_core_setup_dev_pr_cit(tb);
	target_core_setup_dev_wwn_cit(tb);
	target_core_setup_dev_alua_tg_pt_gps_cit(tb);
	target_core_setup_dev_stat_cit(tb);
}
3228
3229 static void target_init_dbroot(void)
3230 {
3231         struct file *fp;
3232
3233         snprintf(db_root_stage, DB_ROOT_LEN, DB_ROOT_PREFERRED);
3234         fp = filp_open(db_root_stage, O_RDONLY, 0);
3235         if (IS_ERR(fp)) {
3236                 pr_err("db_root: cannot open: %s\n", db_root_stage);
3237                 return;
3238         }
3239         if (!S_ISDIR(file_inode(fp)->i_mode)) {
3240                 filp_close(fp, NULL);
3241                 pr_err("db_root: not a valid directory: %s\n", db_root_stage);
3242                 return;
3243         }
3244         filp_close(fp, NULL);
3245
3246         strncpy(db_root, db_root_stage, DB_ROOT_LEN);
3247         pr_debug("Target_Core_ConfigFS: db_root set to %s\n", db_root);
3248 }
3249
/*
 * Module init: build the /sys/kernel/config/target hierarchy.
 *
 * Creates the default "core" group, the ALUA group tree (including the
 * default_lu_gp), registers the configfs subsystem, then brings up the
 * built-in ramdisk backend, virtual LUN 0, the xcopy passthrough
 * session, and the dbroot.  Returns 0 on success or a negative errno,
 * unwinding already-initialized state on failure.
 */
static int __init target_core_init_configfs(void)
{
	struct configfs_subsystem *subsys = &target_core_fabrics;
	struct t10_alua_lu_gp *lu_gp;
	int ret;

	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);

	config_group_init(&subsys->su_group);
	mutex_init(&subsys->su_mutex);

	ret = init_se_kmem_caches();
	if (ret < 0)
		return ret;
	/*
	 * Create $CONFIGFS/target/core default group for HBA <-> Storage Object
	 * and ALUA Logical Unit Group and Target Port Group infrastructure.
	 */
	config_group_init_type_name(&target_core_hbagroup, "core",
			&target_core_cit);
	configfs_add_default_group(&target_core_hbagroup, &subsys->su_group);

	/*
	 * Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_group, "alua", &target_core_alua_cit);
	configfs_add_default_group(&alua_group, &target_core_hbagroup);

	/*
	 * Add ALUA Logical Unit Group and Target Port Group ConfigFS
	 * groups under /sys/kernel/config/target/core/alua/
	 */
	config_group_init_type_name(&alua_lu_gps_group, "lu_gps",
			&target_core_alua_lu_gps_cit);
	configfs_add_default_group(&alua_lu_gps_group, &alua_group);

	/*
	 * Add core/alua/lu_gps/default_lu_gp
	 */
	lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
	if (IS_ERR(lu_gp)) {
		ret = -ENOMEM;
		goto out_global;
	}

	config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
				&target_core_alua_lu_gp_cit);
	configfs_add_default_group(&lu_gp->lu_gp_group, &alua_lu_gps_group);

	/* Stashed globally so the exit path (and ALUA code) can find it */
	default_lu_gp = lu_gp;

	/*
	 * Register the target_core_mod subsystem with configfs.
	 */
	ret = configfs_register_subsystem(subsys);
	if (ret < 0) {
		pr_err("Error %d while registering subsystem %s\n",
			ret, subsys->su_group.cg_item.ci_namebuf);
		goto out_global;
	}
	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
		" Infrastructure: "TARGET_CORE_VERSION" on %s/%s"
		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
	/*
	 * Register built-in RAMDISK subsystem logic for virtual LUN 0
	 */
	ret = rd_module_init();
	if (ret < 0)
		goto out;

	ret = core_dev_setup_virtual_lun0();
	if (ret < 0)
		goto out;

	ret = target_xcopy_setup_pt();
	if (ret < 0)
		goto out;

	target_init_dbroot();

	return 0;

out:
	/* Failure after subsystem registration: unwind everything */
	configfs_unregister_subsystem(subsys);
	core_dev_release_virtual_lun0();
	rd_module_exit();
out_global:
	/* Failure before (or during) registration: free lu_gp + caches only */
	if (default_lu_gp) {
		core_alua_free_lu_gp(default_lu_gp);
		default_lu_gp = NULL;
	}
	release_se_kmem_caches();
	return ret;
}
3346
/*
 * Module exit: tear down the configfs hierarchy and release all
 * resources acquired in target_core_init_configfs(), in reverse order
 * of creation.
 */
static void __exit target_core_exit_configfs(void)
{
	/* Detach default groups bottom-up before unregistering the subsystem */
	configfs_remove_default_groups(&alua_lu_gps_group);
	configfs_remove_default_groups(&alua_group);
	configfs_remove_default_groups(&target_core_hbagroup);

	/*
	 * We expect subsys->su_group.default_groups to be released
	 * by configfs subsystem provider logic..
	 */
	configfs_unregister_subsystem(&target_core_fabrics);

	core_alua_free_lu_gp(default_lu_gp);
	default_lu_gp = NULL;

	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
			" Infrastructure\n");

	core_dev_release_virtual_lun0();
	rd_module_exit();
	target_xcopy_release_pt();
	release_se_kmem_caches();
}
3370
/* Module metadata and init/exit entry points */
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);