// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sysfs.h"
13 #include "xfs_log_priv.h"
14 #include "xfs_mount.h"
16 struct xfs_sysfs_attr {
17 struct attribute attr;
18 ssize_t (*show)(struct kobject *kobject, char *buf);
19 ssize_t (*store)(struct kobject *kobject, const char *buf,
23 static inline struct xfs_sysfs_attr *
24 to_attr(struct attribute *attr)
26 return container_of(attr, struct xfs_sysfs_attr, attr);
29 #define XFS_SYSFS_ATTR_RW(name) \
30 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
31 #define XFS_SYSFS_ATTR_RO(name) \
32 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
33 #define XFS_SYSFS_ATTR_WO(name) \
34 static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)
36 #define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
39 xfs_sysfs_object_show(
40 struct kobject *kobject,
41 struct attribute *attr,
44 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
46 return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
50 xfs_sysfs_object_store(
51 struct kobject *kobject,
52 struct attribute *attr,
56 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
58 return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
61 static const struct sysfs_ops xfs_sysfs_ops = {
62 .show = xfs_sysfs_object_show,
63 .store = xfs_sysfs_object_store,
/*
 * xfs_mount kobject. The mp kobject also serves as the per-mount parent object
 * that is identified by the fsname under sysfs.
 */
71 static inline struct xfs_mount *
72 to_mp(struct kobject *kobject)
74 struct xfs_kobj *kobj = to_kobj(kobject);
76 return container_of(kobj, struct xfs_mount, m_kobj);
79 static struct attribute *xfs_mp_attrs[] = {
83 struct kobj_type xfs_mp_ktype = {
84 .release = xfs_sysfs_release,
85 .sysfs_ops = &xfs_sysfs_ops,
86 .default_attrs = xfs_mp_attrs,
94 struct kobject *kobject,
101 ret = kstrtoint(buf, 0, &val);
106 xfs_globals.bug_on_assert = true;
108 xfs_globals.bug_on_assert = false;
117 struct kobject *kobject,
120 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bug_on_assert ? 1 : 0);
122 XFS_SYSFS_ATTR_RW(bug_on_assert);
125 log_recovery_delay_store(
126 struct kobject *kobject,
133 ret = kstrtoint(buf, 0, &val);
137 if (val < 0 || val > 60)
140 xfs_globals.log_recovery_delay = val;
146 log_recovery_delay_show(
147 struct kobject *kobject,
150 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.log_recovery_delay);
152 XFS_SYSFS_ATTR_RW(log_recovery_delay);
156 struct kobject *kobject,
163 ret = kstrtoint(buf, 0, &val);
167 if (val < 0 || val > 60)
170 xfs_globals.mount_delay = val;
177 struct kobject *kobject,
180 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.mount_delay);
182 XFS_SYSFS_ATTR_RW(mount_delay);
186 struct kobject *kobject,
192 ret = kstrtobool(buf, &xfs_globals.always_cow);
200 struct kobject *kobject,
203 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.always_cow);
205 XFS_SYSFS_ATTR_RW(always_cow);
209 * Override how many threads the parallel work queue is allowed to create.
210 * This has to be a debug-only global (instead of an errortag) because one of
211 * the main users of parallel workqueues is mount time quotacheck.
215 struct kobject *kobject,
222 ret = kstrtoint(buf, 0, &val);
226 if (val < -1 || val > num_possible_cpus())
229 xfs_globals.pwork_threads = val;
236 struct kobject *kobject,
239 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.pwork_threads);
241 XFS_SYSFS_ATTR_RW(pwork_threads);
244 static struct attribute *xfs_dbg_attrs[] = {
245 ATTR_LIST(bug_on_assert),
246 ATTR_LIST(log_recovery_delay),
247 ATTR_LIST(mount_delay),
248 ATTR_LIST(always_cow),
250 ATTR_LIST(pwork_threads),
255 struct kobj_type xfs_dbg_ktype = {
256 .release = xfs_sysfs_release,
257 .sysfs_ops = &xfs_sysfs_ops,
258 .default_attrs = xfs_dbg_attrs,
265 static inline struct xstats *
266 to_xstats(struct kobject *kobject)
268 struct xfs_kobj *kobj = to_kobj(kobject);
270 return container_of(kobj, struct xstats, xs_kobj);
275 struct kobject *kobject,
278 struct xstats *stats = to_xstats(kobject);
280 return xfs_stats_format(stats->xs_stats, buf);
282 XFS_SYSFS_ATTR_RO(stats);
286 struct kobject *kobject,
292 struct xstats *stats = to_xstats(kobject);
294 ret = kstrtoint(buf, 0, &val);
301 xfs_stats_clearall(stats->xs_stats);
304 XFS_SYSFS_ATTR_WO(stats_clear);
306 static struct attribute *xfs_stats_attrs[] = {
308 ATTR_LIST(stats_clear),
312 struct kobj_type xfs_stats_ktype = {
313 .release = xfs_sysfs_release,
314 .sysfs_ops = &xfs_sysfs_ops,
315 .default_attrs = xfs_stats_attrs,
320 static inline struct xlog *
321 to_xlog(struct kobject *kobject)
323 struct xfs_kobj *kobj = to_kobj(kobject);
325 return container_of(kobj, struct xlog, l_kobj);
330 struct kobject *kobject,
335 struct xlog *log = to_xlog(kobject);
337 spin_lock(&log->l_icloglock);
338 cycle = log->l_curr_cycle;
339 block = log->l_curr_block;
340 spin_unlock(&log->l_icloglock);
342 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
344 XFS_SYSFS_ATTR_RO(log_head_lsn);
348 struct kobject *kobject,
353 struct xlog *log = to_xlog(kobject);
355 xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
356 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, block);
358 XFS_SYSFS_ATTR_RO(log_tail_lsn);
361 reserve_grant_head_show(
362 struct kobject *kobject,
368 struct xlog *log = to_xlog(kobject);
370 xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
371 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
373 XFS_SYSFS_ATTR_RO(reserve_grant_head);
376 write_grant_head_show(
377 struct kobject *kobject,
382 struct xlog *log = to_xlog(kobject);
384 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
385 return snprintf(buf, PAGE_SIZE, "%d:%d\n", cycle, bytes);
387 XFS_SYSFS_ATTR_RO(write_grant_head);
389 static struct attribute *xfs_log_attrs[] = {
390 ATTR_LIST(log_head_lsn),
391 ATTR_LIST(log_tail_lsn),
392 ATTR_LIST(reserve_grant_head),
393 ATTR_LIST(write_grant_head),
397 struct kobj_type xfs_log_ktype = {
398 .release = xfs_sysfs_release,
399 .sysfs_ops = &xfs_sysfs_ops,
400 .default_attrs = xfs_log_attrs,
/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
413 static inline struct xfs_error_cfg *
414 to_error_cfg(struct kobject *kobject)
416 struct xfs_kobj *kobj = to_kobj(kobject);
417 return container_of(kobj, struct xfs_error_cfg, kobj);
420 static inline struct xfs_mount *
421 err_to_mp(struct kobject *kobject)
423 struct xfs_kobj *kobj = to_kobj(kobject);
424 return container_of(kobj, struct xfs_mount, m_error_kobj);
429 struct kobject *kobject,
433 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
435 if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
438 retries = cfg->max_retries;
440 return snprintf(buf, PAGE_SIZE, "%d\n", retries);
445 struct kobject *kobject,
449 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
453 ret = kstrtoint(buf, 0, &val);
461 cfg->max_retries = XFS_ERR_RETRY_FOREVER;
463 cfg->max_retries = val;
466 XFS_SYSFS_ATTR_RW(max_retries);
469 retry_timeout_seconds_show(
470 struct kobject *kobject,
474 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
476 if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
479 timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;
481 return snprintf(buf, PAGE_SIZE, "%d\n", timeout);
485 retry_timeout_seconds_store(
486 struct kobject *kobject,
490 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
494 ret = kstrtoint(buf, 0, &val);
498 /* 1 day timeout maximum, -1 means infinite */
499 if (val < -1 || val > 86400)
503 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
505 cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
506 ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
510 XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
513 fail_at_unmount_show(
514 struct kobject *kobject,
517 struct xfs_mount *mp = err_to_mp(kobject);
519 return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
523 fail_at_unmount_store(
524 struct kobject *kobject,
528 struct xfs_mount *mp = err_to_mp(kobject);
532 ret = kstrtoint(buf, 0, &val);
536 if (val < 0 || val > 1)
539 mp->m_fail_unmount = val;
542 XFS_SYSFS_ATTR_RW(fail_at_unmount);
544 static struct attribute *xfs_error_attrs[] = {
545 ATTR_LIST(max_retries),
546 ATTR_LIST(retry_timeout_seconds),
551 static struct kobj_type xfs_error_cfg_ktype = {
552 .release = xfs_sysfs_release,
553 .sysfs_ops = &xfs_sysfs_ops,
554 .default_attrs = xfs_error_attrs,
557 static struct kobj_type xfs_error_ktype = {
558 .release = xfs_sysfs_release,
559 .sysfs_ops = &xfs_sysfs_ops,
/*
 * Error initialization tables. These need to be ordered in the same
 * order as the enums used to index the array. All class init tables need to
 * define a "default" behaviour as the first entry, all other entries can be
 * empty.
 */
struct xfs_error_init {
	char		*name;		/* sysfs directory name for this errno */
	int		max_retries;	/* retry count before giving up */
	int		retry_timeout;	/* in seconds */
};
574 static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
576 .max_retries = XFS_ERR_RETRY_FOREVER,
577 .retry_timeout = XFS_ERR_RETRY_FOREVER,
580 .max_retries = XFS_ERR_RETRY_FOREVER,
581 .retry_timeout = XFS_ERR_RETRY_FOREVER,
584 .max_retries = XFS_ERR_RETRY_FOREVER,
585 .retry_timeout = XFS_ERR_RETRY_FOREVER,
588 .max_retries = 0, /* We can't recover from devices disappearing */
594 xfs_error_sysfs_init_class(
595 struct xfs_mount *mp,
597 const char *parent_name,
598 struct xfs_kobj *parent_kobj,
599 const struct xfs_error_init init[])
601 struct xfs_error_cfg *cfg;
605 ASSERT(class < XFS_ERR_CLASS_MAX);
607 error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
608 &mp->m_error_kobj, parent_name);
612 for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
613 cfg = &mp->m_error_cfg[class][i];
614 error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
615 parent_kobj, init[i].name);
619 cfg->max_retries = init[i].max_retries;
620 if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
621 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
623 cfg->retry_timeout = msecs_to_jiffies(
624 init[i].retry_timeout * MSEC_PER_SEC);
629 /* unwind the entries that succeeded */
630 for (i--; i >= 0; i--) {
631 cfg = &mp->m_error_cfg[class][i];
632 xfs_sysfs_del(&cfg->kobj);
634 xfs_sysfs_del(parent_kobj);
639 xfs_error_sysfs_init(
640 struct xfs_mount *mp)
644 /* .../xfs/<dev>/error/ */
645 error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
646 &mp->m_kobj, "error");
650 error = sysfs_create_file(&mp->m_error_kobj.kobject,
651 ATTR_LIST(fail_at_unmount));
656 /* .../xfs/<dev>/error/metadata/ */
657 error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
658 "metadata", &mp->m_error_meta_kobj,
659 xfs_error_meta_init);
666 xfs_sysfs_del(&mp->m_error_kobj);
672 struct xfs_mount *mp)
674 struct xfs_error_cfg *cfg;
677 for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
678 for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
679 cfg = &mp->m_error_cfg[i][j];
681 xfs_sysfs_del(&cfg->kobj);
684 xfs_sysfs_del(&mp->m_error_meta_kobj);
685 xfs_sysfs_del(&mp->m_error_kobj);
688 struct xfs_error_cfg *
690 struct xfs_mount *mp,
694 struct xfs_error_cfg *cfg;
701 cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
704 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
707 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
710 cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];