// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
        "auto",
        "always",
        NULL
};

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(idle_max_writeback_rate);
rw_attribute(gc_after_writeback);
rw_attribute(size);

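/*
 * Print a NULL-terminated string list into buf, marking the selected
 * entry with square brackets, e.g. "writethrough [writeback] ...".
 */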
static ssize_t bch_snprint_string_list(char *buf,
                                       size_t size,
                                       const char * const list[],
                                       size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += snprintf(out, buf + size - out,
                                i == selected ? "[%s] " : "%s ", list[i]);

        out[-1] = '\n';
        return out - buf;
}

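/*
 * Show handler for a cached (backing) device's sysfs attributes; the var()
 * macro below maps an attribute name to the matching struct cached_dev
 * member for the var_print*() helpers.
 */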
SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
                                               dc->stop_when_cache_set_failed);


        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_printf(io_errors,         "%i", atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,        ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_name) {
                snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_uuid) {
                /* convert binary uuid into 36-byte string plus '\0' */
                snprintf(buf, 36+1, "%pU", dc->sb.uuid);
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

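/*
 * Store handler for a cached device's sysfs attributes; the d_strtoul*()
 * macros below parse user input into the corresponding struct cached_dev
 * member.
 */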
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
        sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
        sysfs_strtoul_bool(writeback_running, dc->writeback_running);
        sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
                            0, bch_cutoff_writeback);

        if (attr == &sysfs_writeback_rate) {
                ssize_t ret;
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                if (!ret) {
                        atomic_long_set(&dc->writeback_rate.rate, v);
                        ret = size;
                }

                return ret;
        }

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_minimum,
                            dc->writeback_rate_minimum,
                            1, UINT_MAX);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf)) {
                v = bch_cached_dev_run(dc);
                if (v)
                        return v;
        }

        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;

                dc->stop_when_cache_set_failed = v;
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE,
                                   env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                uint8_t         set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }
                if (v == -ENOENT)
                        pr_err("Can't attach %s: cache set not found", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

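/*
 * Locked wrapper around __cached_dev_store(): takes bch_register_lock, and
 * afterwards kicks the writeback kthread or schedules the rate update work
 * when writeback_running or writeback_percent were written.
 */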
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running) {
                /* dc->writeback_running changed in __cached_dev_store() */
                if (IS_ERR_OR_NULL(dc->writeback_thread)) {
                        /*
                         * reject setting it to 1 via sysfs if writeback
                         * kthread is not created yet.
                         */
                        if (dc->writeback_running) {
                                dc->writeback_running = false;
                                pr_err("%s: failed to run non-existent writeback thread",
                                                dc->disk.disk->disk_name);
                        }
                } else
                        /*
                         * writeback kthread will check if dc->writeback_running
                         * is true or false.
                         */
                        bch_writeback_queue(dc);
        }

        /*
         * Only set BCACHE_DEV_WB_RUNNING when the cached device is
         * attached to a cache set; otherwise it doesn't make sense.
         */
        if (attr == &sysfs_writeback_percent)
                if ((dc->disk.c != NULL) &&
                    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
        &sysfs_io_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        &sysfs_backing_dev_name,
        &sysfs_backing_dev_uuid,
        NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

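/*
 * Helpers for the bset_tree_stats attribute: walk every btree node with
 * bch_btree_map_nodes() and accumulate per-bset statistics.
 */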
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:                %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                        %zu\n"
                        "failed:                        %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

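/*
 * Fraction (in percent) of the root btree node occupied by keys.  The root
 * may be replaced while we wait for its lock, so retry until we hold a read
 * lock on the current root.
 */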
static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

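/* Total memory currently held by the in-core btree node cache, in bytes */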
static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

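/*
 * Length of the longest chain in the btree cache hash table, a rough
 * measure of how well bucket_hash is distributed.
 */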
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

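/* Show handler for cache-set-wide attributes and statistics */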
SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(reclaimed_journal_buckets,
                    atomic_long_read(&c->reclaimed_journal_buckets));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(cutoff_writeback, bch_cutoff_writeback);
        sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);
        sysfs_printf(idle_max_writeback_rate,   "%i",
                     c->idle_max_writeback_rate_enabled);
        sysfs_printf(gc_after_writeback,        "%i", c->gc_after_writeback);
        sysfs_printf(io_disable,                "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                force_wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul_clamp(congested_read_threshold_us,
                            c->congested_read_threshold_us,
                            0, UINT_MAX);
        sysfs_strtoul_clamp(congested_write_threshold_us,
                            c->congested_write_threshold_us,
                            0, UINT_MAX);

        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;

                c->on_error = v;
        }

        sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife) {
                unsigned long v = 0;
                ssize_t ret;

                ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
                if (!ret) {
                        c->error_decay = v / 88;
                        return size;
                }
                return ret;
        }

        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);
                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared");
                }
        }

        sysfs_strtoul_clamp(journal_delay_ms,
                            c->journal_delay_ms,
                            0, USHRT_MAX);
        sysfs_strtoul_bool(verify,              c->verify);
        sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul_bool(gc_always_rewrite,   c->gc_always_rewrite);
        sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul_bool(copy_gc_enabled,     c->copy_gc_enabled);
        sysfs_strtoul_bool(idle_max_writeback_rate,
                           c->idle_max_writeback_rate_enabled);

        /*
         * Writing gc_after_writeback here may overwrite an already set
         * BCH_DO_AUTO_GC bit; that doesn't matter, because the flag will
         * be set again at the next opportunity.
         */
        sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

        return size;
}
STORE_LOCKED(bch_cache_set)

SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_reclaimed_journal_buckets,
        &sysfs_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        &sysfs_idle_max_writeback_rate,
        &sysfs_gc_after_writeback,
        &sysfs_io_disable,
        &sysfs_cutoff_writeback,
        &sysfs_cutoff_writeback_sync,
        NULL
};
KTYPE(bch_cache_set_internal);

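/*
 * Comparison callback for sort(): orders bucket priorities in descending
 * order.  cond_resched() keeps the sort preemptible, since nbuckets can
 * be large.
 */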
static int __bch_cache_cmp(const void *l, const void *r)
{
        cond_resched();
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

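                /*
                 * Drop zero-priority buckets from the tail of the
                 * (descending) sorted array and skip btree buckets
                 * (prio == BTREE_PRIO) at the front, so the quantiles
                 * below cover only buckets holding cached data.
                 */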
                while (n &&
                       !cached[n - 1])
                        --n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:                %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);