/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#ifndef NO_BCACHEFS_SYSFS

#include "alloc_background.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include "rebalance.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
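
/*
 * sysfs boilerplate: SYSFS_OPS(type) emits the struct sysfs_ops that wires a
 * kobject type's show/store handlers into sysfs, and the *_attribute() macros
 * declare the individual attribute files together with their permissions.
 */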
#define SYSFS_OPS(type)						\
struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,				\
	.store	= type ## _store				\

static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\

static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)	\

#define __sysfs_attribute(_name, _mode)				\
	static struct attribute sysfs_##_name =			\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)

#define sysfs_printf(file, fmt, ...)				\
	if (attr == &sysfs_ ## file)				\
		return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\

#define sysfs_print(file, var)					\
	if (attr == &sysfs_ ## file)				\
		return snprint(buf, PAGE_SIZE, var);		\

#define sysfs_hprint(file, val)					\
	if (attr == &sysfs_ ## file) {				\
		struct printbuf out = _PBUF(buf, PAGE_SIZE);	\
		bch2_hprint(&out, val);				\
		return out.pos - buf;				\

#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))

#define sysfs_strtoul(file, var)				\
	if (attr == &sysfs_ ## file)				\
		return strtoul_safe(buf, var) ?: (ssize_t) size; \

#define sysfs_strtoul_clamp(file, var, min, max)		\
	if (attr == &sysfs_ ## file)				\
		return strtoul_safe_clamp(buf, var, min, max)	\

#define strtoul_or_return(cp)					\
	int _r = kstrtoul(cp, 10, &_v);				\

#define strtoul_restrict_or_return(cp, min, max)		\
	unsigned long __v = 0;					\
	int _r = strtoul_safe_restrict(cp, __v, min, max);	\

#define strtoi_h_or_return(cp)					\
	int _r = strtoi_h(cp, &_v);				\

#define sysfs_hatoi(file, var)					\
	if (attr == &sysfs_ ## file)				\
		return strtoi_h(buf, &var) ?: (ssize_t) size;	\
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(trigger_alloc_write);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(btree_node_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(bucket_quantiles_last_read);
read_attribute(bucket_quantiles_last_write);
read_attribute(bucket_quantiles_fragmentation);
read_attribute(bucket_quantiles_oldest_gen);

read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);

rw_attribute(journal_write_delay_ms);
rw_attribute(journal_reclaim_delay_ms);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);

rw_attribute(copy_gc_enabled);
sysfs_pd_controller_attribute(copy_gc);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

rw_attribute(pd_controllers_update_seconds);

read_attribute(meta_replicas_have);
read_attribute(data_replicas_have);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define BCH_DEBUG_PARAM(name, description)			\
#undef BCH_DEBUG_PARAM

	static struct attribute sysfs_time_stat_##_name =	\
		{ .name = #_name, .mode = S_IRUGO };

static struct attribute sysfs_state_rw = {
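
/* Bytes of memory currently used by nodes on the btree node cache's live list */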
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
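
/* Filesystem-wide capacity/usage summary, shown by the alloc_debug attribute */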
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);

	pr_buf(&out, "capacity:\t\t\t%llu\n", c->capacity);

	pr_buf(&out, "hidden:\t\t\t\t%llu\n",
	pr_buf(&out, "data:\t\t\t\t%llu\n",
	pr_buf(&out, "cached:\t\t\t\t%llu\n",
	pr_buf(&out, "reserved:\t\t\t%llu\n",
	       fs_usage->s.reserved);
	pr_buf(&out, "nr_inodes:\t\t\t%llu\n",
	       fs_usage->s.nr_inodes);
	pr_buf(&out, "online reserved:\t\t%llu\n",
	       fs_usage->s.online_reserved);

	     i < ARRAY_SIZE(fs_usage->persistent_reserved);
		pr_buf(&out, "%u replicas:\n", i + 1);
		pr_buf(&out, "\treserved:\t\t%llu\n",
		       fs_usage->persistent_reserved[i]);

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry *e =
			cpu_replicas_entry(&c->replicas, i);

		bch2_replicas_entry_to_text(&out, e);
		pr_buf(&out, ":\t%llu\n", fs_usage->data[i]);

	percpu_up_read_preempt_enable(&c->mark_lock);

	return out.pos - buf;
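
/*
 * Walk the extents btree and tally the sizes of compressed vs. uncompressed
 * extents, for the compression_stats attribute.
 */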
static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
	struct btree_iter iter;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;

	if (!bch2_fs_running(c))

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
		if (k.k->type == KEY_TYPE_extent) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;

			extent_for_each_ptr_decode(e, p, entry) {
				if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;

				/* only looking at the first ptr */

	bch2_btree_iter_unlock(&iter);

	return scnprintf(buf, PAGE_SIZE,
			 "uncompressed data:\n"
			 " nr extents: %llu\n"
			 " size (bytes): %llu\n"
			 " nr extents: %llu\n"
			 " compressed size (bytes): %llu\n"
			 " uncompressed size (bytes): %llu\n",
			 nr_uncompressed_extents,
			 uncompressed_sectors << 9,
			 nr_compressed_extents,
			 compressed_sectors_compressed << 9,
			 compressed_sectors_uncompressed << 9);
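
/* Dump the state of erasure-coded stripes currently being built, for the new_stripes attribute */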
static ssize_t bch2_new_stripes(struct bch_fs *c, char *buf)
	char *out = buf, *end = buf + PAGE_SIZE;
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_new_stripe_lock);
	list_for_each_entry(h, &c->ec_new_stripe_list, list) {
		out += scnprintf(out, end - out,
				 "target %u algo %u redundancy %u:\n",
				 h->target, h->algo, h->redundancy);

			out += scnprintf(out, end - out,
					 "\tpending: blocks %u allocated %u\n",
					 bitmap_weight(h->s->blocks_allocated,

		mutex_lock(&h->lock);
		list_for_each_entry(s, &h->stripes, list)
			out += scnprintf(out, end - out,
				"\tin flight: blocks %u allocated %u pin %u\n",
				bitmap_weight(s->blocks_allocated,
				atomic_read(&s->pin));
		mutex_unlock(&h->lock);

	mutex_unlock(&c->ec_new_stripe_lock);
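
/* show handler for the top-level filesystem kobject */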
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_print(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_print(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	sysfs_print(block_size, block_bytes(c));
	sysfs_print(btree_node_size, btree_bytes(c));
	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_print(pd_controllers_update_seconds,
		    c->pd_controllers_update_seconds);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_rebalance_work)
		return bch2_rebalance_work_show(c, buf);

	sysfs_print(promote_whole_extents, c->promote_whole_extents);

	sysfs_printf(meta_replicas_have, "%i", bch2_replicas_online(c, true));
	sysfs_printf(data_replicas_have, "%i", bch2_replicas_online(c, false));

	if (attr == &sysfs_alloc_debug)
		return show_fs_alloc_debug(c, buf);

	if (attr == &sysfs_journal_debug)
		return bch2_journal_print_debug(&c->journal, buf);

	if (attr == &sysfs_journal_pins)
		return bch2_journal_print_pins(&c->journal, buf);

	if (attr == &sysfs_btree_updates)
		return bch2_btree_updates_print(c, buf);

	if (attr == &sysfs_dirty_btree_nodes)
		return bch2_dirty_btree_nodes_print(c, buf);

	if (attr == &sysfs_compression_stats)
		return bch2_compression_stats(c, buf);

	if (attr == &sysfs_new_stripes)
		return bch2_new_stripes(c, buf);

#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
#undef BCH_DEBUG_PARAM
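
/*
 * store handler for the filesystem kobject; the locked wrapper further down
 * takes state_lock around it.
 */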
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)

		wake_up_process(c->gc_thread);

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)

		for_each_member_device(ca, c, i)
			if (ca->copygc_thread)
				wake_up_process(ca->copygc_thread);

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)

	sysfs_strtoul(pd_controllers_update_seconds,
		      c->pd_controllers_update_seconds);
	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
#undef BCH_DEBUG_PARAM

	if (!bch2_fs_running(c))

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta_async(&c->journal, NULL);

	if (attr == &sysfs_trigger_btree_coalesce)

	if (attr == &sysfs_trigger_gc)
		bch2_gc(c, NULL, false);

	if (attr == &sysfs_trigger_alloc_write) {
		bch2_alloc_write(c, false, &wrote);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test = strsep(&p, " \t\n");
		char *nr_str = strsep(&p, " \t\n");
		char *threads_str = strsep(&p, " \t\n");

		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			bch2_btree_perf_test(c, test, nr, threads);

	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	mutex_lock(&c->state_lock);
	size = __bch2_fs_store(kobj, attr, buf, size);
	mutex_unlock(&c->state_lock);
struct attribute *bch2_fs_files[] = {
	&sysfs_btree_node_size,
	&sysfs_btree_cache_size,

	&sysfs_meta_replicas_have,
	&sysfs_data_replicas_have,

	&sysfs_journal_write_delay_ms,
	&sysfs_journal_reclaim_delay_ms,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_dirty_btree_nodes,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,

	&sysfs_trigger_journal_flush,
	&sysfs_trigger_btree_coalesce,
	&sysfs_trigger_alloc_write,

	&sysfs_copy_gc_enabled,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_internal_uuid,

#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
#undef BCH_DEBUG_PARAM
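
/* options directory: one sysfs file per filesystem option, backed by bch2_opt_table */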
SHOW(bch2_fs_opts_dir)
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);

	return out.pos - buf;

STORE(bch2_fs_opts_dir)
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;

	tmp = kstrdup(buf, GFP_KERNEL);

	ret = bch2_opt_parse(c, opt, strim(tmp), &v);

	ret = bch2_opt_check_may_set(c, id, v);

	if (opt->set_sb != SET_NO_SB_OPT) {
		mutex_lock(&c->sb_lock);
		opt->set_sb(c->disk_sb.sb, v);
		mutex_unlock(&c->sb_lock);

	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);

SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };
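
/* Create a sysfs file for every option that can be set at format, mount, or run time */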
int bch2_opts_create_sysfs_files(struct kobject *kobj)
	const struct bch_option *i;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
		if (!(i->mode & (OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME)))

		ret = sysfs_create_file(kobj, &i->attr);
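
/* time_stats directory: operation latency/duration statistics, one file per BCH_TIME_* counter */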
SHOW(bch2_fs_time_stats)
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

	if (attr == &sysfs_time_stat_##name)			\
		return bch2_time_stats_print(&c->times[BCH_TIME_##name],\

STORE(bch2_fs_time_stats)

SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
	&sysfs_time_stat_##_name,
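
/*
 * Helpers for the bucket_quantiles_* attributes: each maps a bucket to one
 * statistic, which show_quantiles() then samples across the whole device.
 */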
typedef unsigned (bucket_map_fn)(struct bch_fs *, struct bch_dev *,
				 size_t, void *);

static unsigned bucket_last_io_fn(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, void *private)
{
	int rw = (private ? 1 : 0);

	return bucket_last_io(c, bucket(ca, b), rw);
}

static unsigned bucket_sectors_used_fn(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, void *private)
{
	struct bucket *g = bucket(ca, b);

	return bucket_sectors_used(g->mark);
}

static unsigned bucket_oldest_gen_fn(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, void *private)
{
	return bucket_gc_gen(ca, b);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((unsigned *) _l);
	unsigned r = *((unsigned *) _r);

	return (l > r) - (l < r);
}
static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
			      char *buf, bucket_map_fn *fn, void *private)
	/* Compute 31 quantiles */

	down_read(&ca->bucket_lock);

	p = vzalloc(n * sizeof(unsigned));
		up_read(&ca->bucket_lock);

	for (i = ca->mi.first_bucket; i < n; i++)
		p[i] = fn(c, ca, i, private);

	sort(p, n, sizeof(unsigned), unsigned_cmp, NULL);
	up_read(&ca->bucket_lock);

	for (i = 0; i < ARRAY_SIZE(q); i++)
		q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];

	for (i = 0; i < ARRAY_SIZE(q); i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
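
/* Fill levels of free_inc and each allocation reserve freelist */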
static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	enum alloc_reserve i;

	spin_lock(&ca->freelist_lock);

	pr_buf(&out, "free_inc:\t%zu\t%zu\n",
	       fifo_used(&ca->free_inc),
	       ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
		       fifo_used(&ca->free[i]),
		       ca->free[i].size);

	spin_unlock(&ca->freelist_lock);

	return out.pos - buf;
}
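
/*
 * Per-device allocator debugging: freelist fill levels, bucket and sector
 * counts by data type, and open bucket state.
 */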
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].type]++;

	return scnprintf(buf, PAGE_SIZE,
		"free_inc: %zu/%zu\n"
		"free[RESERVE_BTREE]: %zu/%zu\n"
		"free[RESERVE_MOVINGGC]: %zu/%zu\n"
		"free[RESERVE_NONE]: %zu/%zu\n"
		" erasure coded: %llu\n"
		" fragmented: %llu\n"
		" copygc threshold: %llu\n"
		"freelist_wait: %s\n"
		"open buckets: %u/%u (reserved %u)\n"
		"open_buckets_wait: %s\n"
		"open_buckets_btree: %u\n"
		"open_buckets_user: %u\n"
		"btree reserve cache: %u\n",
		fifo_used(&ca->free_inc), ca->free_inc.size,
		fifo_used(&ca->free[RESERVE_BTREE]), ca->free[RESERVE_BTREE].size,
		fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
		fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
		ca->mi.nbuckets - ca->mi.first_bucket,
		stats.buckets[BCH_DATA_SB],
		stats.buckets[BCH_DATA_JOURNAL],
		stats.buckets[BCH_DATA_BTREE],
		stats.buckets[BCH_DATA_USER],
		stats.buckets[BCH_DATA_CACHED],
		ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
		stats.sectors[BCH_DATA_SB],
		stats.sectors[BCH_DATA_JOURNAL],
		stats.sectors[BCH_DATA_BTREE],
		stats.sectors[BCH_DATA_USER],
		stats.sectors[BCH_DATA_CACHED],
		stats.sectors_fragmented,
		ca->copygc_threshold,
		c->freelist_wait.list.first ? "waiting" : "empty",
		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
		BTREE_NODE_OPEN_BUCKET_RESERVE,
		c->open_buckets_wait.list.first ? "waiting" : "empty",
		c->btree_reserve_cache_nr);
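
/* Per-device completed I/O, in bytes, broken down by read/write and data type */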
static const char * const bch2_rw[] = {
static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		pr_buf(&out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			pr_buf(&out, "%-12s:%12llu\n",
			       bch2_data_types[i],
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}

	return out.pos - buf;
}
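
/* show handler for per-device kobjects */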
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(block_size, block_bytes(c));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(&out, &c->disk_sb,
			mutex_unlock(&c->sb_lock);
			pr_buf(&out, "none");

		return out.pos - buf;

	if (attr == &sysfs_has_data) {
		bch2_flags_to_text(&out, bch2_data_types,
				   bch2_dev_has_data(c, ca));
		return out.pos - buf;

	sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_cache_replacement_policy) {
		bch2_string_opt_to_text(&out,
					bch2_cache_replacement_policies,
		return out.pos - buf;

	if (attr == &sysfs_state_rw) {
		bch2_string_opt_to_text(&out, bch2_dev_state,
		return out.pos - buf;

	if (attr == &sysfs_iodone)
		return show_dev_iodone(ca, buf);

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
	if (attr == &sysfs_io_latency_stats_write)
		return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_bucket_quantiles_last_read)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
	if (attr == &sysfs_bucket_quantiles_last_write)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
	if (attr == &sysfs_bucket_quantiles_fragmentation)
		return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
	if (attr == &sysfs_bucket_quantiles_oldest_gen)
		return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);

	if (attr == &sysfs_reserve_stats)
		return show_reserve_stats(ca, buf);
	if (attr == &sysfs_alloc_debug)
		return show_dev_alloc_debug(ca, buf);
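
/* store handler for per-device kobjects */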
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);

		mutex_unlock(&c->sb_lock);

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);

		mutex_unlock(&c->sb_lock);

	if (attr == &sysfs_label) {
		tmp = kstrdup(buf, GFP_KERNEL);

		ret = bch2_dev_group_set(c, ca, strim(tmp));

	if (attr == &sysfs_wake_allocator)
		bch2_wake_allocator(ca);

SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_first_bucket,

	&sysfs_cache_replacement_policy,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,

	/* alloc info - other stats: */
	&sysfs_bucket_quantiles_last_read,
	&sysfs_bucket_quantiles_last_write,
	&sysfs_bucket_quantiles_fragmentation,
	&sysfs_bucket_quantiles_oldest_gen,

	&sysfs_reserve_stats,

	&sysfs_wake_allocator,

	sysfs_pd_controller_files(copy_gc),
#endif /* NO_BCACHEFS_SYSFS */