// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS
#include "bcachefs.h"
#include "alloc_background.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "journal.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"
#include "util.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
#define SYSFS_OPS(type)							\
struct sysfs_ops type ## _sysfs_ops = {					\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)

#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return snprint(buf, PAGE_SIZE, var);			\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file) {					\
		struct printbuf out = _PBUF(buf, PAGE_SIZE);		\
		bch2_hprint(&out, val);					\
		pr_buf(&out, "\n");					\
		return out.pos - buf;					\
	}								\
} while (0)

#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define strtoul_restrict_or_return(cp, min, max)			\
({									\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\
	if (_r)								\
		return _r;						\
	__v;								\
})

#define strtoi_h_or_return(cp)						\
({									\
	u64 _v;								\
	int _r = strtoi_h(cp, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define sysfs_hatoi(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
} while (0)
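/*
 * Usage sketch (hypothetical attribute "foo", not one declared below): a
 * SHOW() body can emit it with
 *	sysfs_print(foo, c->foo);
 * and the matching STORE() body can accept updates with
 *	sysfs_strtoul(foo, c->foo);
 * Each helper returns from the enclosing show/store function as soon as
 * @attr matches, so unmatched attributes simply fall through.
 */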
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(trigger_alloc_write);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(btree_node_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(bucket_quantiles_last_read);
read_attribute(bucket_quantiles_last_write);
read_attribute(bucket_quantiles_fragmentation);
read_attribute(bucket_quantiles_oldest_gen);

read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);

rw_attribute(journal_write_delay_ms);
rw_attribute(journal_reclaim_delay_ms);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
sysfs_pd_controller_attribute(copy_gc);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

rw_attribute(pd_controllers_update_seconds);

read_attribute(meta_replicas_have);
read_attribute(data_replicas_have);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
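/*
 * The attributes declared above are surfaced by the kobject hierarchy set
 * up at mount time; filesystem-level files typically appear under
 * /sys/fs/bcachefs/<fs uuid>/ and per-device files under a dev-<idx>/
 * subdirectory (the exact layout is determined by the registration code
 * in super.c).
 */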
#define BCH_DEBUG_PARAM(name, description)				\
	rw_attribute(name);

	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = S_IRUGO };
	BCH_TIME_STATS()
#undef x
static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};
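/*
 * Sum of the in-memory footprint of all live btree nodes; every cached
 * node occupies btree_bytes(c), so this is effectively a node count
 * scaled by the node size.
 */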
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);

	if (!fs_usage)
		return -ENOMEM;

	bch2_fs_usage_to_text(&out, c, fs_usage);

	percpu_up_read(&c->mark_lock);

	kfree(fs_usage);

	return out.pos - buf;
}
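/*
 * Walks every extent in the extents btree and tallies compressed vs.
 * uncompressed totals; on a large filesystem a read of this attribute is
 * correspondingly expensive.
 */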
static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bkey_s_c k;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
		if (k.k->type == KEY_TYPE_extent) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;

			extent_for_each_ptr_decode(e, p, entry) {
				if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
				} else {
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
				}

				/* only looking at the first ptr */
				break;
			}
		}

	ret = bch2_trans_exit(&trans) ?: ret;
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE,
			 "uncompressed data:\n"
			 "	nr extents:			%llu\n"
			 "	size (bytes):			%llu\n"
			 "compressed data:\n"
			 "	nr extents:			%llu\n"
			 "	compressed size (bytes):	%llu\n"
			 "	uncompressed size (bytes):	%llu\n",
			 nr_uncompressed_extents,
			 uncompressed_sectors << 9,
			 nr_compressed_extents,
			 compressed_sectors_compressed << 9,
			 compressed_sectors_uncompressed << 9);
}
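/*
 * Reports in-progress erasure coding stripe state: one header line per
 * stripe head, plus the pending stripe (if any) and each stripe still in
 * flight with its allocated-block count and pin count.
 */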
static ssize_t bch2_new_stripes(struct bch_fs *c, char *buf)
{
	char *out = buf, *end = buf + PAGE_SIZE;
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_new_stripe_lock);
	list_for_each_entry(h, &c->ec_new_stripe_list, list) {
		out += scnprintf(out, end - out,
				 "target %u algo %u redundancy %u:\n",
				 h->target, h->algo, h->redundancy);

		if (h->s)
			out += scnprintf(out, end - out,
					 "\tpending: blocks %u allocated %u\n",
					 h->s->blocks.nr,
					 bitmap_weight(h->s->blocks_allocated,
						       h->s->blocks.nr));

		mutex_lock(&h->lock);
		list_for_each_entry(s, &h->stripes, list)
			out += scnprintf(out, end - out,
					 "\tin flight: blocks %u allocated %u pin %u\n",
					 s->blocks.nr,
					 bitmap_weight(s->blocks_allocated,
						       s->blocks.nr),
					 atomic_read(&s->pin));
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_new_stripe_lock);

	return out - buf;
}
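/* sysfs show path for the top-level filesystem kobject */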
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	sysfs_print(journal_write_delay_ms,	c->journal.write_delay_ms);
	sysfs_print(journal_reclaim_delay_ms,	c->journal.reclaim_delay_ms);

	sysfs_print(block_size,			block_bytes(c));
	sysfs_print(btree_node_size,		btree_bytes(c));
	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	sysfs_printf(copy_gc_enabled, "%i",	c->copy_gc_enabled);

	sysfs_print(pd_controllers_update_seconds,
		    c->pd_controllers_update_seconds);

	sysfs_printf(rebalance_enabled, "%i",	c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */

	if (attr == &sysfs_rebalance_work)
		return bch2_rebalance_work_show(c, buf);

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	sysfs_printf(meta_replicas_have, "%i",	bch2_replicas_online(c, true));
	sysfs_printf(data_replicas_have, "%i",	bch2_replicas_online(c, false));

	/* Debugging: */

	if (attr == &sysfs_alloc_debug)
		return show_fs_alloc_debug(c, buf);

	if (attr == &sysfs_journal_debug)
		return bch2_journal_print_debug(&c->journal, buf);

	if (attr == &sysfs_journal_pins)
		return bch2_journal_print_pins(&c->journal, buf);

	if (attr == &sysfs_btree_updates)
		return bch2_btree_updates_print(c, buf);

	if (attr == &sysfs_dirty_btree_nodes)
		return bch2_dirty_btree_nodes_print(c, buf);

	if (attr == &sysfs_compression_stats)
		return bch2_compression_stats(c, buf);

	if (attr == &sysfs_new_stripes)
		return bch2_new_stripes(c, buf);

	if (attr == &sysfs_io_timers_read)
		return bch2_io_timers_show(&c->io_clock[READ], buf);
	if (attr == &sysfs_io_timers_write)
		return bch2_io_timers_show(&c->io_clock[WRITE], buf);

#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	return 0;
}
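/*
 * Store path: plain tunables are parsed in place via the sysfs_strtoul()
 * helpers; the trigger_* attributes below kick one-shot maintenance work
 * and require the filesystem to be started.
 */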
STORE(__bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		struct bch_dev *ca;
		unsigned i;
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		for_each_member_device(ca, c, i)
			if (ca->copygc_thread)
				wake_up_process(ca->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_strtoul(pd_controllers_update_seconds,
		      c->pd_controllers_update_seconds);
	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta_async(&c->journal, NULL);

	if (attr == &sysfs_trigger_btree_coalesce)
		bch2_coalesce(c);

	if (attr == &sysfs_trigger_gc)
		bch2_gc(c, NULL, false, false);

	if (attr == &sysfs_trigger_alloc_write) {
		bool wrote;

		bch2_alloc_write(c, 0, &wrote);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}
#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);
		if (ret)
			return ret;
	}
#endif
	return size;
}
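/*
 * All stores go through this wrapper so that writes are serialized
 * against filesystem state changes via c->state_lock.
 */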
STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	mutex_lock(&c->state_lock);
	size = __bch2_fs_store(kobj, attr, buf, size);
	mutex_unlock(&c->state_lock);

	return size;
}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_block_size,
	&sysfs_btree_node_size,
	&sysfs_btree_cache_size,

	&sysfs_meta_replicas_have,
	&sysfs_data_replicas_have,

	&sysfs_journal_write_delay_ms,
	&sysfs_journal_reclaim_delay_ms,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};
/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_alloc_debug,
	&sysfs_journal_debug,
	&sysfs_journal_pins,
	&sysfs_btree_updates,
	&sysfs_dirty_btree_nodes,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,

	&sysfs_trigger_journal_flush,
	&sysfs_trigger_btree_coalesce,
	&sysfs_trigger_gc,
	&sysfs_trigger_alloc_write,
	&sysfs_prune_cache,

	&sysfs_copy_gc_enabled,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_new_stripes,

	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_internal_uuid,

#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	NULL
};
/* options */

SHOW(bch2_fs_opts_dir)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);
	pr_buf(&out, "\n");

	return out.pos - buf;
}
STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = bch2_opt_parse(c, opt, strim(tmp), &v);
	kfree(tmp);

	if (ret < 0)
		return ret;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		return ret;

	if (opt->set_sb != SET_NO_SB_OPT) {
		mutex_lock(&c->sb_lock);
		opt->set_sb(c->disk_sb.sb, v);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	return size;
}
SYSFS_OPS(bch2_fs_opts_dir);
struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->mode & (OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME)))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
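/*
 * Example (paths assumed, per the kobject layout): a runtime option can be
 * read back and updated with e.g.
 *	cat  /sys/fs/bcachefs/<fs uuid>/options/<option>
 *	echo <value> > /sys/fs/bcachefs/<fs uuid>/options/<option>
 * Writes are parsed by bch2_opt_parse() and, for superblock-backed
 * options, persisted via bch2_write_super().
 */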
/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		return bch2_time_stats_print(&c->times[BCH_TIME_##name],\
					     buf, PAGE_SIZE);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return -EINVAL;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};
typedef unsigned (bucket_map_fn)(struct bch_fs *, struct bch_dev *,
				 size_t, void *);

static unsigned bucket_last_io_fn(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, void *private)
{
	int rw = (private ? 1 : 0);

	return bucket_last_io(c, bucket(ca, b), rw);
}

static unsigned bucket_sectors_used_fn(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, void *private)
{
	struct bucket *g = bucket(ca, b);

	return bucket_sectors_used(g->mark);
}

static unsigned bucket_oldest_gen_fn(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, void *private)
{
	return bucket_gc_gen(ca, b);
}

static int unsigned_cmp(const void *_l, const void *_r)
{
	const unsigned *l = _l;
	const unsigned *r = _r;

	return cmp_int(*l, *r);
}
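/*
 * show_quantiles() summarizes a per-bucket statistic (last IO time,
 * sectors used, or oldest gen, selected by @fn) as 31 evenly spaced
 * quantiles over the device's buckets, emitted as one line of
 * space-separated values.
 */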
static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
			      char *buf, bucket_map_fn *fn, void *private)
{
	size_t i, n;
	/* Compute 31 quantiles */
	unsigned q[31], *p;
	ssize_t ret = 0;

	down_read(&ca->bucket_lock);
	n = ca->mi.nbuckets;

	p = vzalloc(n * sizeof(unsigned));
	if (!p) {
		up_read(&ca->bucket_lock);
		return -ENOMEM;
	}

	for (i = ca->mi.first_bucket; i < n; i++)
		p[i] = fn(c, ca, i, private);

	sort(p, n, sizeof(unsigned), unsigned_cmp, NULL);
	up_read(&ca->bucket_lock);

	while (n &&
	       !p[n - 1])
		--n;

	for (i = 0; i < ARRAY_SIZE(q); i++)
		q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];

	vfree(p);

	for (i = 0; i < ARRAY_SIZE(q); i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "%u ", q[i]);
	buf[ret - 1] = '\n';

	return ret;
}
static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	enum alloc_reserve i;

	spin_lock(&ca->fs->freelist_lock);

	pr_buf(&out, "free_inc:\t%zu\t%zu\n",
	       fifo_used(&ca->free_inc),
	       ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
		       fifo_used(&ca->free[i]),
		       ca->free[i].size);

	spin_unlock(&ca->fs->freelist_lock);

	return out.pos - buf;
}
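/*
 * One-page snapshot of the device's allocator state: freelist fill
 * levels, bucket and sector counts by data type, and open bucket usage.
 */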
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].type]++;

	return scnprintf(buf, PAGE_SIZE,
		"free_inc:               %zu/%zu\n"
		"free[RESERVE_BTREE]:    %zu/%zu\n"
		"free[RESERVE_MOVINGGC]: %zu/%zu\n"
		"free[RESERVE_NONE]:     %zu/%zu\n"
		"buckets:\n"
		"    capacity:           %llu\n"
		"    alloc:              %llu\n"
		"    sb:                 %llu\n"
		"    journal:            %llu\n"
		"    meta:               %llu\n"
		"    user:               %llu\n"
		"    cached:             %llu\n"
		"    erasure coded:      %llu\n"
		"    available:          %lli\n"
		"sectors:\n"
		"    sb:                 %llu\n"
		"    journal:            %llu\n"
		"    meta:               %llu\n"
		"    user:               %llu\n"
		"    cached:             %llu\n"
		"    fragmented:         %llu\n"
		"    copygc threshold:   %llu\n"
		"freelist_wait:          %s\n"
		"open buckets:           %u/%u (reserved %u)\n"
		"open_buckets_wait:      %s\n"
		"open_buckets_btree:     %u\n"
		"open_buckets_user:      %u\n"
		"btree reserve cache:    %u\n",
		fifo_used(&ca->free_inc),		ca->free_inc.size,
		fifo_used(&ca->free[RESERVE_BTREE]),	ca->free[RESERVE_BTREE].size,
		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
		fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
		ca->mi.nbuckets - ca->mi.first_bucket,
		stats.buckets_alloc,
		stats.buckets[BCH_DATA_SB],
		stats.buckets[BCH_DATA_JOURNAL],
		stats.buckets[BCH_DATA_BTREE],
		stats.buckets[BCH_DATA_USER],
		stats.buckets[BCH_DATA_CACHED],
		stats.buckets_ec,
		ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
		stats.sectors[BCH_DATA_SB],
		stats.sectors[BCH_DATA_JOURNAL],
		stats.sectors[BCH_DATA_BTREE],
		stats.sectors[BCH_DATA_USER],
		stats.sectors[BCH_DATA_CACHED],
		stats.sectors_fragmented,
		ca->copygc_threshold,
		c->freelist_wait.list.first		? "waiting" : "empty",
		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
		BTREE_NODE_OPEN_BUCKET_RESERVE,
		c->open_buckets_wait.list.first		? "waiting" : "empty",
		nr[BCH_DATA_BTREE],
		nr[BCH_DATA_USER],
		c->btree_reserve_cache_nr);
}
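/*
 * Per-device IO accounting, shown as bytes (sector counts shifted left
 * by 9) broken out by direction and data type.
 */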
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		pr_buf(&out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			pr_buf(&out, "%-12s:%12llu\n",
			       bch2_data_types[i],
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}

	return out.pos - buf;
}
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(block_size,		block_bytes(c));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(&out, &c->disk_sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		} else {
			pr_buf(&out, "none");
		}

		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_has_data) {
		bch2_flags_to_text(&out, bch2_data_types,
				   bch2_dev_has_data(c, ca));
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_cache_replacement_policy) {
		bch2_string_opt_to_text(&out,
					bch2_cache_replacement_policies,
					ca->mi.replacement);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_state_rw) {
		bch2_string_opt_to_text(&out, bch2_dev_state,
					ca->mi.state);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_iodone)
		return show_dev_iodone(ca, buf);

	sysfs_print(io_latency_read,	atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,	atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
	if (attr == &sysfs_io_latency_stats_write)
		return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);

	sysfs_printf(congested,		"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_bucket_quantiles_last_read)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
	if (attr == &sysfs_bucket_quantiles_last_write)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
	if (attr == &sysfs_bucket_quantiles_fragmentation)
		return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
	if (attr == &sysfs_bucket_quantiles_oldest_gen)
		return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);

	if (attr == &sysfs_reserve_stats)
		return show_reserve_stats(ca, buf);
	if (attr == &sysfs_alloc_debug)
		return show_dev_alloc_debug(ca, buf);

	return 0;
}
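/*
 * Device attribute stores: settings that live in the superblock member
 * record (discard, cache_replacement_policy) are updated under sb_lock
 * and persisted with bch2_write_super().
 */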
STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);

		if (v < 0)
			return v;

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_wake_allocator)
		bch2_wake_allocator(ca);

	return size;
}
SYSFS_OPS(bch2_dev);
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_cache_replacement_policy,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* alloc info - other stats: */
	&sysfs_bucket_quantiles_last_read,
	&sysfs_bucket_quantiles_last_write,
	&sysfs_bucket_quantiles_fragmentation,
	&sysfs_bucket_quantiles_oldest_gen,

	&sysfs_reserve_stats,

	/* debug: */
	&sysfs_alloc_debug,
	&sysfs_wake_allocator,

	sysfs_pd_controller_files(copy_gc),
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */