/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS
#include "bcachefs.h"
#include "alloc_background.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include "rebalance.h"
#include "sysfs.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
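/*
 * Boilerplate for the sysfs entry points: SHOW()/STORE() open the
 * definition of a <type>_show()/<type>_store() function (the body follows
 * at the use site), and SYSFS_OPS() emits the struct sysfs_ops that wires
 * the kobject callbacks to those generated functions.
 */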
#define SYSFS_OPS(type)							\
struct sysfs_ops type ## _sysfs_ops = {					\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)

#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
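/*
 * Each helper below compares @attr against one named attribute and, on a
 * match, returns from the enclosing show/store function, so the handlers
 * read as a flat list of sysfs_print()/sysfs_strtoul()-style statements.
 */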
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return snprint(buf, PAGE_SIZE, var);			\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file) {					\
		ssize_t ret = bch2_hprint(buf, val);			\
		strcat(buf, "\n");					\
		return ret + 1;						\
	}								\
} while (0)
#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define strtoul_restrict_or_return(cp, min, max)			\
({									\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\
	if (_r)								\
		return _r;						\
	__v;								\
})

#define strtoi_h_or_return(cp)						\
({									\
	u64 _v;								\
	int _r = strtoi_h(cp, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define sysfs_hatoi(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
} while (0)
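/* Attribute definitions, grouped roughly by the sysfs directory they appear in: */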
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(btree_node_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(bucket_quantiles_last_read);
read_attribute(bucket_quantiles_last_write);
read_attribute(bucket_quantiles_fragmentation);
read_attribute(bucket_quantiles_oldest_gen);

read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);

rw_attribute(journal_write_delay_ms);
rw_attribute(journal_reclaim_delay_ms);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
sysfs_pd_controller_attribute(copy_gc);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

rw_attribute(pd_controllers_update_seconds);

read_attribute(meta_replicas_have);
read_attribute(data_replicas_have);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
#define BCH_DEBUG_PARAM(name, description)				\
	rw_attribute(name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = S_IRUGO };
	BCH_TIME_STATS()
#undef x
static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);
	mutex_unlock(&c->btree_cache.lock);

	return ret;
}
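/* Filesystem-wide allocation/usage summary, one line per data type: */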
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs_usage stats = bch2_fs_usage_read(c);
	unsigned replicas, type;

	pr_buf(&out, "capacity:\t\t%llu\n", c->capacity);

	for (replicas = 0; replicas < ARRAY_SIZE(stats.replicas); replicas++) {
		pr_buf(&out, "%u replicas:\n", replicas + 1);

		for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
			pr_buf(&out, "\t%s:\t\t%llu\n",
			       bch2_data_types[type],
			       stats.replicas[replicas].data[type]);
		pr_buf(&out, "\terasure coded:\t%llu\n",
		       stats.replicas[replicas].ec_data);
		pr_buf(&out, "\treserved:\t%llu\n",
		       stats.replicas[replicas].persistent_reserved);
	}

	pr_buf(&out, "bucket usage\n");

	for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
		pr_buf(&out, "\t%s:\t\t%llu\n",
		       bch2_data_types[type],
		       stats.buckets[type]);

	pr_buf(&out, "online reserved:\t%llu\n",
	       stats.online_reserved);

	return out.pos - buf;
}
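/*
 * Walk every extent key and tally compressed vs uncompressed sectors; only
 * the first pointer of each extent is examined, so replicated copies are
 * not double-counted.
 */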
static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;

	if (!bch2_fs_running(c))
		return -EPERM;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
		if (k.k->type == KEY_TYPE_extent) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;

			extent_for_each_ptr_decode(e, p, entry) {
				if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
				} else {
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
				}

				/* only looking at the first ptr */
				break;
			}
		}
	bch2_btree_iter_unlock(&iter);

	return scnprintf(buf, PAGE_SIZE,
			"uncompressed data:\n"
			"	nr extents:			%llu\n"
			"	size (bytes):			%llu\n"
			"compressed data:\n"
			"	nr extents:			%llu\n"
			"	compressed size (bytes):	%llu\n"
			"	uncompressed size (bytes):	%llu\n",
			nr_uncompressed_extents,
			uncompressed_sectors << 9,
			nr_compressed_extents,
			compressed_sectors_compressed << 9,
			compressed_sectors_uncompressed << 9);
}
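/* In-progress erasure-coding stripe state, one block of output per stripe head: */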
static ssize_t bch2_new_stripes(struct bch_fs *c, char *buf)
{
	char *out = buf, *end = buf + PAGE_SIZE;
	struct ec_stripe_head *h;
	struct ec_stripe_new *s;

	mutex_lock(&c->ec_new_stripe_lock);
	list_for_each_entry(h, &c->ec_new_stripe_list, list) {
		out += scnprintf(out, end - out,
				 "target %u algo %u redundancy %u:\n",
				 h->target, h->algo, h->redundancy);

		if (h->s)
			out += scnprintf(out, end - out,
					 "\tpending: blocks %u allocated %u\n",
					 h->s->blocks.nr,
					 bitmap_weight(h->s->blocks_allocated,
						       h->s->blocks.nr));

		mutex_lock(&h->lock);
		list_for_each_entry(s, &h->stripes, list)
			out += scnprintf(out, end - out,
					 "\tin flight: blocks %u allocated %u pin %u\n",
					 s->blocks.nr,
					 bitmap_weight(s->blocks_allocated,
						       s->blocks.nr),
					 atomic_read(&s->pin));
		mutex_unlock(&h->lock);
	}
	mutex_unlock(&c->ec_new_stripe_lock);

	return out - buf;
}
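/* fs dir: top-level /sys/fs/bcachefs/<uuid>/ attributes */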
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	sysfs_print(journal_write_delay_ms,	c->journal.write_delay_ms);
	sysfs_print(journal_reclaim_delay_ms,	c->journal.reclaim_delay_ms);

	sysfs_print(block_size,			block_bytes(c));
	sysfs_print(btree_node_size,		btree_bytes(c));
	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_print(pd_controllers_update_seconds,
		    c->pd_controllers_update_seconds);

	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */

	if (attr == &sysfs_rebalance_work)
		return bch2_rebalance_work_show(c, buf);

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	sysfs_printf(meta_replicas_have, "%i",	bch2_replicas_online(c, true));
	sysfs_printf(data_replicas_have, "%i",	bch2_replicas_online(c, false));

	/* Debugging: */

	if (attr == &sysfs_alloc_debug)
		return show_fs_alloc_debug(c, buf);

	if (attr == &sysfs_journal_debug)
		return bch2_journal_print_debug(&c->journal, buf);

	if (attr == &sysfs_journal_pins)
		return bch2_journal_print_pins(&c->journal, buf);

	if (attr == &sysfs_btree_updates)
		return bch2_btree_updates_print(c, buf);

	if (attr == &sysfs_dirty_btree_nodes)
		return bch2_dirty_btree_nodes_print(c, buf);

	if (attr == &sysfs_compression_stats)
		return bch2_compression_stats(c, buf);

	if (attr == &sysfs_new_stripes)
		return bch2_new_stripes(c, buf);

#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	return 0;
}
STORE(__bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		struct bch_dev *ca;
		unsigned i;
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		for_each_member_device(ca, c, i)
			if (ca->copygc_thread)
				wake_up_process(ca->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_strtoul(pd_controllers_update_seconds,
		      c->pd_controllers_update_seconds);
	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
	if (!bch2_fs_running(c))
		return -EPERM;

	/* Debugging: */

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta_async(&c->journal, NULL);

	if (attr == &sysfs_trigger_btree_coalesce)
		bch2_coalesce(c);

	if (attr == &sysfs_trigger_gc)
		bch2_gc(c, NULL, false);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}
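	/*
	 * perf_test input is "<test> <nr> <threads>"; <nr> accepts
	 * human-readable size suffixes via bch2_strtoull_h(), e.g. (assuming
	 * the rand_insert test is built in):
	 *	echo "rand_insert 1M 8" > perf_test
	 */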
#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			bch2_btree_perf_test(c, test, nr, threads);
		else
			size = ret;
		kfree(tmp);
	}
#endif
	return size;
}
STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	mutex_lock(&c->state_lock);
	size = __bch2_fs_store(kobj, attr, buf, size);
	mutex_unlock(&c->state_lock);

	return size;
}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_block_size,
	&sysfs_btree_node_size,
	&sysfs_btree_cache_size,

	&sysfs_meta_replicas_have,
	&sysfs_data_replicas_have,

	&sysfs_journal_write_delay_ms,
	&sysfs_journal_reclaim_delay_ms,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};
/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_alloc_debug,
	&sysfs_journal_debug,
	&sysfs_journal_pins,
	&sysfs_btree_updates,
	&sysfs_dirty_btree_nodes,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,

	&sysfs_trigger_journal_flush,
	&sysfs_trigger_btree_coalesce,
	&sysfs_trigger_gc,
	&sysfs_prune_cache,

	&sysfs_copy_gc_enabled,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_new_stripes,

	&sysfs_internal_uuid,

#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	NULL
};
/* options */

SHOW(bch2_fs_opts_dir)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);
	pr_buf(&out, "\n");

	return out.pos - buf;
}
STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = bch2_opt_parse(c, opt, strim(tmp), &v);
	kfree(tmp);

	if (ret < 0)
		return ret;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		return ret;

	if (opt->set_sb != SET_NO_SB_OPT) {
		mutex_lock(&c->sb_lock);
		opt->set_sb(c->disk_sb.sb, v);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	return size;
}
SYSFS_OPS(bch2_fs_opts_dir);
struct attribute *bch2_fs_opts_dir_files[] = { NULL };
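/*
 * The options dir has no static attributes; one file per option is created
 * at registration time by bch2_opts_create_sysfs_files() below.
 */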
int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (i->mode == OPT_INTERNAL)
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		return bch2_time_stats_print(&c->times[BCH_TIME_##name],\
					     buf, PAGE_SIZE);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);
struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x

	NULL
};
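/*
 * Bucket quantiles: sample a per-bucket statistic via a bucket_map_fn,
 * sort the samples, and report 31 evenly-spaced quantiles so the shape of
 * the distribution is visible from one short line of output.
 */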
typedef unsigned (bucket_map_fn)(struct bch_fs *, struct bch_dev *,
				 size_t, void *);

static unsigned bucket_last_io_fn(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, void *private)
{
	int rw = (private ? 1 : 0);

	return bucket_last_io(c, bucket(ca, b), rw);
}
static unsigned bucket_sectors_used_fn(struct bch_fs *c, struct bch_dev *ca,
				       size_t b, void *private)
{
	struct bucket *g = bucket(ca, b);

	return bucket_sectors_used(g->mark);
}

static unsigned bucket_oldest_gen_fn(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, void *private)
{
	return bucket_gc_gen(ca, b);
}
static int unsigned_cmp(const void *_l, const void *_r)
{
	unsigned l = *((unsigned *) _l);
	unsigned r = *((unsigned *) _r);

	return (l > r) - (l < r);
}
static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
			      char *buf, bucket_map_fn *fn, void *private)
{
	size_t i, n;
	/* Compute 31 quantiles */
	unsigned q[31], *p;
	ssize_t ret = 0;

	down_read(&ca->bucket_lock);
	n = ca->mi.nbuckets;

	p = vzalloc(n * sizeof(unsigned));
	if (!p) {
		up_read(&ca->bucket_lock);
		return -ENOMEM;
	}

	for (i = ca->mi.first_bucket; i < n; i++)
		p[i] = fn(c, ca, i, private);

	sort(p, n, sizeof(unsigned), unsigned_cmp, NULL);
	up_read(&ca->bucket_lock);

	for (i = 0; i < ARRAY_SIZE(q); i++)
		q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];

	vfree(p);

	for (i = 0; i < ARRAY_SIZE(q); i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "%u ", q[i]);
	buf[ret - 1] = '\n';

	return ret;
}
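/* Free-list depth per allocation reserve, printed as used/size: */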
static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	enum alloc_reserve i;

	spin_lock(&ca->freelist_lock);

	pr_buf(&out, "free_inc:\t%zu\t%zu\n",
	       fifo_used(&ca->free_inc),
	       ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
		       fifo_used(&ca->free[i]),
		       ca->free[i].size);

	spin_unlock(&ca->freelist_lock);

	return out.pos - buf;
}
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

	return scnprintf(buf, PAGE_SIZE,
		"free_inc:               %zu/%zu\n"
		"free[RESERVE_BTREE]:    %zu/%zu\n"
		"free[RESERVE_MOVINGGC]: %zu/%zu\n"
		"free[RESERVE_NONE]:     %zu/%zu\n"
		"buckets:\n"
		"    capacity:           %llu\n"
		"    alloc:              %llu\n"
		"    sb:                 %llu\n"
		"    journal:            %llu\n"
		"    btree:              %llu\n"
		"    user:               %llu\n"
		"    cached:             %llu\n"
		"    erasure coded:      %llu\n"
		"    available:          %llu\n"
		"sectors:\n"
		"    sb:                 %llu\n"
		"    journal:            %llu\n"
		"    btree:              %llu\n"
		"    user:               %llu\n"
		"    cached:             %llu\n"
		"    fragmented:         %llu\n"
		"    copygc threshold:   %llu\n"
		"freelist_wait:          %s\n"
		"open buckets:           %u/%u (reserved %u)\n"
		"open_buckets_wait:      %s\n",
		fifo_used(&ca->free_inc),		ca->free_inc.size,
		fifo_used(&ca->free[RESERVE_BTREE]),	ca->free[RESERVE_BTREE].size,
		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
		fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
		ca->mi.nbuckets - ca->mi.first_bucket,
		stats.buckets_alloc,
		stats.buckets[BCH_DATA_SB],
		stats.buckets[BCH_DATA_JOURNAL],
		stats.buckets[BCH_DATA_BTREE],
		stats.buckets[BCH_DATA_USER],
		stats.buckets[BCH_DATA_CACHED],
		stats.buckets_ec,
		ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
		stats.sectors[BCH_DATA_SB],
		stats.sectors[BCH_DATA_JOURNAL],
		stats.sectors[BCH_DATA_BTREE],
		stats.sectors[BCH_DATA_USER],
		stats.sectors[BCH_DATA_CACHED],
		stats.sectors_fragmented,
		ca->copygc_threshold,
		c->freelist_wait.list.first		? "waiting" : "empty",
		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
		c->open_buckets_wait.list.first		? "waiting" : "empty");
}
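/* Per-device IO counters, kept per-cpu and summed here for display: */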
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};
static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	int rw, i, cpu;

	for (rw = 0; rw < 2; rw++) {
		pr_buf(&out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++) {
			u64 n = 0;

			for_each_possible_cpu(cpu)
				n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];

			pr_buf(&out, "%-12s:%12llu\n",
			       bch2_data_types[i], n << 9);
		}
	}

	return out.pos - buf;
}
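/* device dir: per-device attributes (the dev-<n> directories under the fs kobject) */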
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(block_size,		block_bytes(c));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(&out, &c->disk_sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		} else {
			pr_buf(&out, "none");
		}

		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_has_data) {
		bch2_flags_to_text(&out, bch2_data_types,
				   bch2_dev_has_data(c, ca));
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_cache_replacement_policy) {
		bch2_string_opt_to_text(&out,
					bch2_cache_replacement_policies,
					ca->mi.replacement);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_state_rw) {
		bch2_string_opt_to_text(&out, bch2_dev_state,
					ca->mi.state);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_iodone)
		return show_dev_iodone(ca, buf);

	sysfs_print(io_latency_read,	atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,	atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
	if (attr == &sysfs_io_latency_stats_write)
		return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);

	sysfs_printf(congested,		"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_bucket_quantiles_last_read)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
	if (attr == &sysfs_bucket_quantiles_last_write)
		return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
	if (attr == &sysfs_bucket_quantiles_fragmentation)
		return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
	if (attr == &sysfs_bucket_quantiles_oldest_gen)
		return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);

	if (attr == &sysfs_reserve_stats)
		return show_reserve_stats(ca, buf);
	if (attr == &sysfs_alloc_debug)
		return show_dev_alloc_debug(ca, buf);

	return 0;
}
STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);

		if (v < 0)
			return v;

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_wake_allocator)
		bch2_wake_allocator(ca);

	return size;
}
SYSFS_OPS(bch2_dev);
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_cache_replacement_policy,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* alloc info - other stats: */
	&sysfs_bucket_quantiles_last_read,
	&sysfs_bucket_quantiles_last_write,
	&sysfs_bucket_quantiles_fragmentation,
	&sysfs_bucket_quantiles_oldest_gen,

	&sysfs_reserve_stats,

	/* debug: */
	&sysfs_alloc_debug,
	&sysfs_wake_allocator,

	sysfs_pd_controller_files(copy_gc),
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */