// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/pretty-printers.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"
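/*
 * Each sysfs directory here is backed by a SHOW()/STORE() pair: SHOW(fn)
 * expands to a fn_to_text() helper that renders into a struct printbuf, plus
 * a fn_show() wrapper that copies the rendered text into the sysfs page
 * buffer (clamped to PAGE_SIZE - 1, since sysfs reads are limited to one
 * page). SYSFS_OPS() then ties the pair into a struct sysfs_ops for the
 * kobject type.
 */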
#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return ret;							\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
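/*
 * Attribute helpers: read_attribute() creates a world-readable node,
 * write_attribute() a root-writable trigger, rw_attribute() both. For
 * example, rw_attribute(discard) expands to a struct attribute named
 * "discard" with mode S_IRUGO|S_IWUSR; the show/store handlers below match
 * attributes by pointer comparison (attr == &sysfs_discard).
 */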
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
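/*
 * Store-side counterparts: each helper matches on the attribute pointer,
 * parses the user-supplied buffer, and returns either the parse error or the
 * full write size (a sysfs store must consume the whole buffer). The
 * *_or_return() variants are statement expressions that return from the
 * enclosing store handler on parse failure.
 */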
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define strtoul_restrict_or_return(cp, min, max)			\
({									\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\
	if (_r)								\
		return _r;						\
	__v;								\
})

#define strtoi_h_or_return(cp)						\
({									\
	u64 _v;								\
	int _r = strtoi_h(cp, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define sysfs_hatoi(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
} while (0)
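/*
 * Attribute declarations. Write-only attributes act as triggers (writing any
 * value kicks off the named operation), read-only attributes expose state,
 * and rw attributes are tunables. The wiring happens in the SHOW()/STORE()
 * handlers and the *_files[] arrays further down.
 */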
write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_avg_write_size);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
read_attribute(open_buckets);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);
read_attribute(bucket_alloc_fail);
#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);
rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(data_jobs);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = S_IRUGO };
	BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};
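/*
 * Note: bch2_btree_cache_size() charges every node on the live list at full
 * node size (btree_bytes(c)), i.e. it reports memory reserved for the btree
 * node cache rather than bytes of key data actually cached.
 */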
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
static size_t bch2_btree_avg_write_size(struct bch_fs *c)
{
	u64 nr	    = atomic64_read(&c->btree_writes_nr);
	u64 sectors = atomic64_read(&c->btree_writes_sectors);

	return nr ? div64_u64(sectors, nr) : 0;
}
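/*
 * data_progress_to_text() lists in-flight data moves, one line per
 * bch_move_stats on c->data_progress_list, including the current btree
 * position, so the progress of long-running data jobs can be observed from
 * userspace.
 */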
static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
	long ret = 0;
	struct bch_move_stats *stats;

	mutex_lock(&c->data_progress_lock);
	list_for_each_entry(stats, &c->data_progress_list, list) {
		prt_printf(out, "%s: data type %s btree_id %s position: ",
			   stats->name,
			   bch2_data_types[stats->data_type],
			   bch2_btree_ids[stats->btree_id]);
		bch2_bpos_to_text(out, stats->pos);
		prt_printf(out, "%s", "\n");
	}

	mutex_unlock(&c->data_progress_lock);
	return ret;
}
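/*
 * Compression statistics: walk every extent in every btree that carries
 * pointers and classify each extent. An extent counts as incompressible if
 * any pointer has the incompressible compression type, else as uncompressed
 * if any pointer is uncompressed, else as compressed; sector totals are
 * accumulated per class and printed as bytes (hence the << 9 below, sectors
 * being 512 bytes).
 */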
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id id;
	u64 nr_uncompressed_extents = 0,
	    nr_compressed_extents = 0,
	    nr_incompressible_extents = 0,
	    uncompressed_sectors = 0,
	    incompressible_sectors = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	bch2_trans_init(&trans, c, 0, 0);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!((1U << id) & BTREE_ID_HAS_PTRS))
			continue;

		for_each_btree_key(&trans, iter, id, POS_MIN,
				   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;
			bool compressed = false, uncompressed = false, incompressible = false;

			bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
				switch (p.crc.compression_type) {
				case BCH_COMPRESSION_TYPE_none:
					uncompressed = true;
					uncompressed_sectors += k.k->size;
					break;
				case BCH_COMPRESSION_TYPE_incompressible:
					incompressible = true;
					incompressible_sectors += k.k->size;
					break;
				default:
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
					compressed = true;
					break;
				}
			}

			if (incompressible)
				nr_incompressible_extents++;
			else if (uncompressed)
				nr_uncompressed_extents++;
			else if (compressed)
				nr_compressed_extents++;
		}
		bch2_trans_iter_exit(&trans, &iter);
	}

	bch2_trans_exit(&trans);

	if (ret)
		return ret;
	prt_printf(out, "uncompressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_uncompressed_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, uncompressed_sectors << 9);
	prt_printf(out, "\n");

	prt_printf(out, "compressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_compressed_extents);
	prt_printf(out, "	compressed size:	");
	prt_human_readable_u64(out, compressed_sectors_compressed << 9);
	prt_printf(out, "\n");
	prt_printf(out, "	uncompressed size:	");
	prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
	prt_printf(out, "\n");

	prt_printf(out, "incompressible:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_incompressible_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, incompressible_sectors << 9);
	prt_printf(out, "\n");
	return 0;
}
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}
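/*
 * Top-level filesystem directory. These attributes surface as, for example
 * (with a hypothetical filesystem UUID):
 *
 *	/sys/fs/bcachefs/<fs-uuid>/btree_cache_size
 *	/sys/fs/bcachefs/<fs-uuid>/internal/trigger_gc
 */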
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));
	sysfs_hprint(btree_avg_write_size,	bch2_btree_avg_write_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));
	sysfs_print(bucket_alloc_fail,
		    atomic_long_read(&c->bucket_alloc_fail));

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */
	sysfs_hprint(copy_gc_wait,
		     max(0LL, c->copygc_wait -
			 atomic64_read(&c->io_clock[WRITE].now)) << 9);

	if (attr == &sysfs_rebalance_work)
		bch2_rebalance_work_to_text(out, c);

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */
	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_updates)
		bch2_btree_updates_to_text(out, c);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_btree_transactions)
		bch2_btree_trans_to_text(out, c);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);
	if (attr == &sysfs_data_jobs)
		data_progress_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents,	c->promote_whole_extents);
	/* Debugging: */

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	if (!test_bit(BCH_FS_RW, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}
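	/*
	 * Example (assuming a mounted filesystem): writing a scan count to
	 * internal/prune_cache invokes the btree cache shrinker directly,
	 * e.g.:
	 *
	 *	echo 100000 > /sys/fs/bcachefs/<fs-uuid>/internal/prune_cache
	 */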
	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);
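	/*
	 * The triggers above are fire-on-write: writing any value starts the
	 * operation, e.g. `echo 1 > internal/trigger_discards`. The perf_test
	 * attribute below expects "<test> <nr> <threads>", where <nr> accepts
	 * human-readable suffixes; e.g. `echo "rand_insert 1M 4" > perf_test`
	 * (test names are defined in tests.c).
	 */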
#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);
		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_avg_write_size,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};
/* counters dir */

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	out->tabstops[0] = 32;

#define x(t, ...)							\
	if (attr == &sysfs_##t) {					\
		counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount << 9);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter << 9);		\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}
STORE(bch2_fs_counters) {
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...)							\
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};
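/*
 * Note: counter values above are shifted left by 9 before being printed in
 * human-readable form, suggesting they're tracked in 512-byte sectors and
 * displayed as bytes, e.g. via /sys/fs/bcachefs/<fs-uuid>/counters/<counter>.
 */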
/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_btree_transactions,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,
	&sysfs_bucket_alloc_fail,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_data_jobs,

	&sysfs_internal_uuid,
	NULL
};
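/*
 * The options dir exposes one file per filesystem option (OPT_FS) from
 * bch2_opt_table; a write updates both the in-memory options and the
 * superblock, e.g.:
 *
 *	echo lz4 > /sys/fs/bcachefs/<fs-uuid>/options/compression
 */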
/* options */

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}
STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!percpu_ref_tryget(&c->writes)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	ret = size;
err:
	percpu_ref_put(&c->writes);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);
struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};
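/*
 * Per-device allocation debug info: a table of bucket/sector/fragmentation
 * counts per data type, followed by allocator state (waitlists, open bucket
 * usage). Reads take no allocator locks, so the numbers are a point-in-time
 * snapshot.
 */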
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	prt_printf(out,
	       "\t\t\t buckets\t sectors      fragmented\n"
	       "capacity\t%16llu\n",
	       ca->mi.nbuckets - ca->mi.first_bucket);

	for (i = 0; i < BCH_DATA_NR; i++)
		prt_printf(out, "%-16s%16llu%16llu%16llu\n",
		       bch2_data_types[i], stats.d[i].buckets,
		       stats.d[i].sectors, stats.d[i].fragmented);

	prt_printf(out,
	       "ec\t%16llu\n"
	       "\n"
	       "freelist_wait\t\t%s\n"
	       "open buckets allocated\t%u\n"
	       "open buckets this dev\t%u\n"
	       "open buckets total\t%u\n"
	       "open_buckets_wait\t%s\n"
	       "open_buckets_btree\t%u\n"
	       "open_buckets_user\t%u\n"
	       "buckets_to_invalidate\t%llu\n"
	       "btree reserve cache\t%u\n",
	       stats.buckets_ec,
	       c->freelist_wait.list.first		? "waiting" : "empty",
	       OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
	       ca->nr_open_buckets,
	       OPEN_BUCKETS_COUNT,
	       c->open_buckets_wait.list.first		? "waiting" : "empty",
	       nr[BCH_DATA_btree],
	       nr[BCH_DATA_user],
	       should_invalidate_buckets(ca, stats),
	       c->btree_reserve_cache_nr);
}
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
			       bch2_data_types[i],
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}
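/*
 * Per-device directory, e.g. /sys/fs/bcachefs/<fs-uuid>/dev-0/. Most fields
 * come straight from the cached member info (ca->mi); label and discard are
 * writable and update the superblock.
 */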
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);
	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(out, c->disk_sb.sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		}

		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_iodone)
		dev_iodone_to_text(out, ca);
	sysfs_print(io_latency_read,		atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,		atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ]);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

	sysfs_printf(congested,			"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);
	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;
	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}
	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	return size;
}
SYSFS_OPS(bch2_dev);
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */