// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS
#include "bcachefs.h"
#include "alloc_background.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include "rebalance.h"
#include "sysfs.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
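
/*
 * SHOW() and STORE() declare the ->show()/->store() methods for a given
 * kobject type; SYSFS_OPS() bundles the pair into a struct sysfs_ops.
 */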
#define SYSFS_OPS(type)							\
struct sysfs_ops type ## _sysfs_ops = {					\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)

#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)
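
/*
 * e.g. SYSFS_OPS(bch2_fs_internal) wires ->show/->store up to
 * bch2_fs_internal_show()/bch2_fs_internal_store(), which are defined
 * further down with SHOW(bch2_fs_internal)/STORE(bch2_fs_internal).
 */
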
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return snprint(buf, PAGE_SIZE, var);			\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file) {					\
		struct printbuf out = _PBUF(buf, PAGE_SIZE);		\
		bch2_hprint(&out, val);					\
		pr_buf(&out, "\n");					\
		return out.pos - buf;					\
	}								\
} while (0)

#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
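
/*
 * Store-side parsing helpers: on an attribute match they parse the incoming
 * buffer and return from the enclosing ->store() method; the *_or_return()
 * variants return early on parse errors.
 */
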
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
	int _r = kstrtoul(cp, 10, &_v);					\

#define strtoul_restrict_or_return(cp, min, max)			\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\

#define strtoi_h_or_return(cp)						\
	int _r = strtoi_h(cp, &_v);					\

#define sysfs_hatoi(file, var)						\
	if (attr == &sysfs_ ## file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
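
/*
 * The attribute objects themselves; which sysfs directory each one appears
 * in is determined by the *_files[] arrays at the bottom of this file.
 */
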
write_attribute(trigger_journal_flush);
write_attribute(trigger_gc);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_avg_write_size);

read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
read_attribute(open_buckets);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(data_op_data_progress);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
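
/*
 * One read-only time_stat attribute exists per BCH_TIME_STATS() entry; they
 * are read via the bch2_fs_time_stats show method further down.
 */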
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = S_IRUGO };

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);
	mutex_unlock(&c->btree_cache.lock);

	return ret;
}

static size_t bch2_btree_avg_write_size(struct bch_fs *c)
{
	u64 nr	    = atomic64_read(&c->btree_writes_nr);
	u64 sectors = atomic64_read(&c->btree_writes_sectors);

	return nr ? div64_u64(sectors, nr) : 0;
}

static long stats_to_text(struct printbuf *out, struct bch_fs *c,
			  struct bch_move_stats *stats)
{
	pr_buf(out, "%s: data type %s btree_id %s position: ",
	       stats->name,
	       bch2_data_types[stats->data_type],
	       bch2_btree_ids[stats->btree_id]);
	bch2_bpos_to_text(out, stats->pos);
	pr_buf(out, "%s", "\n");

	return 0;
}

static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
	long ret = 0;
	struct bch_move_stats *iter;

	mutex_lock(&c->data_progress_lock);

	if (list_empty(&c->data_progress_list))
		pr_buf(out, "%s", "no progress to report\n");
	else
		list_for_each_entry(iter, &c->data_progress_list, list) {
			stats_to_text(out, c, iter);
		}

	mutex_unlock(&c->data_progress_lock);
	return ret;
}

static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
		if (k.k->type == KEY_TYPE_extent) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;

			extent_for_each_ptr_decode(e, p, entry) {
				if (!crc_is_compressed(p.crc)) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
				} else {
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
				}

				/* only looking at the first ptr */
				break;
			}
		}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		return ret;
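
	/* sector counts are reported in bytes below, hence the << 9: */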
310 "uncompressed data:\n"
311 " nr extents: %llu\n"
312 " size (bytes): %llu\n"
314 " nr extents: %llu\n"
315 " compressed size (bytes): %llu\n"
316 " uncompressed size (bytes): %llu\n",
317 nr_uncompressed_extents,
318 uncompressed_sectors << 9,
319 nr_compressed_extents,
320 compressed_sectors_compressed << 9,
321 compressed_sectors_uncompressed << 9);

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	pr_buf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
	bch2_bpos_to_text(out, c->gc_gens_pos);
	pr_buf(out, "\n");
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
	sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos) {
		bch2_gc_gens_pos_to_text(&out, c);
		return out.pos - buf;
	}

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
	sysfs_hprint(copy_gc_wait,
		     max(0LL, c->copygc_wait -
			 atomic64_read(&c->io_clock[WRITE].now)) << 9);

	if (attr == &sysfs_rebalance_work) {
		bch2_rebalance_work_to_text(&out, c);
		return out.pos - buf;
	}

	sysfs_print(promote_whole_extents, c->promote_whole_extents);
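
	/* Debugging: */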

	if (attr == &sysfs_journal_debug) {
		bch2_journal_debug_to_text(&out, &c->journal);
		return out.pos - buf;
	}

	if (attr == &sysfs_journal_pins) {
		bch2_journal_pins_to_text(&out, &c->journal);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_updates) {
		bch2_btree_updates_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_dirty_btree_nodes) {
		bch2_dirty_btree_nodes_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_cache) {
		bch2_btree_cache_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_key_cache) {
		bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_transactions) {
		bch2_btree_trans_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_stripes_heap) {
		bch2_stripes_heap_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_open_buckets) {
		bch2_open_buckets_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_compression_stats) {
		bch2_compression_stats_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_new_stripes) {
		bch2_new_stripes_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_io_timers_read) {
		bch2_io_timers_to_text(&out, &c->io_clock[READ]);
		return out.pos - buf;
	}

	if (attr == &sysfs_io_timers_write) {
		bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
		return out.pos - buf;
	}

	if (attr == &sysfs_data_op_data_progress) {
		data_progress_to_text(&out, c);
		return out.pos - buf;
	}

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta(&c->journal);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}
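
	/*
	 * prune_cache invokes the btree node shrinker directly with the
	 * requested number of nodes to scan:
	 */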
	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test	  = strsep(&p, " \t\n");
		char *nr_str	  = strsep(&p, " \t\n");
		char *threads_str = strsep(&p, " \t\n");
		u64 nr;
		unsigned threads;
		int ret;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);
	}
#endif /* CONFIG_BCACHEFS_TESTS */

	return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_avg_write_size,
	&sysfs_promote_whole_extents,
	&sysfs_compression_stats,
#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,

	&sysfs_btree_updates,
	&sysfs_dirty_btree_nodes,

	&sysfs_btree_key_cache,
	&sysfs_btree_transactions,

	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_journal_flush,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,

	&sysfs_copy_gc_enabled,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_data_op_data_progress,

	&sysfs_internal_uuid,
	NULL
};
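
/* options */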

SHOW(bch2_fs_opts_dir)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);
	pr_buf(&out, "\n");

	return out.pos - buf;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = bch2_opt_parse(c, opt, strim(tmp), &v);
	kfree(tmp);
	if (ret < 0)
		return ret;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		return ret;

	if (opt->set_sb != SET_NO_SB_OPT) {
		mutex_lock(&c->sb_lock);
		opt->set_sb(c->disk_sb.sb, v);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	return size;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->mode & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
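
/* time stats */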

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name) {				\
		bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
		return out.pos - buf;					\
	}
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};
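
/*
 * Per-device code: reserve_stats prints, for free_inc and each allocation
 * reserve, the fifo's used entries and its total capacity.
 */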

static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
	enum alloc_reserve i;

	spin_lock(&ca->fs->freelist_lock);

	pr_buf(out, "free_inc:\t%zu\t%zu\n",
	       fifo_used(&ca->free_inc),
	       ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
		       fifo_used(&ca->free[i]),
		       ca->free[i].size);

	spin_unlock(&ca->fs->freelist_lock);
}

static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].type]++;

	pr_buf(out,
	       "\t\t buckets\t sectors      fragmented\n"
	       "capacity%16llu\n",
	       ca->mi.nbuckets - ca->mi.first_bucket);

	for (i = 1; i < BCH_DATA_NR; i++)
		pr_buf(out, "%-8s%16llu%16llu%16llu\n",
		       bch2_data_types[i], stats.d[i].buckets,
		       stats.d[i].sectors, stats.d[i].fragmented);
754 "free_inc\t\t%zu/%zu\n"
755 "free[RESERVE_MOVINGGC]\t%zu/%zu\n"
756 "free[RESERVE_NONE]\t%zu/%zu\n"
757 "freelist_wait\t\t%s\n"
758 "open buckets allocated\t%u\n"
759 "open buckets this dev\t%u\n"
760 "open buckets total\t%u\n"
761 "open_buckets_wait\t%s\n"
762 "open_buckets_btree\t%u\n"
763 "open_buckets_user\t%u\n"
764 "btree reserve cache\t%u\n"
765 "thread state:\t\t%s\n",
767 __dev_buckets_available(ca, stats),
768 fifo_used(&ca->free_inc), ca->free_inc.size,
769 fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
770 fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
771 c->freelist_wait.list.first ? "waiting" : "empty",
772 OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
775 c->open_buckets_wait.list.first ? "waiting" : "empty",
778 c->btree_reserve_cache_nr,
779 bch2_allocator_states[ca->allocator_state]);
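
/* iodone: per direction ("read"/"write") and data type, bytes of completed IO: */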

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		pr_buf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			pr_buf(out, "%-12s:%12llu\n",
			       bch2_data_types[i],
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(&out, &c->disk_sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		}

		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_has_data) {
		bch2_flags_to_text(&out, bch2_data_types,
				   bch2_dev_has_data(c, ca));
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_cache_replacement_policy) {
		bch2_string_opt_to_text(&out,
					bch2_cache_replacement_policies,
					ca->mi.replacement);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_state_rw) {
		bch2_string_opt_to_text(&out, bch2_member_states,
					ca->mi.state);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_iodone) {
		dev_iodone_to_text(&out, ca);
		return out.pos - buf;
	}

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read) {
		bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
		return out.pos - buf;
	}

	if (attr == &sysfs_io_latency_stats_write) {
		bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
		return out.pos - buf;
	}

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_reserve_stats) {
		reserve_stats_to_text(&out, ca);
		return out.pos - buf;
	}

	if (attr == &sysfs_alloc_debug) {
		dev_alloc_debug_to_text(&out, ca);
		return out.pos - buf;
	}

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);

		if (v < 0)
			return v;

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_wake_allocator)
		bch2_wake_allocator(ca);

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_cache_replacement_policy,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,

	&sysfs_reserve_stats,

	&sysfs_wake_allocator,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */