// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
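/*
 * This file implements the sysfs interface for bcachefs: one directory per
 * filesystem under /sys/fs/bcachefs/, with per-device and per-option
 * subdirectories. Show/store handlers are dispatched through the
 * SYSFS_OPS()/SHOW()/STORE() macros below.
 */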
#define SYSFS_OPS(type)							\
struct sysfs_ops type ## _sysfs_ops = {					\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)

#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)
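/*
 * SHOW()/STORE() expand to the signature of a show/store handler; the body
 * follows at the use site. The sysfs_* helpers below rely on the parameter
 * names these signatures introduce (attr, buf, size) and return directly
 * from the enclosing handler when the attribute matches.
 */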
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
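/*
 * For example, rw_attribute(foo) expands to:
 *
 *	static struct attribute sysfs_foo =
 *		{ .name = "foo", .mode = S_IRUGO|S_IWUSR };
 */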
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
} while (0)
/* snprint() is bcachefs's type-generic print helper, from util.h: */
#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return snprint(buf, PAGE_SIZE, var);			\
} while (0)
#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file) {					\
		bch2_hprint(&out, val);					\
		pr_buf(&out, "\n");					\
		return out.pos - buf;					\
	}								\
} while (0)
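/*
 * The var_* wrappers below assume the enclosing show/store function defines
 * a var(x) accessor macro mapping an attribute name to the field backing it:
 */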
#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)
#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)
#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define strtoul_restrict_or_return(cp, min, max)			\
({									\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\
	if (_r)								\
		return _r;						\
	__v;								\
})

#define strtoi_h_or_return(cp)						\
({									\
	u64 _v;								\
	int _r = strtoi_h(cp, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define sysfs_hatoi(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
} while (0)
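/*
 * The *_or_return helpers above are GNU statement expressions: on a parse
 * error they return the error from the enclosing store handler, otherwise
 * they evaluate to the parsed value. The _h variants accept human-readable
 * size suffixes (k, M, G, ...).
 */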
write_attribute(trigger_journal_flush);
write_attribute(trigger_gc);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_avg_write_size);

read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
read_attribute(open_buckets);

read_attribute(internal_uuid);

read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);

read_attribute(read_realloc_races);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(data_op_data_progress);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
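/*
 * Declaring an attribute above only creates the struct attribute; it becomes
 * visible in sysfs once listed in one of the *_files[] arrays further down.
 */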
#define x(_name)						\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = S_IRUGO };
	BCH_TIME_STATS()
#undef x
static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
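/* Mean btree node write size, in sectors, over the lifetime of the mount: */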
static size_t bch2_btree_avg_write_size(struct bch_fs *c)
{
	u64 nr	    = atomic64_read(&c->btree_writes_nr);
	u64 sectors = atomic64_read(&c->btree_writes_sectors);

	return nr ? div64_u64(sectors, nr) : 0;
}
static long stats_to_text(struct printbuf *out, struct bch_fs *c,
			  struct bch_move_stats *stats)
{
	pr_buf(out, "%s: data type %s btree_id %s position: ",
	       stats->name,
	       bch2_data_types[stats->data_type],
	       bch2_btree_ids[stats->btree_id]);
	bch2_bpos_to_text(out, stats->pos);
	pr_buf(out, "%s", "\n");

	return 0;
}
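/* Report progress for every data job currently on c->data_progress_list: */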
static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
	long ret = 0;
	struct bch_move_stats *iter;

	mutex_lock(&c->data_progress_lock);

	if (list_empty(&c->data_progress_list))
		pr_buf(out, "%s", "no progress to report\n");
	else
		list_for_each_entry(iter, &c->data_progress_list, list) {
			stats_to_text(out, c, iter);
		}

	mutex_unlock(&c->data_progress_lock);
	return ret;
}
static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);

	if (!fs_usage)
		return -ENOMEM;

	bch2_fs_usage_to_text(out, c, fs_usage);

	/* bch2_fs_usage_read() returns with mark_lock held for read: */
	percpu_up_read(&c->mark_lock);

	kfree(fs_usage);
	return 0;
}
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
		if (k.k->type == KEY_TYPE_extent) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;

			extent_for_each_ptr_decode(e, p, entry) {
				if (!crc_is_compressed(p.crc)) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
				} else {
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
				}

				/* only looking at the first ptr */
				break;
			}
		}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		return ret;

	pr_buf(out,
	       "uncompressed data:\n"
	       "	nr extents:			%llu\n"
	       "	size (bytes):			%llu\n"
	       "compressed data:\n"
	       "	nr extents:			%llu\n"
	       "	compressed size (bytes):	%llu\n"
	       "	uncompressed size (bytes):	%llu\n",
	       nr_uncompressed_extents,
	       uncompressed_sectors << 9,	/* sectors -> bytes */
	       nr_compressed_extents,
	       compressed_sectors_compressed << 9,
	       compressed_sectors_uncompressed << 9);
	return 0;
}
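/*
 * The above backs the fs-level "compression_stats" file, e.g.
 * (substituting the filesystem's UUID):
 *
 *	cat /sys/fs/bcachefs/<uuid>/compression_stats
 */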
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	pr_buf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
	bch2_bpos_to_text(out, c->gc_gens_pos);
	pr_buf(out, "\n");
}
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_print(minor,			c->minor);
	sysfs_printf(internal_uuid, "%pU",	c->sb.uuid.b);

	sysfs_hprint(btree_cache_size,		bch2_btree_cache_size(c));
	sysfs_hprint(btree_avg_write_size,	bch2_btree_avg_write_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));
	sysfs_print(extent_migrate_done,
		    atomic_long_read(&c->extent_migrate_done));
	sysfs_print(extent_migrate_raced,
		    atomic_long_read(&c->extent_migrate_raced));

	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos) {
		bch2_gc_gens_pos_to_text(&out, c);
		return out.pos - buf;
	}

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled,		"%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance,	&c->rebalance.pd); /* XXX */
	sysfs_hprint(copy_gc_wait,
		     max(0LL, c->copygc_wait -
			 atomic64_read(&c->io_clock[WRITE].now)) << 9);

	if (attr == &sysfs_rebalance_work) {
		bch2_rebalance_work_to_text(&out, c);
		return out.pos - buf;
	}

	sysfs_print(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_alloc_debug)
		return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;

	if (attr == &sysfs_journal_debug) {
		bch2_journal_debug_to_text(&out, &c->journal);
		return out.pos - buf;
	}

	if (attr == &sysfs_journal_pins) {
		bch2_journal_pins_to_text(&out, &c->journal);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_updates) {
		bch2_btree_updates_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_dirty_btree_nodes) {
		bch2_dirty_btree_nodes_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_cache) {
		bch2_btree_cache_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_key_cache) {
		bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
		return out.pos - buf;
	}

	if (attr == &sysfs_btree_transactions) {
		bch2_btree_trans_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_stripes_heap) {
		bch2_stripes_heap_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_open_buckets) {
		bch2_open_buckets_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_compression_stats) {
		bch2_compression_stats_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_new_stripes) {
		bch2_new_stripes_to_text(&out, c);
		return out.pos - buf;
	}

	if (attr == &sysfs_io_timers_read) {
		bch2_io_timers_to_text(&out, &c->io_clock[READ]);
		return out.pos - buf;
	}
	if (attr == &sysfs_io_timers_write) {
		bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
		return out.pos - buf;
	}

	if (attr == &sysfs_data_op_data_progress) {
		data_progress_to_text(&out, c);
		return out.pos - buf;
	}

	return 0;
}
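/*
 * Store side: writable attributes parse the buffer and return the number of
 * bytes consumed on success, e.g. (substituting the filesystem's UUID):
 *
 *	echo 1 > /sys/fs/bcachefs/<uuid>/internal/trigger_gc
 */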
STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance,	&c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents,	c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta(&c->journal);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);

		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test		= strsep(&p, " \t\n");
		char *nr_str		= strsep(&p, " \t\n");
		char *threads_str	= strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		/* usage: echo "<test name> <nr> <threads>" > perf_test */
		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);
		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_avg_write_size,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};
/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
	&sysfs_alloc_debug,
	&sysfs_journal_debug,
	&sysfs_journal_pins,
	&sysfs_btree_updates,
	&sysfs_dirty_btree_nodes,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_btree_transactions,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,

	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,

	&sysfs_trigger_journal_flush,
	&sysfs_trigger_gc,
	&sysfs_gc_gens_pos,
	&sysfs_prune_cache,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_new_stripes,

	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_data_op_data_progress,

	&sysfs_internal_uuid,
	NULL
};
/* options */

SHOW(bch2_fs_opts_dir)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(&out, c, opt, v, OPT_SHOW_FULL_LIST);
	pr_buf(&out, "\n");

	return out.pos - buf;
}
STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	ret = bch2_opt_parse(c, opt, strim(tmp), &v);
	kfree(tmp);

	if (ret < 0)
		return ret;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		return ret;

	if (opt->set_sb != SET_NO_SB_OPT) {
		mutex_lock(&c->sb_lock);
		opt->set_sb(c->disk_sb.sb, v);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	return size;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };
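/*
 * Option files are created dynamically below, one per filesystem-scoped
 * option in bch2_opt_table; e.g. (hypothetical path and value):
 *
 *	echo lz4 > /sys/fs/bcachefs/<uuid>/options/compression
 */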
int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->mode & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}
/* time stats */

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name) {				\
		bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
		return out.pos - buf;					\
	}
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};
static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
	enum alloc_reserve i;

	spin_lock(&ca->fs->freelist_lock);

	pr_buf(out, "free_inc:\t%zu\t%zu\n",
	       fifo_used(&ca->free_inc),
	       ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
		       fifo_used(&ca->free[i]),
		       ca->free[i].size);

	spin_unlock(&ca->fs->freelist_lock);
}
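/*
 * Allocator debug table: per-data-type bucket/sector/fragmentation counts,
 * followed by freelist and open-bucket state for this device:
 */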
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].type]++;

	pr_buf(out,
	       "\t\t buckets\t sectors      fragmented\n"
	       "capacity%16llu\n",
	       ca->mi.nbuckets - ca->mi.first_bucket);

	for (i = 1; i < BCH_DATA_NR; i++)
		pr_buf(out, "%-8s%16llu%16llu%16llu\n",
		       bch2_data_types[i], stats.d[i].buckets,
		       stats.d[i].sectors, stats.d[i].fragmented);

	pr_buf(out,
	       "ec\t%16llu\n"
	       "available%15llu\n"
	       "\n"
	       "free_inc\t\t%zu/%zu\n"
	       "free[RESERVE_MOVINGGC]\t%zu/%zu\n"
	       "free[RESERVE_NONE]\t%zu/%zu\n"
	       "freelist_wait\t\t%s\n"
	       "open buckets allocated\t%u\n"
	       "open buckets this dev\t%u\n"
	       "open buckets total\t%u\n"
	       "open_buckets_wait\t%s\n"
	       "open_buckets_btree\t%u\n"
	       "open_buckets_user\t%u\n"
	       "btree reserve cache\t%u\n"
	       "thread state:\t\t%s\n",
	       stats.buckets_ec,
	       __dev_buckets_available(ca, stats),
	       fifo_used(&ca->free_inc),		ca->free_inc.size,
	       fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
	       fifo_used(&ca->free[RESERVE_NONE]),	ca->free[RESERVE_NONE].size,
	       c->freelist_wait.list.first		? "waiting" : "empty",
	       OPEN_BUCKETS_COUNT - c->open_buckets_nr_free,
	       ca->nr_open_buckets,
	       OPEN_BUCKETS_COUNT,
	       c->open_buckets_wait.list.first		? "waiting" : "empty",
	       nr[BCH_DATA_btree],
	       nr[BCH_DATA_user],
	       c->btree_reserve_cache_nr,
	       bch2_allocator_states[ca->allocator_state]);
}
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		pr_buf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			pr_buf(out, "%-12s:%12llu\n",
			       bch2_data_types[i],
			       percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}
SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct printbuf out = _PBUF(buf, PAGE_SIZE);

	sysfs_printf(uuid,		"%pU\n", ca->uuid.b);

	sysfs_print(bucket_size,	bucket_bytes(ca));
	sysfs_print(first_bucket,	ca->mi.first_bucket);
	sysfs_print(nbuckets,		ca->mi.nbuckets);
	sysfs_print(durability,		ca->mi.durability);
	sysfs_print(discard,		ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(&out, &c->disk_sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		}

		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_has_data) {
		bch2_flags_to_text(&out, bch2_data_types,
				   bch2_dev_has_data(c, ca));
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_cache_replacement_policy) {
		bch2_string_opt_to_text(&out,
					bch2_cache_replacement_policies,
					ca->mi.replacement);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_state_rw) {
		bch2_string_opt_to_text(&out, bch2_member_states,
					ca->mi.state);
		pr_buf(&out, "\n");
		return out.pos - buf;
	}

	if (attr == &sysfs_iodone) {
		dev_iodone_to_text(&out, ca);
		return out.pos - buf;
	}

	sysfs_print(io_latency_read,	atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write,	atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read) {
		bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
		return out.pos - buf;
	}
	if (attr == &sysfs_io_latency_stats_write) {
		bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
		return out.pos - buf;
	}

	sysfs_printf(congested,		"%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_reserve_stats) {
		reserve_stats_to_text(&out, ca);
		return out.pos - buf;
	}
	if (attr == &sysfs_alloc_debug) {
		dev_alloc_debug_to_text(&out, ca);
		return out.pos - buf;
	}

	return 0;
}
STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);

		if (v < 0)
			return v;

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_wake_allocator)
		bch2_wake_allocator(ca);

	return size;
}
SYSFS_OPS(bch2_dev);
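/*
 * Files exposed in each per-device directory. Writable settings update the
 * corresponding superblock member field, e.g. (hypothetical device dir):
 *
 *	echo 1 > /sys/fs/bcachefs/<uuid>/dev-0/discard
 */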
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_cache_replacement_policy,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	&sysfs_reserve_stats,

	/* debug: */
	&sysfs_alloc_debug,
	&sysfs_wake_allocator,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */