// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "journal.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)
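
/*
 * Usage sketch (hypothetical "example" type, not defined in this file):
 * SHOW()/STORE() generate the printbuf plumbing and error-class mapping,
 * and SYSFS_OPS() ties the generated _show/_store pair together:
 *
 *	SHOW(example)
 *	{
 *		sysfs_print(some_attr, some_value);
 *		return 0;
 *	}
 *
 *	STORE(example)
 *	{
 *		return size;
 *	}
 *	SYSFS_OPS(example);
 */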

#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)
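
/* 0200: write-only trigger, 0444: read-only, 0644: read-write (root-writable) */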

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})
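
/*
 * Note: strtoul_or_return() is a statement expression that returns from the
 * enclosing *store* function on parse failure, so it is only safe to use
 * inside STORE() bodies.
 */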

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(iodone);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_work);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)							\
	static struct attribute sysfs_time_stat_##_name =		\
		{ .name = #_name, .mode = 0444 };
	BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}
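
/* Each live node is accounted at btree_bytes(c), i.e. nr_live_nodes * node_size. */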

static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id id;
	u64 nr_uncompressed_extents = 0,
	    nr_compressed_extents = 0,
	    nr_incompressible_extents = 0,
	    uncompressed_sectors = 0,
	    incompressible_sectors = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		for_each_btree_key(trans, iter, id, POS_MIN,
				   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;
			bool compressed = false, uncompressed = false, incompressible = false;

			bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
				switch (p.crc.compression_type) {
				case BCH_COMPRESSION_TYPE_none:
					uncompressed = true;
					uncompressed_sectors += k.k->size;
					break;
				case BCH_COMPRESSION_TYPE_incompressible:
					incompressible = true;
					incompressible_sectors += k.k->size;
					break;
				default:
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
					compressed = true;
					break;
				}
			}

			if (incompressible)
				nr_incompressible_extents++;
			else if (uncompressed)
				nr_uncompressed_extents++;
			else if (compressed)
				nr_compressed_extents++;
		}
		bch2_trans_iter_exit(trans, &iter);
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;
319 prt_printf(out, "uncompressed:\n");
320 prt_printf(out, " nr extents: %llu\n", nr_uncompressed_extents);
321 prt_printf(out, " size: ");
322 prt_human_readable_u64(out, uncompressed_sectors << 9);
323 prt_printf(out, "\n");
325 prt_printf(out, "compressed:\n");
326 prt_printf(out, " nr extents: %llu\n", nr_compressed_extents);
327 prt_printf(out, " compressed size: ");
328 prt_human_readable_u64(out, compressed_sectors_compressed << 9);
329 prt_printf(out, "\n");
330 prt_printf(out, " uncompressed size: ");
331 prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
332 prt_printf(out, "\n");
334 prt_printf(out, "incompressible:\n");
335 prt_printf(out, " nr extents: %llu\n", nr_incompressible_extents);
336 prt_printf(out, " size: ");
337 prt_human_readable_u64(out, incompressible_sectors << 9);
338 prt_printf(out, "\n");
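
/*
 * Classification precedence: any incompressible pointer marks the extent
 * incompressible, else any uncompressed pointer marks it uncompressed, else
 * it counts as compressed; << 9 converts 512-byte sectors to bytes.
 */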

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_work)
		bch2_rebalance_work_to_text(out, c);

	sysfs_print(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_updates)
		bch2_btree_updates_to_text(out, c);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, &c->btree_cache);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	/* Debugging: */

	if (!test_bit(BCH_FS_RW, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test = strsep(&p, " \t\n");
		char *nr_str = strsep(&p, " \t\n");
		char *threads_str = strsep(&p, " \t\n");
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);
		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);
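
/*
 * Usage sketch (requires CONFIG_BCACHEFS_TESTS): perf_test takes
 * "<test> <nr> <threads>"; <nr> accepts human-readable suffixes via
 * bch2_strtoull_h(), e.g. (test name assumed):
 *
 *	echo "rand_insert 1M 8" > /sys/fs/bcachefs/<uuid>/perf_test
 */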

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, ...)							\
	if (attr == &sysfs_##t) {					\
		counter             = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters) {
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...)							\
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v) {
		bch2_rebalance_add_work(c, S64_MAX);
		rebalance_wakeup(c);
	}

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);
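
/*
 * Usage sketch: each OPT_FS option appears as its own file under the
 * options/ directory of the filesystem kobject, e.g.:
 *
 *	echo lz4 > /sys/fs/bcachefs/<uuid>/options/background_compression
 *
 * The write is parsed with bch2_opt_parse() and persisted to the superblock
 * via bch2_opt_set_sb().
 */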

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return -EINVAL;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)								\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	prt_tab(out);
	prt_str(out, "buckets");
	prt_tab_rjust(out);
	prt_str(out, "sectors");
	prt_tab_rjust(out);
	prt_str(out, "fragmented");
	prt_tab_rjust(out);
	prt_newline(out);

	for (i = 0; i < BCH_DATA_NR; i++) {
		prt_str(out, bch2_data_types[i]);
		prt_tab(out);
		prt_u64(out, stats.d[i].buckets);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].sectors);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].fragmented);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_str(out, "ec");
	prt_tab(out);
	prt_u64(out, stats.buckets_ec);
	prt_tab_rjust(out);
	prt_newline(out);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_types[i],
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group) {
			mutex_lock(&c->sb_lock);
			bch2_disk_path_to_text(out, c->disk_sb.sb,
					       ca->mi.group - 1);
			mutex_unlock(&c->sb_lock);
		}

		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_iodone)
		dev_iodone_to_text(out, ca);

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ]);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	return size;
}
SYSFS_OPS(bch2_dev);
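
/*
 * Usage sketch (device directory name assumed, e.g. dev-0):
 *
 *	echo 1 > /sys/fs/bcachefs/<uuid>/dev-0/discard
 *
 * Durability is stored biased by one in the member field
 * (BCH_MEMBER_DURABILITY() == 0 means unset), hence v + 1 above.
 */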

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */