/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
8 #ifndef NO_BCACHEFS_SYSFS
14 #include "btree_cache.h"
16 #include "btree_iter.h"
17 #include "btree_update.h"
18 #include "btree_update_interior.h"
29 #include <linux/blkdev.h>
30 #include <linux/sort.h>
31 #include <linux/sched/clock.h>
35 #define SYSFS_OPS(type) \
36 struct sysfs_ops type ## _sysfs_ops = { \
37 .show = type ## _show, \
38 .store = type ## _store \
42 static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
46 static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
47 const char *buf, size_t size) \
49 #define __sysfs_attribute(_name, _mode) \
50 static struct attribute sysfs_##_name = \
51 { .name = #_name, .mode = _mode }
53 #define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
54 #define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
55 #define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
57 #define sysfs_printf(file, fmt, ...) \
59 if (attr == &sysfs_ ## file) \
60 return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
63 #define sysfs_print(file, var) \
65 if (attr == &sysfs_ ## file) \
66 return snprint(buf, PAGE_SIZE, var); \
69 #define sysfs_hprint(file, val) \
71 if (attr == &sysfs_ ## file) { \
72 ssize_t ret = bch2_hprint(buf, val); \
78 #define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
79 #define var_print(_var) sysfs_print(_var, var(_var))
80 #define var_hprint(_var) sysfs_hprint(_var, var(_var))
82 #define sysfs_strtoul(file, var) \
84 if (attr == &sysfs_ ## file) \
85 return strtoul_safe(buf, var) ?: (ssize_t) size; \
88 #define sysfs_strtoul_clamp(file, var, min, max) \
90 if (attr == &sysfs_ ## file) \
91 return strtoul_safe_clamp(buf, var, min, max) \
95 #define strtoul_or_return(cp) \
98 int _r = kstrtoul(cp, 10, &_v); \
104 #define strtoul_restrict_or_return(cp, min, max) \
106 unsigned long __v = 0; \
107 int _r = strtoul_safe_restrict(cp, __v, min, max); \
113 #define strtoi_h_or_return(cp) \
116 int _r = strtoi_h(cp, &_v); \
122 #define sysfs_hatoi(file, var) \
124 if (attr == &sysfs_ ## file) \
125 return strtoi_h(buf, &var) ?: (ssize_t) size; \
128 write_attribute(trigger_journal_flush);
129 write_attribute(trigger_btree_coalesce);
130 write_attribute(trigger_gc);
131 write_attribute(prune_cache);
132 rw_attribute(btree_gc_periodic);
134 read_attribute(uuid);
135 read_attribute(minor);
136 read_attribute(bucket_size);
137 read_attribute(block_size);
138 read_attribute(btree_node_size);
139 read_attribute(first_bucket);
140 read_attribute(nbuckets);
141 read_attribute(iostats);
142 read_attribute(read_priority_stats);
143 read_attribute(write_priority_stats);
144 read_attribute(fragmentation_stats);
145 read_attribute(oldest_gen_stats);
146 read_attribute(reserve_stats);
147 read_attribute(btree_cache_size);
148 read_attribute(compression_stats);
149 read_attribute(journal_debug);
150 read_attribute(journal_pins);
151 read_attribute(btree_updates);
152 read_attribute(dirty_btree_nodes);
154 read_attribute(internal_uuid);
156 read_attribute(has_data);
157 read_attribute(alloc_debug);
158 write_attribute(wake_allocator);
160 read_attribute(read_realloc_races);
161 read_attribute(extent_migrate_done);
162 read_attribute(extent_migrate_raced);
164 rw_attribute(journal_write_delay_ms);
165 rw_attribute(journal_reclaim_delay_ms);
167 rw_attribute(writeback_pages_max);
169 rw_attribute(discard);
170 rw_attribute(cache_replacement_policy);
172 rw_attribute(copy_gc_enabled);
173 sysfs_pd_controller_attribute(copy_gc);
176 rw_attribute(tiering_enabled);
177 rw_attribute(tiering_percent);
178 sysfs_pd_controller_attribute(tiering);
181 rw_attribute(pd_controllers_update_seconds);
183 read_attribute(meta_replicas_have);
184 read_attribute(data_replicas_have);
186 #define BCH_DEBUG_PARAM(name, description) \
190 #undef BCH_DEBUG_PARAM
192 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
193 sysfs_time_stats_attribute(name, frequency_units, duration_units);
197 static struct attribute sysfs_state_rw = {
202 static size_t bch2_btree_cache_size(struct bch_fs *c)
207 mutex_lock(&c->btree_cache.lock);
208 list_for_each_entry(b, &c->btree_cache.live, list)
209 ret += btree_bytes(c);
211 mutex_unlock(&c->btree_cache.lock);
215 static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
217 struct bch_fs_usage stats = bch2_fs_usage_read(c);
219 return scnprintf(buf, PAGE_SIZE,
220 "capacity:\t\t%llu\n"
224 "\treserved:\t%llu\n"
228 "\treserved:\t%llu\n"
232 "\treserved:\t%llu\n"
236 "\treserved:\t%llu\n"
237 "online reserved:\t%llu\n",
239 stats.s[0].data[S_META],
240 stats.s[0].data[S_DIRTY],
241 stats.s[0].persistent_reserved,
242 stats.s[1].data[S_META],
243 stats.s[1].data[S_DIRTY],
244 stats.s[1].persistent_reserved,
245 stats.s[2].data[S_META],
246 stats.s[2].data[S_DIRTY],
247 stats.s[2].persistent_reserved,
248 stats.s[3].data[S_META],
249 stats.s[3].data[S_DIRTY],
250 stats.s[3].persistent_reserved,
251 stats.online_reserved);
254 static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
256 struct btree_iter iter;
258 u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
259 nr_compressed_extents = 0,
260 compressed_sectors_compressed = 0,
261 compressed_sectors_uncompressed = 0;
263 if (!bch2_fs_running(c))
266 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
267 if (k.k->type == BCH_EXTENT) {
268 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
269 const struct bch_extent_ptr *ptr;
270 struct bch_extent_crc_unpacked crc;
272 extent_for_each_ptr_crc(e, ptr, crc) {
273 if (crc.compression_type == BCH_COMPRESSION_NONE) {
274 nr_uncompressed_extents++;
275 uncompressed_sectors += e.k->size;
277 nr_compressed_extents++;
278 compressed_sectors_compressed +=
280 compressed_sectors_uncompressed +=
281 crc.uncompressed_size;
284 /* only looking at the first ptr */
288 bch2_btree_iter_unlock(&iter);
290 return scnprintf(buf, PAGE_SIZE,
291 "uncompressed data:\n"
292 " nr extents: %llu\n"
293 " size (bytes): %llu\n"
295 " nr extents: %llu\n"
296 " compressed size (bytes): %llu\n"
297 " uncompressed size (bytes): %llu\n",
298 nr_uncompressed_extents,
299 uncompressed_sectors << 9,
300 nr_compressed_extents,
301 compressed_sectors_compressed << 9,
302 compressed_sectors_uncompressed << 9);
307 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
309 sysfs_print(minor, c->minor);
310 sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
312 sysfs_print(journal_write_delay_ms, c->journal.write_delay_ms);
313 sysfs_print(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
315 sysfs_print(writeback_pages_max, c->writeback_pages_max);
317 sysfs_print(block_size, block_bytes(c));
318 sysfs_print(btree_node_size, btree_bytes(c));
319 sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
321 sysfs_print(read_realloc_races,
322 atomic_long_read(&c->read_realloc_races));
323 sysfs_print(extent_migrate_done,
324 atomic_long_read(&c->extent_migrate_done));
325 sysfs_print(extent_migrate_raced,
326 atomic_long_read(&c->extent_migrate_raced));
328 sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
330 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
332 sysfs_print(pd_controllers_update_seconds,
333 c->pd_controllers_update_seconds);
335 sysfs_printf(tiering_enabled, "%i", c->tiering_enabled);
336 sysfs_print(tiering_percent, c->tiering_percent);
338 sysfs_pd_controller_show(tiering, &c->tiers[1].pd); /* XXX */
340 sysfs_printf(meta_replicas_have, "%u", bch2_replicas_online(c, true));
341 sysfs_printf(data_replicas_have, "%u", bch2_replicas_online(c, false));
345 if (attr == &sysfs_alloc_debug)
346 return show_fs_alloc_debug(c, buf);
348 if (attr == &sysfs_journal_debug)
349 return bch2_journal_print_debug(&c->journal, buf);
351 if (attr == &sysfs_journal_pins)
352 return bch2_journal_print_pins(&c->journal, buf);
354 if (attr == &sysfs_btree_updates)
355 return bch2_btree_updates_print(c, buf);
357 if (attr == &sysfs_dirty_btree_nodes)
358 return bch2_dirty_btree_nodes_print(c, buf);
360 if (attr == &sysfs_compression_stats)
361 return bch2_compression_stats(c, buf);
363 #define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
365 #undef BCH_DEBUG_PARAM
372 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
374 sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
375 sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
377 if (attr == &sysfs_writeback_pages_max)
378 c->writeback_pages_max = strtoul_restrict_or_return(buf, 1, UINT_MAX);
380 if (attr == &sysfs_btree_gc_periodic) {
381 ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
384 wake_up_process(c->gc_thread);
388 if (attr == &sysfs_copy_gc_enabled) {
391 ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
394 for_each_member_device(ca, c, i)
395 if (ca->copygc_thread)
396 wake_up_process(ca->copygc_thread);
400 if (attr == &sysfs_tiering_enabled) {
401 ssize_t ret = strtoul_safe(buf, c->tiering_enabled)
404 bch2_tiering_start(c); /* issue wakeups */
408 sysfs_strtoul(pd_controllers_update_seconds,
409 c->pd_controllers_update_seconds);
411 sysfs_strtoul(tiering_percent, c->tiering_percent);
412 sysfs_pd_controller_store(tiering, &c->tiers[1].pd); /* XXX */
416 #define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
418 #undef BCH_DEBUG_PARAM
420 if (!bch2_fs_running(c))
425 if (attr == &sysfs_trigger_journal_flush)
426 bch2_journal_meta_async(&c->journal, NULL);
428 if (attr == &sysfs_trigger_btree_coalesce)
431 if (attr == &sysfs_trigger_gc)
434 if (attr == &sysfs_prune_cache) {
435 struct shrink_control sc;
437 sc.gfp_mask = GFP_KERNEL;
438 sc.nr_to_scan = strtoul_or_return(buf);
439 c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
447 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
449 mutex_lock(&c->state_lock);
450 size = __bch2_fs_store(kobj, attr, buf, size);
451 mutex_unlock(&c->state_lock);
457 struct attribute *bch2_fs_files[] = {
460 &sysfs_btree_node_size,
461 &sysfs_btree_cache_size,
463 &sysfs_meta_replicas_have,
464 &sysfs_data_replicas_have,
466 &sysfs_journal_write_delay_ms,
467 &sysfs_journal_reclaim_delay_ms,
469 &sysfs_writeback_pages_max,
471 &sysfs_tiering_percent,
473 &sysfs_compression_stats,
477 /* internal dir - just a wrapper */
479 SHOW(bch2_fs_internal)
481 struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
482 return bch2_fs_show(&c->kobj, attr, buf);
485 STORE(bch2_fs_internal)
487 struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
488 return bch2_fs_store(&c->kobj, attr, buf, size);
490 SYSFS_OPS(bch2_fs_internal);
492 struct attribute *bch2_fs_internal_files[] = {
494 &sysfs_journal_debug,
496 &sysfs_btree_updates,
497 &sysfs_dirty_btree_nodes,
499 &sysfs_read_realloc_races,
500 &sysfs_extent_migrate_done,
501 &sysfs_extent_migrate_raced,
503 &sysfs_trigger_journal_flush,
504 &sysfs_trigger_btree_coalesce,
508 &sysfs_copy_gc_enabled,
509 &sysfs_tiering_enabled,
510 sysfs_pd_controller_files(tiering),
511 &sysfs_internal_uuid,
513 #define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
515 #undef BCH_DEBUG_PARAM
522 SHOW(bch2_fs_opts_dir)
524 char *out = buf, *end = buf + PAGE_SIZE;
525 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
526 const struct bch_option *opt = container_of(attr, struct bch_option, attr);
527 int id = opt - bch2_opt_table;
528 u64 v = bch2_opt_get_by_id(&c->opts, id);
530 out += opt->type == BCH_OPT_STR
531 ? bch2_scnprint_string_list(out, end - out, opt->choices, v)
532 : scnprintf(out, end - out, "%lli", v);
533 out += scnprintf(out, end - out, "\n");
538 STORE(bch2_fs_opts_dir)
540 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
541 const struct bch_option *opt = container_of(attr, struct bch_option, attr);
542 int ret, id = opt - bch2_opt_table;
545 ret = bch2_opt_parse(opt, buf, &v);
549 mutex_lock(&c->sb_lock);
551 if (id == Opt_compression) {
552 int ret = bch2_check_set_has_compressed_data(c, v);
554 mutex_unlock(&c->sb_lock);
559 if (opt->set_sb != SET_NO_SB_OPT) {
560 opt->set_sb(c->disk_sb, v);
564 bch2_opt_set_by_id(&c->opts, id, v);
566 mutex_unlock(&c->sb_lock);
570 SYSFS_OPS(bch2_fs_opts_dir);
572 struct attribute *bch2_fs_opts_dir_files[] = { NULL };
574 int bch2_opts_create_sysfs_files(struct kobject *kobj)
576 const struct bch_option *i;
579 for (i = bch2_opt_table;
580 i < bch2_opt_table + bch2_opts_nr;
582 if (i->mode == OPT_INTERNAL)
585 ret = sysfs_create_file(kobj, &i->attr);
595 SHOW(bch2_fs_time_stats)
597 struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
599 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
600 sysfs_print_time_stats(&c->name##_time, name, \
601 frequency_units, duration_units);
608 STORE(bch2_fs_time_stats)
610 struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
612 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
613 sysfs_clear_time_stats(&c->name##_time, name);
619 SYSFS_OPS(bch2_fs_time_stats);
621 struct attribute *bch2_fs_time_stats_files[] = {
622 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
623 sysfs_time_stats_attribute_list(name, frequency_units, duration_units)
630 typedef unsigned (bucket_map_fn)(struct bch_dev *, size_t, void *);
632 static unsigned bucket_priority_fn(struct bch_dev *ca, size_t b,
635 struct bucket *g = bucket(ca, b);
636 int rw = (private ? 1 : 0);
638 return ca->fs->prio_clock[rw].hand - g->prio[rw];
641 static unsigned bucket_sectors_used_fn(struct bch_dev *ca, size_t b,
644 struct bucket *g = bucket(ca, b);
645 return bucket_sectors_used(g->mark);
648 static unsigned bucket_oldest_gen_fn(struct bch_dev *ca, size_t b,
651 return bucket_gc_gen(ca, b);
654 static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
655 bucket_map_fn *fn, void *private)
657 int cmp(const void *l, const void *r)
658 { return *((unsigned *) r) - *((unsigned *) l); }
661 /* Compute 31 quantiles */
665 down_read(&ca->bucket_lock);
668 p = vzalloc(n * sizeof(unsigned));
670 up_read(&ca->bucket_lock);
674 for (i = ca->mi.first_bucket; i < n; i++)
675 p[i] = fn(ca, i, private);
677 sort(p, n, sizeof(unsigned), cmp, NULL);
678 up_read(&ca->bucket_lock);
684 for (i = 0; i < ARRAY_SIZE(q); i++)
685 q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];
689 for (i = 0; i < ARRAY_SIZE(q); i++)
690 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
697 static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
699 enum alloc_reserve i;
702 spin_lock(&ca->freelist_lock);
704 ret = scnprintf(buf, PAGE_SIZE,
705 "free_inc:\t%zu\t%zu\n",
706 fifo_used(&ca->free_inc),
709 for (i = 0; i < RESERVE_NR; i++)
710 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
711 "free[%u]:\t%zu\t%zu\n", i,
712 fifo_used(&ca->free[i]),
715 spin_unlock(&ca->freelist_lock);
720 static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
722 struct bch_fs *c = ca->fs;
723 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
725 return scnprintf(buf, PAGE_SIZE,
726 "free_inc: %zu/%zu\n"
727 "free[RESERVE_BTREE]: %zu/%zu\n"
728 "free[RESERVE_MOVINGGC]: %zu/%zu\n"
729 "free[RESERVE_NONE]: %zu/%zu\n"
745 "freelist_wait: %s\n"
746 "open buckets: %u/%u (reserved %u)\n"
747 "open_buckets_wait: %s\n",
748 fifo_used(&ca->free_inc), ca->free_inc.size,
749 fifo_used(&ca->free[RESERVE_BTREE]), ca->free[RESERVE_BTREE].size,
750 fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
751 fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
752 ca->mi.nbuckets - ca->mi.first_bucket,
754 stats.buckets[BCH_DATA_SB],
755 stats.buckets[BCH_DATA_JOURNAL],
756 stats.buckets[BCH_DATA_BTREE],
757 stats.buckets[BCH_DATA_USER],
758 stats.buckets[BCH_DATA_CACHED],
759 __dev_buckets_available(ca, stats),
760 stats.sectors[BCH_DATA_SB],
761 stats.sectors[BCH_DATA_JOURNAL],
762 stats.sectors[BCH_DATA_BTREE],
763 stats.sectors[BCH_DATA_USER],
764 stats.sectors[BCH_DATA_CACHED],
765 c->freelist_wait.list.first ? "waiting" : "empty",
766 c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
767 c->open_buckets_wait.list.first ? "waiting" : "empty");
770 static const char * const bch2_rw[] = {
776 static ssize_t show_dev_iostats(struct bch_dev *ca, char *buf)
778 char *out = buf, *end = buf + PAGE_SIZE;
781 for (rw = 0; rw < 2; rw++) {
782 out += scnprintf(out, end - out, "%s:\n", bch2_rw[rw]);
784 for (i = 1; i < BCH_DATA_NR; i++) {
787 for_each_possible_cpu(cpu)
788 n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];
790 out += scnprintf(out, end - out, "%-12s:%12llu\n",
791 bch2_data_types[i], n << 9);
800 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
801 struct bch_fs *c = ca->fs;
802 char *out = buf, *end = buf + PAGE_SIZE;
804 sysfs_printf(uuid, "%pU\n", ca->uuid.b);
806 sysfs_print(bucket_size, bucket_bytes(ca));
807 sysfs_print(block_size, block_bytes(c));
808 sysfs_print(first_bucket, ca->mi.first_bucket);
809 sysfs_print(nbuckets, ca->mi.nbuckets);
810 sysfs_print(discard, ca->mi.discard);
812 if (attr == &sysfs_has_data) {
813 out += bch2_scnprint_flag_list(out, end - out,
815 bch2_dev_has_data(c, ca));
816 out += scnprintf(out, end - out, "\n");
820 sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);
822 if (attr == &sysfs_cache_replacement_policy) {
823 out += bch2_scnprint_string_list(out, end - out,
824 bch2_cache_replacement_policies,
826 out += scnprintf(out, end - out, "\n");
830 sysfs_print(tier, ca->mi.tier);
832 if (attr == &sysfs_state_rw) {
833 out += bch2_scnprint_string_list(out, end - out,
836 out += scnprintf(out, end - out, "\n");
840 if (attr == &sysfs_iostats)
841 return show_dev_iostats(ca, buf);
842 if (attr == &sysfs_read_priority_stats)
843 return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0);
844 if (attr == &sysfs_write_priority_stats)
845 return show_quantiles(ca, buf, bucket_priority_fn, (void *) 1);
846 if (attr == &sysfs_fragmentation_stats)
847 return show_quantiles(ca, buf, bucket_sectors_used_fn, NULL);
848 if (attr == &sysfs_oldest_gen_stats)
849 return show_quantiles(ca, buf, bucket_oldest_gen_fn, NULL);
850 if (attr == &sysfs_reserve_stats)
851 return show_reserve_stats(ca, buf);
852 if (attr == &sysfs_alloc_debug)
853 return show_dev_alloc_debug(ca, buf);
860 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
861 struct bch_fs *c = ca->fs;
862 struct bch_member *mi;
864 sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);
866 if (attr == &sysfs_discard) {
867 bool v = strtoul_or_return(buf);
869 mutex_lock(&c->sb_lock);
870 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
872 if (v != BCH_MEMBER_DISCARD(mi)) {
873 SET_BCH_MEMBER_DISCARD(mi, v);
876 mutex_unlock(&c->sb_lock);
879 if (attr == &sysfs_cache_replacement_policy) {
880 ssize_t v = bch2_read_string_list(buf, bch2_cache_replacement_policies);
885 mutex_lock(&c->sb_lock);
886 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
888 if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
889 SET_BCH_MEMBER_REPLACEMENT(mi, v);
892 mutex_unlock(&c->sb_lock);
895 if (attr == &sysfs_tier) {
897 unsigned v = strtoul_restrict_or_return(buf,
898 0, BCH_TIER_MAX - 1);
900 mutex_lock(&c->sb_lock);
901 prev_tier = ca->mi.tier;
903 if (v == ca->mi.tier) {
904 mutex_unlock(&c->sb_lock);
908 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
909 SET_BCH_MEMBER_TIER(mi, v);
912 clear_bit(ca->dev_idx, c->tiers[prev_tier].devs.d);
913 set_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
914 mutex_unlock(&c->sb_lock);
916 bch2_recalc_capacity(c);
917 bch2_tiering_start(c);
920 if (attr == &sysfs_wake_allocator)
921 bch2_wake_allocator(ca);
927 struct attribute *bch2_dev_files[] = {
936 &sysfs_cache_replacement_policy,
943 /* alloc info - other stats: */
944 &sysfs_read_priority_stats,
945 &sysfs_write_priority_stats,
946 &sysfs_fragmentation_stats,
947 &sysfs_oldest_gen_stats,
948 &sysfs_reserve_stats,
952 &sysfs_wake_allocator,
954 sysfs_pd_controller_files(copy_gc),
958 #endif /* NO_BCACHEFS_SYSFS */