2 * bcache sysfs interfaces
4 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
5 * Copyright 2012 Google, Inc.
8 #ifndef NO_BCACHEFS_SYSFS
14 #include "btree_cache.h"
15 #include "btree_iter.h"
16 #include "btree_update.h"
27 #include <linux/blkdev.h>
28 #include <linux/sort.h>
29 #include <linux/sched/clock.h>
/*
 * Boilerplate for wiring a kobject type into sysfs:
 * SYSFS_OPS(type) emits a struct sysfs_ops whose callbacks are
 * type##_show / type##_store (defined via the SHOW()/STORE() macros,
 * whose opening lines are not visible in this fragment).
 */
33 #define SYSFS_OPS(type) \
34 struct sysfs_ops type ## _sysfs_ops = { \
35 .show = type ## _show, \
36 .store = type ## _store \
/* Standard sysfs callback signatures expanded by SHOW()/STORE(). */
40 static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
44 static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
45 const char *buf, size_t size) \
/* Define a file-scope struct attribute named sysfs_<name> with mode _mode. */
47 #define __sysfs_attribute(_name, _mode) \
48 static struct attribute sysfs_##_name = \
49 { .name = #_name, .mode = _mode }
/* Convenience wrappers: write-only, read-only, read-write attribute files. */
51 #define write_attribute(n) __sysfs_attribute(n, S_IWUSR)
52 #define read_attribute(n) __sysfs_attribute(n, S_IRUGO)
53 #define rw_attribute(n) __sysfs_attribute(n, S_IRUGO|S_IWUSR)
/*
 * Helpers used inside SHOW() bodies: each tests whether the attribute
 * being read is sysfs_<file> and, on match, formats the value into buf
 * (sysfs buffers are one page) and returns the byte count.
 */
55 #define sysfs_printf(file, fmt, ...) \
57 if (attr == &sysfs_ ## file) \
58 return scnprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
61 #define sysfs_print(file, var) \
63 if (attr == &sysfs_ ## file) \
64 return snprint(buf, PAGE_SIZE, var); \
/* Human-readable print (scaled units) via bch2_hprint(). */
67 #define sysfs_hprint(file, val) \
69 if (attr == &sysfs_ ## file) { \
70 ssize_t ret = bch2_hprint(buf, val); \
/* var() variants: attribute name and variable name are the same token. */
76 #define var_printf(_var, fmt) sysfs_printf(_var, fmt, var(_var))
77 #define var_print(_var) sysfs_print(_var, var(_var))
78 #define var_hprint(_var) sysfs_hprint(_var, var(_var))
/*
 * Helpers used inside STORE() bodies: parse buf into var and return
 * `size` on success (the ?: relies on strtoul_safe() returning 0/err).
 */
80 #define sysfs_strtoul(file, var) \
82 if (attr == &sysfs_ ## file) \
83 return strtoul_safe(buf, var) ?: (ssize_t) size; \
86 #define sysfs_strtoul_clamp(file, var, min, max) \
88 if (attr == &sysfs_ ## file) \
89 return strtoul_safe_clamp(buf, var, min, max) \
/* Parse an unsigned long or return an error from the enclosing function. */
93 #define strtoul_or_return(cp) \
96 int _r = kstrtoul(cp, 10, &_v); \
102 #define strtoul_restrict_or_return(cp, min, max) \
104 unsigned long __v = 0; \
105 int _r = strtoul_safe_restrict(cp, __v, min, max); \
/* strtoi_h() accepts human-readable size suffixes (k/M/G/...). */
111 #define strtoi_h_or_return(cp) \
114 int _r = strtoi_h(cp, &_v); \
120 #define sysfs_hatoi(file, var) \
122 if (attr == &sysfs_ ## file) \
123 return strtoi_h(buf, &var) ?: (ssize_t) size; \
/* Write-only triggers: writing anything kicks the named operation. */
126 write_attribute(trigger_journal_flush);
127 write_attribute(trigger_btree_coalesce);
128 write_attribute(trigger_gc);
129 write_attribute(prune_cache);
130 rw_attribute(btree_gc_periodic);
/* Read-only: filesystem/device identity and geometry. */
132 read_attribute(uuid);
133 read_attribute(minor);
134 read_attribute(bucket_size);
135 read_attribute(block_size);
136 read_attribute(btree_node_size);
137 read_attribute(first_bucket);
138 read_attribute(nbuckets);
/* Read-only: statistics and debug dumps. */
139 read_attribute(iostats);
140 read_attribute(read_priority_stats);
141 read_attribute(write_priority_stats);
142 read_attribute(fragmentation_stats);
143 read_attribute(oldest_gen_stats);
144 read_attribute(reserve_stats);
145 read_attribute(btree_cache_size);
146 read_attribute(compression_stats);
147 read_attribute(journal_debug);
148 read_attribute(journal_pins);
150 read_attribute(internal_uuid);
/* Read-only: per-device bucket/space accounting. */
152 read_attribute(available_buckets);
153 read_attribute(free_buckets);
154 read_attribute(dirty_data);
155 read_attribute(dirty_bytes);
156 read_attribute(dirty_buckets);
157 read_attribute(cached_data);
158 read_attribute(cached_bytes);
159 read_attribute(cached_buckets);
160 read_attribute(meta_buckets);
161 read_attribute(alloc_buckets);
162 read_attribute(has_data);
163 read_attribute(alloc_debug);
165 read_attribute(read_realloc_races);
/* Read-write tunables. */
167 rw_attribute(journal_write_delay_ms);
168 rw_attribute(journal_reclaim_delay_ms);
170 rw_attribute(discard);
171 rw_attribute(cache_replacement_policy);
173 rw_attribute(foreground_write_ratelimit_enabled);
174 rw_attribute(copy_gc_enabled);
175 sysfs_pd_controller_attribute(copy_gc);
178 rw_attribute(tiering_enabled);
179 rw_attribute(tiering_percent);
180 sysfs_pd_controller_attribute(tiering);
182 sysfs_pd_controller_attribute(foreground_write);
184 rw_attribute(pd_controllers_update_seconds);
186 rw_attribute(foreground_target_percent);
188 read_attribute(meta_replicas_have);
189 read_attribute(data_replicas_have);
/* One sysfs_<name> attribute per compiled-in debug parameter. */
191 #define BCH_DEBUG_PARAM(name, description) \
195 #undef BCH_DEBUG_PARAM
/* One sysfs_opt_<name> attribute per filesystem option. */
197 #define BCH_OPT(_name, _mode, ...) \
198 static struct attribute sysfs_opt_##_name = { \
199 .name = #_name, .mode = _mode, \
/* Time-stats attributes, one set per BCH_TIME_STAT entry. */
205 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
206 sysfs_time_stats_attribute_list(name, frequency_units, duration_units);
/* state_rw is declared by hand rather than via the attribute macros. */
210 static struct attribute sysfs_state_rw = {
/*
 * Total memory consumed by the in-memory btree node cache, computed by
 * counting btree_bytes(c) once per cached node under btree_cache_lock.
 * (The accumulator declaration and return are on lines not visible here.)
 */
215 static size_t bch2_btree_cache_size(struct bch_fs *c)
220 mutex_lock(&c->btree_cache_lock);
221 list_for_each_entry(b, &c->btree_cache, list)
222 ret += btree_bytes(c);
224 mutex_unlock(&c->btree_cache_lock);
/*
 * Dump filesystem-wide allocation accounting: capacity, then per-class
 * meta/dirty/persistent-reserved counts for stats.s[0..3], then the
 * online reserve. stats.s[] is presumably indexed by replication class
 * (TODO confirm against struct bch_fs_usage).
 */
228 static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
230 struct bch_fs_usage stats = bch2_fs_usage_read(c);
232 return scnprintf(buf, PAGE_SIZE,
233 "capacity:\t\t%llu\n"
237 "\treserved:\t%llu\n"
241 "\treserved:\t%llu\n"
245 "\treserved:\t%llu\n"
249 "\treserved:\t%llu\n"
250 "online reserved:\t%llu\n",
252 stats.s[0].data[S_META],
253 stats.s[0].data[S_DIRTY],
254 stats.s[0].persistent_reserved,
255 stats.s[1].data[S_META],
256 stats.s[1].data[S_DIRTY],
257 stats.s[1].persistent_reserved,
258 stats.s[2].data[S_META],
259 stats.s[2].data[S_DIRTY],
260 stats.s[2].persistent_reserved,
261 stats.s[3].data[S_META],
262 stats.s[3].data[S_DIRTY],
263 stats.s[3].persistent_reserved,
264 stats.online_reserved);
/*
 * Walk every extent in the extents btree and tally compression stats:
 * counts of (un)compressed extents plus their compressed/uncompressed
 * sector totals. Sector counts are reported as bytes (<< 9).
 * Only runs on a running filesystem; requires a full btree scan, so it
 * can be slow on large filesystems.
 */
267 static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
269 struct btree_iter iter;
271 u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
272 nr_compressed_extents = 0,
273 compressed_sectors_compressed = 0,
274 compressed_sectors_uncompressed = 0;
276 if (!bch2_fs_running(c))
279 for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
280 if (k.k->type == BCH_EXTENT) {
281 struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
282 const struct bch_extent_ptr *ptr;
283 const union bch_extent_crc *crc;
285 extent_for_each_ptr_crc(e, ptr, crc) {
286 if (crc_compression_type(crc) == BCH_COMPRESSION_NONE) {
287 nr_uncompressed_extents++;
288 uncompressed_sectors += e.k->size;
290 nr_compressed_extents++;
291 compressed_sectors_compressed +=
292 crc_compressed_size(e.k, crc);
293 compressed_sectors_uncompressed +=
294 crc_uncompressed_size(e.k, crc);
297 /* only looking at the first ptr */
301 bch2_btree_iter_unlock(&iter);
303 return scnprintf(buf, PAGE_SIZE,
304 "uncompressed data:\n"
305 " nr extents: %llu\n"
306 " size (bytes): %llu\n"
308 " nr extents: %llu\n"
309 " compressed size (bytes): %llu\n"
310 " uncompressed size (bytes): %llu\n",
311 nr_uncompressed_extents,
312 uncompressed_sectors << 9,
313 nr_compressed_extents,
314 compressed_sectors_compressed << 9,
315 compressed_sectors_uncompressed << 9);
/*
 * SHOW() body for the top-level filesystem kobject (the SHOW(...) line
 * itself is not visible in this fragment). Each sysfs_* macro below
 * returns early when `attr` matches its attribute; attributes needing
 * custom formatting are handled by explicit if-blocks at the end.
 */
320 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
322 sysfs_print(minor, c->minor);
323 sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
325 sysfs_print(journal_write_delay_ms, c->journal.write_delay_ms);
326 sysfs_print(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
328 sysfs_print(block_size, block_bytes(c));
329 sysfs_print(btree_node_size, btree_bytes(c));
330 sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
332 sysfs_print(read_realloc_races,
333 atomic_long_read(&c->read_realloc_races));
335 sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
337 sysfs_printf(foreground_write_ratelimit_enabled, "%i",
338 c->foreground_write_ratelimit_enabled);
339 sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
340 sysfs_pd_controller_show(foreground_write, &c->foreground_write_pd);
342 sysfs_print(pd_controllers_update_seconds,
343 c->pd_controllers_update_seconds);
344 sysfs_print(foreground_target_percent, c->foreground_target_percent);
346 sysfs_printf(tiering_enabled, "%i", c->tiering_enabled);
347 sysfs_print(tiering_percent, c->tiering_percent);
349 sysfs_pd_controller_show(tiering, &c->tiers[1].pd); /* XXX */
351 sysfs_printf(meta_replicas_have, "%u", bch2_replicas_online(c, true));
352 sysfs_printf(data_replicas_have, "%u", bch2_replicas_online(c, false));
/* Attributes whose output needs a dedicated formatting function: */
356 if (attr == &sysfs_alloc_debug)
357 return show_fs_alloc_debug(c, buf);
359 if (attr == &sysfs_journal_debug)
360 return bch2_journal_print_debug(&c->journal, buf);
362 if (attr == &sysfs_journal_pins)
363 return bch2_journal_print_pins(&c->journal, buf);
365 if (attr == &sysfs_compression_stats)
366 return bch2_compression_stats(c, buf);
/* Expose each compiled-in debug parameter as a read of c-><name>. */
368 #define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
370 #undef BCH_DEBUG_PARAM
/*
 * STORE() body for the filesystem kobject (the STORE(...) line is not
 * visible here). Parses tunables, then handles write-only triggers;
 * the trigger section is gated on bch2_fs_running() (line 428) so
 * triggers can't fire on a filesystem that isn't up.
 */
377 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
379 sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
380 sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
382 sysfs_strtoul(foreground_write_ratelimit_enabled,
383 c->foreground_write_ratelimit_enabled);
/* Toggling periodic GC wakes the GC thread so it notices immediately. */
385 if (attr == &sysfs_btree_gc_periodic) {
386 ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
389 wake_up_process(c->gc_thread);
/* Toggling copygc wakes each device's moving-GC thread. */
393 if (attr == &sysfs_copy_gc_enabled) {
396 ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
399 for_each_member_device(ca, c, i)
400 if (ca->moving_gc_read)
401 wake_up_process(ca->moving_gc_read);
405 if (attr == &sysfs_tiering_enabled) {
406 ssize_t ret = strtoul_safe(buf, c->tiering_enabled)
409 bch2_tiering_start(c); /* issue wakeups */
413 sysfs_pd_controller_store(foreground_write, &c->foreground_write_pd);
415 sysfs_strtoul(pd_controllers_update_seconds,
416 c->pd_controllers_update_seconds);
417 sysfs_strtoul(foreground_target_percent, c->foreground_target_percent);
419 sysfs_strtoul(tiering_percent, c->tiering_percent);
420 sysfs_pd_controller_store(tiering, &c->tiers[1].pd); /* XXX */
/* Writable debug parameters mirror the reads in the show path. */
424 #define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
426 #undef BCH_DEBUG_PARAM
/* Everything below requires a running filesystem. */
428 if (!bch2_fs_running(c))
433 if (attr == &sysfs_trigger_journal_flush)
434 bch2_journal_meta_async(&c->journal, NULL);
436 if (attr == &sysfs_trigger_btree_coalesce)
439 if (attr == &sysfs_trigger_gc)
/* prune_cache: invoke the btree-cache shrinker for N objects by hand. */
442 if (attr == &sysfs_prune_cache) {
443 struct shrink_control sc;
445 sc.gfp_mask = GFP_KERNEL;
446 sc.nr_to_scan = strtoul_or_return(buf);
447 c->btree_cache_shrink.scan_objects(&c->btree_cache_shrink, &sc);
/*
 * Outer store wrapper: serializes all fs sysfs writes against state
 * changes by taking c->state_lock around __bch2_fs_store().
 */
455 struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
457 mutex_lock(&c->state_lock);
458 size = __bch2_fs_store(kobj, attr, buf, size);
459 mutex_unlock(&c->state_lock);
/* Attributes exposed in the filesystem's top-level sysfs directory. */
465 struct attribute *bch2_fs_files[] = {
468 &sysfs_btree_node_size,
469 &sysfs_btree_cache_size,
471 &sysfs_meta_replicas_have,
472 &sysfs_data_replicas_have,
474 &sysfs_journal_write_delay_ms,
475 &sysfs_journal_reclaim_delay_ms,
477 &sysfs_foreground_target_percent,
478 &sysfs_tiering_percent,
480 &sysfs_compression_stats,
484 /* internal dir - just a wrapper */
/* "internal" subdirectory: forwards show/store to the parent fs kobject. */
486 SHOW(bch2_fs_internal)
488 struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
489 return bch2_fs_show(&c->kobj, attr, buf);
492 STORE(bch2_fs_internal)
494 struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
495 return bch2_fs_store(&c->kobj, attr, buf, size);
497 SYSFS_OPS(bch2_fs_internal);
/* Debug/advanced attributes kept out of the top-level directory. */
499 struct attribute *bch2_fs_internal_files[] = {
501 &sysfs_journal_debug,
504 &sysfs_read_realloc_races,
506 &sysfs_trigger_journal_flush,
507 &sysfs_trigger_btree_coalesce,
511 &sysfs_foreground_write_ratelimit_enabled,
512 &sysfs_copy_gc_enabled,
513 &sysfs_tiering_enabled,
514 sysfs_pd_controller_files(tiering),
515 sysfs_pd_controller_files(foreground_write),
516 &sysfs_internal_uuid,
/* One entry per compiled-in debug parameter. */
518 #define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
520 #undef BCH_DEBUG_PARAM
/*
 * "options" subdirectory: one file per filesystem option. Reads format
 * the current value; writes parse it, optionally validate (compression
 * needs the replicas/feature check), persist to the superblock when the
 * option has a set_sb hook, and update the in-memory copy.
 */
527 SHOW(bch2_fs_opts_dir)
529 char *out = buf, *end = buf + PAGE_SIZE;
530 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
532 out += bch2_opt_show(&c->opts, attr->name, out, end - out);
533 out += scnprintf(out, end - out, "\n");
538 STORE(bch2_fs_opts_dir)
540 struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
541 const struct bch_option *opt;
/* Option id + value are parsed from the attribute name and buf. */
545 id = bch2_parse_sysfs_opt(attr->name, buf, &v);
549 opt = &bch2_opt_table[id];
551 mutex_lock(&c->sb_lock);
/* Enabling compression must first verify the fs can hold compressed data. */
553 if (id == Opt_compression) {
554 int ret = bch2_check_set_has_compressed_data(c, v);
556 mutex_unlock(&c->sb_lock);
/* Persist to the superblock when this option is stored on disk. */
561 if (opt->set_sb != SET_NO_SB_OPT) {
562 opt->set_sb(c->disk_sb, v);
566 bch2_opt_set(&c->opts, id, v);
568 mutex_unlock(&c->sb_lock);
572 SYSFS_OPS(bch2_fs_opts_dir);
/* One sysfs_opt_<name> file per option, generated from the option table. */
574 struct attribute *bch2_fs_opts_dir_files[] = {
575 #define BCH_OPT(_name, ...) \
/*
 * "time_stats" subdirectory: reads print a latency/frequency histogram
 * per BCH_TIME_STAT entry; writes clear the matching statistics.
 */
586 SHOW(bch2_fs_time_stats)
588 struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
590 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
591 sysfs_print_time_stats(&c->name##_time, name, \
592 frequency_units, duration_units);
599 STORE(bch2_fs_time_stats)
601 struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
603 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
604 sysfs_clear_time_stats(&c->name##_time, name);
610 SYSFS_OPS(bch2_fs_time_stats);
/* Attribute list generated from the same BCH_TIME_STAT table. */
612 struct attribute *bch2_fs_time_stats_files[] = {
613 #define BCH_TIME_STAT(name, frequency_units, duration_units) \
614 sysfs_time_stats_attribute_list(name, frequency_units, duration_units)
/* Maps one bucket to the scalar being histogrammed (priority, usage, ...). */
621 typedef unsigned (bucket_map_fn)(struct bch_dev *, struct bucket *, void *);
/* private selects read (0) vs write (non-NULL) priority clock. */
623 static unsigned bucket_priority_fn(struct bch_dev *ca, struct bucket *g,
626 int rw = (private ? 1 : 0);
628 return ca->fs->prio_clock[rw].hand - g->prio[rw];
631 static unsigned bucket_sectors_used_fn(struct bch_dev *ca, struct bucket *g,
634 return bucket_sectors_used(g->mark);
637 static unsigned bucket_oldest_gen_fn(struct bch_dev *ca, struct bucket *g,
640 return bucket_gc_gen(ca, g);
/*
 * Print quantiles of fn() applied to every bucket: map, sort
 * descending, then sample evenly spaced points of the distribution.
 * NOTE(review): cmp() is a nested function (GCC extension) — this file
 * will not build with compilers lacking that extension.
 */
643 static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
644 bucket_map_fn *fn, void *private)
646 int cmp(const void *l, const void *r)
647 { return *((unsigned *) r) - *((unsigned *) l); }
649 size_t n = ca->mi.nbuckets, i;
650 /* Compute 31 quantiles */
654 p = vzalloc(ca->mi.nbuckets * sizeof(unsigned));
/* Buckets below first_bucket are left zero from vzalloc() but still sorted. */
658 for (i = ca->mi.first_bucket; i < n; i++)
659 p[i] = fn(ca, &ca->buckets[i], private);
661 sort(p, n, sizeof(unsigned), cmp, NULL);
667 for (i = 0; i < ARRAY_SIZE(q); i++)
668 q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];
672 for (i = 0; i < ARRAY_SIZE(q); i++)
673 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
/*
 * Print fill levels of the device's free_inc fifo and each allocation
 * reserve fifo, snapshotted under freelist_lock.
 */
681 static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
683 enum alloc_reserve i;
686 spin_lock(&ca->freelist_lock);
688 ret = scnprintf(buf, PAGE_SIZE,
689 "free_inc:\t%zu\t%zu\n",
690 fifo_used(&ca->free_inc),
693 for (i = 0; i < RESERVE_NR; i++)
694 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
695 "free[%u]:\t%zu\t%zu\n", i,
696 fifo_used(&ca->free[i]),
699 spin_unlock(&ca->freelist_lock);
/*
 * Allocator debug dump for one device: freelist fill levels, bucket
 * counts by class (all out of nbuckets - first_bucket usable buckets),
 * open-bucket usage, and whether anything is blocked waiting on space.
 */
704 static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
706 struct bch_fs *c = ca->fs;
707 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
709 return scnprintf(buf, PAGE_SIZE,
710 "free_inc: %zu/%zu\n"
711 "free[RESERVE_BTREE]: %zu/%zu\n"
712 "free[RESERVE_MOVINGGC]: %zu/%zu\n"
713 "free[RESERVE_NONE]: %zu/%zu\n"
717 "available: %llu/%llu\n"
718 "freelist_wait: %s\n"
719 "open buckets: %u/%u (reserved %u)\n"
720 "open_buckets_wait: %s\n",
721 fifo_used(&ca->free_inc), ca->free_inc.size,
722 fifo_used(&ca->free[RESERVE_BTREE]), ca->free[RESERVE_BTREE].size,
723 fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
724 fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
725 stats.buckets_alloc, ca->mi.nbuckets - ca->mi.first_bucket,
726 stats.buckets[S_META], ca->mi.nbuckets - ca->mi.first_bucket,
727 stats.buckets[S_DIRTY], ca->mi.nbuckets - ca->mi.first_bucket,
728 __dev_buckets_available(ca, stats), ca->mi.nbuckets - ca->mi.first_bucket,
729 c->freelist_wait.list.first ? "waiting" : "empty",
730 c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
731 c->open_buckets_wait.list.first ? "waiting" : "empty");
/* Labels for the two I/O directions (indexed by rw below). */
734 const char * const bch2_rw[] = {
/*
 * Per-device I/O totals: for each direction and each data type,
 * sum the per-CPU sector counters and print as bytes (<< 9).
 * The i = 1 start skips the first BCH_DATA_* slot.
 */
740 static ssize_t show_dev_iostats(struct bch_dev *ca, char *buf)
742 char *out = buf, *end = buf + PAGE_SIZE;
745 for (rw = 0; rw < 2; rw++) {
746 out += scnprintf(out, end - out, "%s:\n", bch2_rw[rw]);
748 for (i = 1; i < BCH_DATA_NR; i++) {
751 for_each_possible_cpu(cpu)
752 n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];
754 out += scnprintf(out, end - out, "%-12s:%12llu\n",
755 bch2_data_types[i], n << 9);
/*
 * SHOW() body for a member device's kobject (the SHOW(...) line is not
 * visible here): device geometry, per-class usage snapshot, then the
 * attributes that need list/flag formatting or dedicated helpers.
 */
764 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
765 struct bch_fs *c = ca->fs;
766 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
767 char *out = buf, *end = buf + PAGE_SIZE;
769 sysfs_printf(uuid, "%pU\n", ca->uuid.b);
771 sysfs_print(bucket_size, bucket_bytes(ca));
772 sysfs_print(block_size, block_bytes(c));
773 sysfs_print(first_bucket, ca->mi.first_bucket);
774 sysfs_print(nbuckets, ca->mi.nbuckets);
775 sysfs_print(discard, ca->mi.discard);
/* Sector counts exposed both human-readable (hprint) and as raw bytes. */
777 sysfs_hprint(dirty_data, stats.sectors[S_DIRTY] << 9);
778 sysfs_print(dirty_bytes, stats.sectors[S_DIRTY] << 9);
779 sysfs_print(dirty_buckets, stats.buckets[S_DIRTY]);
780 sysfs_hprint(cached_data, stats.sectors_cached << 9);
781 sysfs_print(cached_bytes, stats.sectors_cached << 9);
782 sysfs_print(cached_buckets, stats.buckets_cached);
783 sysfs_print(meta_buckets, stats.buckets[S_META]);
784 sysfs_print(alloc_buckets, stats.buckets_alloc);
785 sysfs_print(available_buckets, dev_buckets_available(ca));
786 sysfs_print(free_buckets, dev_buckets_free(ca));
/* has_data: render the data-type bitmask as a flag list. */
788 if (attr == &sysfs_has_data) {
789 out += bch2_scnprint_flag_list(out, end - out,
791 bch2_dev_has_data(c, ca));
792 out += scnprintf(out, end - out, "\n");
796 sysfs_pd_controller_show(copy_gc, &ca->moving_gc_pd);
/* Replacement policy and rw state: print as selectable string lists. */
798 if (attr == &sysfs_cache_replacement_policy) {
799 out += bch2_scnprint_string_list(out, end - out,
800 bch2_cache_replacement_policies,
802 out += scnprintf(out, end - out, "\n");
806 sysfs_print(tier, ca->mi.tier);
808 if (attr == &sysfs_state_rw) {
809 out += bch2_scnprint_string_list(out, end - out,
812 out += scnprintf(out, end - out, "\n");
/* Attributes backed by dedicated formatting helpers: */
816 if (attr == &sysfs_iostats)
817 return show_dev_iostats(ca, buf);
818 if (attr == &sysfs_read_priority_stats)
819 return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0);
820 if (attr == &sysfs_write_priority_stats)
821 return show_quantiles(ca, buf, bucket_priority_fn, (void *) 1);
822 if (attr == &sysfs_fragmentation_stats)
823 return show_quantiles(ca, buf, bucket_sectors_used_fn, NULL);
824 if (attr == &sysfs_oldest_gen_stats)
825 return show_quantiles(ca, buf, bucket_oldest_gen_fn, NULL);
826 if (attr == &sysfs_reserve_stats)
827 return show_reserve_stats(ca, buf);
828 if (attr == &sysfs_alloc_debug)
829 return show_dev_alloc_debug(ca, buf);
/*
 * STORE() body for a member device's kobject (the STORE(...) line is
 * not visible here). Writes to member options update the on-disk
 * superblock member entry under sb_lock, only when the value changed.
 */
836 struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
837 struct bch_fs *c = ca->fs;
838 struct bch_member *mi;
840 sysfs_pd_controller_store(copy_gc, &ca->moving_gc_pd);
842 if (attr == &sysfs_discard) {
843 bool v = strtoul_or_return(buf);
845 mutex_lock(&c->sb_lock);
846 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
848 if (v != BCH_MEMBER_DISCARD(mi)) {
849 SET_BCH_MEMBER_DISCARD(mi, v);
852 mutex_unlock(&c->sb_lock);
/* Replacement policy: input is one of the policy names, parsed to an index. */
855 if (attr == &sysfs_cache_replacement_policy) {
856 ssize_t v = bch2_read_string_list(buf, bch2_cache_replacement_policies);
861 mutex_lock(&c->sb_lock);
862 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
864 if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
865 SET_BCH_MEMBER_REPLACEMENT(mi, v);
868 mutex_unlock(&c->sb_lock);
/*
 * Tier change: update the superblock member, move the device between
 * tier bitmaps, then recompute capacity and restart tiering.
 */
871 if (attr == &sysfs_tier) {
873 unsigned v = strtoul_restrict_or_return(buf,
874 0, BCH_TIER_MAX - 1);
876 mutex_lock(&c->sb_lock);
877 prev_tier = ca->mi.tier;
879 if (v == ca->mi.tier) {
880 mutex_unlock(&c->sb_lock);
884 mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
885 SET_BCH_MEMBER_TIER(mi, v);
888 clear_bit(ca->dev_idx, c->tiers[prev_tier].devs.d);
889 set_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
890 mutex_unlock(&c->sb_lock);
892 bch2_recalc_capacity(c);
893 bch2_tiering_start(c);
/* Attributes exposed in each member device's sysfs directory. */
900 struct attribute *bch2_dev_files[] = {
909 &sysfs_cache_replacement_policy,
916 /* alloc info - data: */
922 /* alloc info - buckets: */
923 &sysfs_available_buckets,
925 &sysfs_dirty_buckets,
926 &sysfs_cached_buckets,
928 &sysfs_alloc_buckets,
930 /* alloc info - other stats: */
931 &sysfs_read_priority_stats,
932 &sysfs_write_priority_stats,
933 &sysfs_fragmentation_stats,
934 &sysfs_oldest_gen_stats,
935 &sysfs_reserve_stats,
940 sysfs_pd_controller_files(copy_gc),
944 #endif /* NO_BCACHEFS_SYSFS */