/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_update.h"

#include <linux/blkdev.h>
#include <linux/sort.h>

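/*
 * Boilerplate for sysfs attributes and their show/store methods: each
 * kobject type gets one flat _show()/_store() pair, and the helper
 * macros below let those methods be written as a list of per-attribute
 * one-liners.
 */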
#define SYSFS_OPS(type)						\
struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,				\
	.store	= type ## _store				\
}

#define SHOW(fn)						\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)

#define STORE(fn)						\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)

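/*
 * Usage sketch (hypothetical kobject type "foo"):
 *
 *	SHOW(foo)	{ ... return 0; }
 *	STORE(foo)	{ ... return size; }
 *	SYSFS_OPS(foo);
 *
 * This defines foo_show()/foo_store() and a foo_sysfs_ops for the
 * corresponding ktype to point at.
 */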
#define __sysfs_attribute(_name, _mode)				\
	static struct attribute sysfs_##_name =		\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)

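/*
 * The helpers below compare @attr against the named attribute and, on
 * a match, return from the enclosing show()/store() method, so method
 * bodies read as flat per-attribute dispatch tables.
 */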
#define sysfs_printf(file, fmt, ...)				\
do {								\
	if (attr == &sysfs_ ## file)				\
		return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__);\
} while (0)

#define sysfs_print(file, var)					\
do {								\
	if (attr == &sysfs_ ## file)				\
		return snprint(buf, PAGE_SIZE, var);		\
} while (0)

#define sysfs_hprint(file, val)					\
do {								\
	if (attr == &sysfs_ ## file) {				\
		ssize_t ret = bch2_hprint(buf, val);		\
		strcat(buf, "\n");				\
		return ret + 1;					\
	}							\
} while (0)

#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))

#define sysfs_strtoul(file, var)				\
do {								\
	if (attr == &sysfs_ ## file)				\
		return strtoul_safe(buf, var) ?: (ssize_t) size;\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)		\
do {								\
	if (attr == &sysfs_ ## file)				\
		return strtoul_safe_clamp(buf, var, min, max)	\
			?: (ssize_t) size;			\
} while (0)

#define strtoul_or_return(cp)					\
({								\
	unsigned long _v;					\
	int _r = kstrtoul(cp, 10, &_v);				\
								\
	if (_r)							\
		return _r;					\
	_v;							\
})

#define strtoul_restrict_or_return(cp, min, max)		\
({								\
	unsigned long __v = 0;					\
	int _r = strtoul_safe_restrict(cp, __v, min, max);	\
								\
	if (_r)							\
		return _r;					\
	__v;							\
})

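/*
 * strtoi_h() parses human-readable integers: plain digits plus an
 * optional size suffix (k, M, G, ...), so e.g. "1M" reads back as
 * 1 << 20.
 */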
#define strtoi_h_or_return(cp)					\
({								\
	u64 _v;							\
	int _r = strtoi_h(cp, &_v);				\
								\
	if (_r)							\
		return _r;					\
	_v;							\
})

#define sysfs_hatoi(file, var)					\
do {								\
	if (attr == &sysfs_ ## file)				\
		return strtoi_h(buf, &var) ?: (ssize_t) size;	\
} while (0)

write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(prune_cache);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(btree_node_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(read_priority_stats);
read_attribute(write_priority_stats);
read_attribute(fragmentation_stats);
read_attribute(oldest_gen_stats);
read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(journal_debug);
read_attribute(journal_pins);

read_attribute(internal_uuid);

read_attribute(available_buckets);
read_attribute(free_buckets);
read_attribute(dirty_data);
read_attribute(dirty_bytes);
read_attribute(dirty_buckets);
read_attribute(cached_data);
read_attribute(cached_bytes);
read_attribute(cached_buckets);
read_attribute(meta_buckets);
read_attribute(alloc_buckets);
read_attribute(has_data);
read_attribute(has_metadata);
read_attribute(alloc_debug);

read_attribute(read_realloc_races);

rw_attribute(journal_write_delay_ms);
rw_attribute(journal_reclaim_delay_ms);

rw_attribute(discard);
rw_attribute(cache_replacement_policy);

rw_attribute(foreground_write_ratelimit_enabled);
rw_attribute(copy_gc_enabled);
sysfs_pd_controller_attribute(copy_gc);

rw_attribute(tier);
rw_attribute(tiering_enabled);
rw_attribute(tiering_percent);
sysfs_pd_controller_attribute(tiering);

sysfs_pd_controller_attribute(foreground_write);

rw_attribute(pd_controllers_update_seconds);

rw_attribute(foreground_target_percent);

read_attribute(meta_replicas_have);
read_attribute(data_replicas_have);

#define BCH_DEBUG_PARAM(name, description)			\
	rw_attribute(name);

	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

#define BCH_OPT(_name, _mode, ...)				\
	static struct attribute sysfs_opt_##_name = {		\
		.name = #_name, .mode = _mode,			\
	};

	BCH_VISIBLE_OPTS()
#undef BCH_OPT

#define BCH_TIME_STAT(name, frequency_units, duration_units)	\
	sysfs_time_stats_attribute(name, frequency_units, duration_units);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = S_IRUGO
};

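/* Total in-memory size of all btree nodes currently held in cache: */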
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache_lock);
	return ret;
}

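/* Filesystem-wide usage accounting, broken out by sector type: */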
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
	struct bch_fs_usage stats = bch2_fs_usage_read(c);

	return scnprintf(buf, PAGE_SIZE,
			 "capacity:\t\t%llu\n"
			 "compressed sectors:\n"
			 "\tmeta:\t\t%llu\n"
			 "\tdirty:\t\t%llu\n"
			 "\tcached:\t\t%llu\n"
			 "uncompressed sectors:\n"
			 "\tmeta:\t\t%llu\n"
			 "\tdirty:\t\t%llu\n"
			 "\tcached:\t\t%llu\n"
			 "persistent reserved sectors:\t%llu\n"
			 "online reserved sectors:\t%llu\n",
			 c->capacity,
			 stats.s[S_COMPRESSED][S_META],
			 stats.s[S_COMPRESSED][S_DIRTY],
			 stats.s[S_COMPRESSED][S_CACHED],
			 stats.s[S_UNCOMPRESSED][S_META],
			 stats.s[S_UNCOMPRESSED][S_DIRTY],
			 stats.s[S_UNCOMPRESSED][S_CACHED],
			 stats.persistent_reserved,
			 stats.online_reserved);
}

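/*
 * Walk every extent in the filesystem and total up compressed
 * vs. uncompressed data. Only valid once the filesystem is running.
 */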
static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
	    nr_compressed_extents = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;

	if (!bch2_fs_running(c))
		return -EPERM;

	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, k)
		if (k.k->type == BCH_EXTENT) {
			struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
			const struct bch_extent_ptr *ptr;
			const union bch_extent_crc *crc;

			extent_for_each_ptr_crc(e, ptr, crc) {
				if (crc_compression_type(crc) == BCH_COMPRESSION_NONE) {
					nr_uncompressed_extents++;
					uncompressed_sectors += e.k->size;
				} else {
					nr_compressed_extents++;
					compressed_sectors_compressed +=
						crc_compressed_size(e.k, crc);
					compressed_sectors_uncompressed +=
						crc_uncompressed_size(e.k, crc);
				}

				/* only looking at the first ptr */
				break;
			}
		}
	bch2_btree_iter_unlock(&iter);

	return snprintf(buf, PAGE_SIZE,
			"uncompressed data:\n"
			"	nr extents:			%llu\n"
			"	size (bytes):			%llu\n"
			"compressed data:\n"
			"	nr extents:			%llu\n"
			"	compressed size (bytes):	%llu\n"
			"	uncompressed size (bytes):	%llu\n",
			nr_uncompressed_extents,
			uncompressed_sectors << 9,
			nr_compressed_extents,
			compressed_sectors_compressed << 9,
			compressed_sectors_uncompressed << 9);
}

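/* Top level filesystem kobject: */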
SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_print(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_print(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	sysfs_print(block_size, block_bytes(c));
	sysfs_print(btree_node_size, btree_bytes(c));
	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	sysfs_print(read_realloc_races,
		    atomic_long_read(&c->read_realloc_races));

	sysfs_printf(foreground_write_ratelimit_enabled, "%i",
		     c->foreground_write_ratelimit_enabled);
	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);
	sysfs_pd_controller_show(foreground_write, &c->foreground_write_pd);

	sysfs_print(pd_controllers_update_seconds,
		    c->pd_controllers_update_seconds);
	sysfs_print(foreground_target_percent, c->foreground_target_percent);

	sysfs_printf(tiering_enabled, "%i", c->tiering_enabled);
	sysfs_print(tiering_percent, c->tiering_percent);

	sysfs_pd_controller_show(tiering, &c->tiers[1].pd); /* XXX */

	sysfs_printf(meta_replicas_have, "%u", c->sb.meta_replicas_have);
	sysfs_printf(data_replicas_have, "%u", c->sb.data_replicas_have);

	if (attr == &sysfs_alloc_debug)
		return show_fs_alloc_debug(c, buf);

	if (attr == &sysfs_journal_debug)
		return bch2_journal_print_debug(&c->journal, buf);

	if (attr == &sysfs_journal_pins)
		return bch2_journal_print_pins(&c->journal, buf);

	if (attr == &sysfs_compression_stats)
		return bch2_compression_stats(c, buf);

#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	return 0;
}

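/*
 * __bch2_fs_store() does the real work; bch2_fs_store() below wraps it
 * with c->state_lock held.
 */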
STORE(__bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
	sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);

	sysfs_strtoul(foreground_write_ratelimit_enabled,
		      c->foreground_write_ratelimit_enabled);

	if (attr == &sysfs_copy_gc_enabled) {
		struct bch_dev *ca;
		unsigned i;
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		for_each_member_device(ca, c, i)
			if (ca->moving_gc_read)
				wake_up_process(ca->moving_gc_read);

		return ret;
	}

	if (attr == &sysfs_tiering_enabled) {
		ssize_t ret = strtoul_safe(buf, c->tiering_enabled)
			?: (ssize_t) size;

		bch2_tiering_start(c); /* issue wakeups */
		return ret;
	}

	sysfs_pd_controller_store(foreground_write, &c->foreground_write_pd);

	sysfs_strtoul(pd_controllers_update_seconds,
		      c->pd_controllers_update_seconds);
	sysfs_strtoul(foreground_target_percent, c->foreground_target_percent);

	sysfs_strtoul(tiering_percent, c->tiering_percent);
	sysfs_pd_controller_store(tiering, &c->tiers[1].pd); /* XXX */

#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	if (!bch2_fs_running(c))
		return -EPERM;

	if (attr == &sysfs_trigger_journal_flush)
		bch2_journal_meta_async(&c->journal, NULL);

	if (attr == &sysfs_trigger_btree_coalesce)
		bch2_coalesce(c);

	if (attr == &sysfs_trigger_gc)
		bch2_gc(c);

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache_shrink.scan_objects(&c->btree_cache_shrink, &sc);
	}

	return size;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	mutex_lock(&c->state_lock);
	size = __bch2_fs_store(kobj, attr, buf, size);
	mutex_unlock(&c->state_lock);

	return size;
}
SYSFS_OPS(bch2_fs);

struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_block_size,
	&sysfs_btree_node_size,
	&sysfs_btree_cache_size,

	&sysfs_meta_replicas_have,
	&sysfs_data_replicas_have,

	&sysfs_journal_write_delay_ms,
	&sysfs_journal_reclaim_delay_ms,

	&sysfs_foreground_target_percent,
	&sysfs_tiering_percent,

	&sysfs_compression_stats,
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_show(&c->kobj, attr, buf);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);
	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_alloc_debug,
	&sysfs_journal_debug,
	&sysfs_journal_pins,

	&sysfs_read_realloc_races,

	&sysfs_trigger_journal_flush,
	&sysfs_trigger_btree_coalesce,
	&sysfs_trigger_gc,
	&sysfs_prune_cache,

	&sysfs_foreground_write_ratelimit_enabled,
	&sysfs_copy_gc_enabled,
	&sysfs_tiering_enabled,
	sysfs_pd_controller_files(tiering),
	sysfs_pd_controller_files(foreground_write),
	&sysfs_internal_uuid,

#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
	BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM

	NULL
};

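/*
 * Filesystem options: one file per option. Writes update the in-memory
 * option and, for options that also exist in the superblock, the
 * superblock copy as well.
 */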
SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);

	return bch2_opt_show(&c->opts, attr->name, buf, PAGE_SIZE);
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt;
	int id;
	u64 v;

	id = bch2_parse_sysfs_opt(attr->name, buf, &v);
	if (id < 0)
		return id;

	opt = &bch2_opt_table[id];

	mutex_lock(&c->sb_lock);

	if (id == Opt_compression) {
		int ret = bch2_check_set_has_compressed_data(c, v);
		if (ret) {
			mutex_unlock(&c->sb_lock);
			return ret;
		}
	}

	if (opt->set_sb != SET_NO_SB_OPT) {
		opt->set_sb(c->disk_sb, v);
		bch2_write_super(c);
	}

	bch2_opt_set(&c->opts, id, v);

	mutex_unlock(&c->sb_lock);

	return size;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = {
#define BCH_OPT(_name, ...)					\
	&sysfs_opt_##_name,

	BCH_VISIBLE_OPTS()
#undef BCH_OPT

	NULL
};

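/* Latency and frequency statistics, one file per BCH_TIME_STAT: */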
SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define BCH_TIME_STAT(name, frequency_units, duration_units)	\
	sysfs_print_time_stats(&c->name##_time, name,		\
			       frequency_units, duration_units);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	return 0;
}

STORE(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define BCH_TIME_STAT(name, frequency_units, duration_units)	\
	sysfs_clear_time_stats(&c->name##_time, name);
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define BCH_TIME_STAT(name, frequency_units, duration_units)	\
	sysfs_time_stats_attribute_list(name, frequency_units, duration_units)
	BCH_TIME_STATS()
#undef BCH_TIME_STAT

	NULL
};

typedef unsigned (bucket_map_fn)(struct bch_dev *, struct bucket *, void *);

static unsigned bucket_priority_fn(struct bch_dev *ca, struct bucket *g,
				   void *private)
{
	int rw = (private ? 1 : 0);

	return ca->fs->prio_clock[rw].hand - g->prio[rw];
}

static unsigned bucket_sectors_used_fn(struct bch_dev *ca, struct bucket *g,
				       void *private)
{
	return bucket_sectors_used(g);
}

static unsigned bucket_oldest_gen_fn(struct bch_dev *ca, struct bucket *g,
				     void *private)
{
	return bucket_gc_gen(ca, g);
}

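/*
 * Maps fn() over every bucket on the device and prints 31 evenly
 * spaced quantiles of the result as one space-separated line.
 */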
static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
			      bucket_map_fn *fn, void *private)
{
	int cmp(const void *l, const void *r)
	{	return *((unsigned *) r) - *((unsigned *) l); }

	size_t n = ca->mi.nbuckets, i;
	/* Compute 31 quantiles */
	unsigned q[31], *p;
	ssize_t ret = 0;

	p = vzalloc(ca->mi.nbuckets * sizeof(unsigned));
	if (!p)
		return -ENOMEM;

	for (i = ca->mi.first_bucket; i < n; i++)
		p[i] = fn(ca, &ca->buckets[i], private);

	sort(p, n, sizeof(unsigned), cmp, NULL);

	while (n &&
	       !p[n - 1])
		--n;

	for (i = 0; i < ARRAY_SIZE(q); i++)
		q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];

	vfree(p);

	for (i = 0; i < ARRAY_SIZE(q); i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "%u ", q[i]);
	if (ret)
		buf[ret - 1] = '\n';

	return ret;
}

static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
{
	enum alloc_reserve i;
	ssize_t ret;

	spin_lock(&ca->freelist_lock);

	ret = scnprintf(buf, PAGE_SIZE,
			"free_inc:\t%zu\t%zu\n",
			fifo_used(&ca->free_inc),
			ca->free_inc.size);

	for (i = 0; i < RESERVE_NR; i++)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "free[%u]:\t%zu\t%zu\n", i,
				 fifo_used(&ca->free[i]),
				 ca->free[i].size);

	spin_unlock(&ca->freelist_lock);

	return ret;
}

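/*
 * One-stop allocator debugging: freelist fill levels, bucket counts by
 * type, and whether anything is blocked waiting on buckets:
 */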
static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);

	return scnprintf(buf, PAGE_SIZE,
		"free_inc:		%zu/%zu\n"
		"free[RESERVE_PRIO]:	%zu/%zu\n"
		"free[RESERVE_BTREE]:	%zu/%zu\n"
		"free[RESERVE_MOVINGGC]:	%zu/%zu\n"
		"free[RESERVE_NONE]:	%zu/%zu\n"
		"alloc:			%llu/%llu\n"
		"meta:			%llu/%llu\n"
		"dirty:			%llu/%llu\n"
		"available:		%llu/%llu\n"
		"freelist_wait:		%s\n"
		"open buckets:		%u/%u (reserved %u)\n"
		"open_buckets_wait:	%s\n",
		fifo_used(&ca->free_inc), ca->free_inc.size,
		fifo_used(&ca->free[RESERVE_PRIO]), ca->free[RESERVE_PRIO].size,
		fifo_used(&ca->free[RESERVE_BTREE]), ca->free[RESERVE_BTREE].size,
		fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
		fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
		stats.buckets_alloc, ca->mi.nbuckets - ca->mi.first_bucket,
		stats.buckets_meta, ca->mi.nbuckets - ca->mi.first_bucket,
		stats.buckets_dirty, ca->mi.nbuckets - ca->mi.first_bucket,
		__dev_buckets_available(ca, stats), ca->mi.nbuckets - ca->mi.first_bucket,
		c->freelist_wait.list.first ? "waiting" : "empty",
		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
		c->open_buckets_wait.list.first ? "waiting" : "empty");
}

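/* Sum this device's percpu counters of sectors written: */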
static u64 sectors_written(struct bch_dev *ca)
{
	u64 ret = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(ca->sectors_written, cpu);

	return ret;
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(block_size, block_bytes(c));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(discard, ca->mi.discard);
	sysfs_hprint(written, sectors_written(ca) << 9);
	sysfs_hprint(btree_written,
		     atomic64_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic64_read(&ca->meta_sectors_written) +
		      atomic64_read(&ca->btree_sectors_written)) << 9);

	sysfs_hprint(dirty_data, stats.sectors[S_DIRTY] << 9);
	sysfs_print(dirty_bytes, stats.sectors[S_DIRTY] << 9);
	sysfs_print(dirty_buckets, stats.buckets_dirty);
	sysfs_hprint(cached_data, stats.sectors[S_CACHED] << 9);
	sysfs_print(cached_bytes, stats.sectors[S_CACHED] << 9);
	sysfs_print(cached_buckets, stats.buckets_cached);
	sysfs_print(meta_buckets, stats.buckets_meta);
	sysfs_print(alloc_buckets, stats.buckets_alloc);
	sysfs_print(available_buckets, dev_buckets_available(ca));
	sysfs_print(free_buckets, dev_buckets_free(ca));
	sysfs_print(has_data, ca->mi.has_data);
	sysfs_print(has_metadata, ca->mi.has_metadata);

	sysfs_pd_controller_show(copy_gc, &ca->moving_gc_pd);

	if (attr == &sysfs_cache_replacement_policy)
		return bch2_snprint_string_list(buf, PAGE_SIZE,
						bch2_cache_replacement_policies,
						ca->mi.replacement);

	sysfs_print(tier, ca->mi.tier);

	if (attr == &sysfs_state_rw)
		return bch2_snprint_string_list(buf, PAGE_SIZE,
						bch2_dev_state,
						ca->mi.state);

	if (attr == &sysfs_read_priority_stats)
		return show_quantiles(ca, buf, bucket_priority_fn, (void *) 0);
	if (attr == &sysfs_write_priority_stats)
		return show_quantiles(ca, buf, bucket_priority_fn, (void *) 1);
	if (attr == &sysfs_fragmentation_stats)
		return show_quantiles(ca, buf, bucket_sectors_used_fn, NULL);
	if (attr == &sysfs_oldest_gen_stats)
		return show_quantiles(ca, buf, bucket_oldest_gen_fn, NULL);
	if (attr == &sysfs_reserve_stats)
		return show_reserve_stats(ca, buf);
	if (attr == &sysfs_alloc_debug)
		return show_dev_alloc_debug(ca, buf);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	sysfs_pd_controller_store(copy_gc, &ca->moving_gc_pd);

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_cache_replacement_policy) {
		ssize_t v = bch2_read_string_list(buf, bch2_cache_replacement_policies);

		if (v < 0)
			return v;

		mutex_lock(&c->sb_lock);
		mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];

		if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
			SET_BCH_MEMBER_REPLACEMENT(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_tier) {
		unsigned prev_tier;
		unsigned v = strtoul_restrict_or_return(buf,
					0, BCH_TIER_MAX - 1);

		mutex_lock(&c->sb_lock);
		prev_tier = ca->mi.tier;

		if (v == ca->mi.tier) {
			mutex_unlock(&c->sb_lock);
			return size;
		}

		mi = &bch2_sb_get_members(c->disk_sb)->members[ca->dev_idx];
		SET_BCH_MEMBER_TIER(mi, v);
		bch2_write_super(c);

		bch2_dev_group_remove(&c->tiers[prev_tier].devs, ca);
		bch2_dev_group_add(&c->tiers[ca->mi.tier].devs, ca);
		mutex_unlock(&c->sb_lock);

		bch2_recalc_capacity(c);
		bch2_tiering_start(c);
	}

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,

	/* settings: */
	&sysfs_discard,
	&sysfs_cache_replacement_policy,
	&sysfs_tier,
	&sysfs_state_rw,

	&sysfs_has_data,
	&sysfs_has_metadata,

	/* io stats: */
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,

	/* alloc info - data: */
	&sysfs_dirty_data,
	&sysfs_dirty_bytes,
	&sysfs_cached_data,
	&sysfs_cached_bytes,

	/* alloc info - buckets: */
	&sysfs_available_buckets,
	&sysfs_free_buckets,
	&sysfs_dirty_buckets,
	&sysfs_cached_buckets,
	&sysfs_meta_buckets,
	&sysfs_alloc_buckets,

	/* alloc info - other stats: */
	&sysfs_read_priority_stats,
	&sysfs_write_priority_stats,
	&sysfs_fragmentation_stats,
	&sysfs_oldest_gen_stats,
	&sysfs_reserve_stats,

	/* debug: */
	&sysfs_alloc_debug,

	sysfs_pd_controller_files(copy_gc),
	NULL
};