Update bcachefs sources to 313b24b652 bcachefs: Fix an assertion
diff --git a/libbcachefs/sysfs.c b/libbcachefs/sysfs.c
index c6a653ac201221bfe25a9f630978bd510f5f85af..4fc5777ecfb09d8959db36a0fa3c60719b2ddd13 100644
--- a/libbcachefs/sysfs.c
+++ b/libbcachefs/sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * bcache sysfs interfaces
  *
 
 #include "bcachefs.h"
 #include "alloc_background.h"
-#include "compress.h"
 #include "sysfs.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_iter.h"
+#include "btree_key_cache.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_gc.h"
 #include "buckets.h"
+#include "clock.h"
 #include "disk_groups.h"
+#include "ec.h"
 #include "inode.h"
 #include "journal.h"
 #include "keylist.h"
@@ -72,9 +75,9 @@ do {                                                                  \
 #define sysfs_hprint(file, val)                                                \
 do {                                                                   \
        if (attr == &sysfs_ ## file) {                                  \
-               ssize_t ret = bch2_hprint(buf, val);                    \
-               strcat(buf, "\n");                                      \
-               return ret + 1;                                         \
+               bch2_hprint(&out, val);                                 \
+               pr_buf(&out, "\n");                                     \
+               return out.pos - buf;                                   \
        }                                                               \
 } while (0)
 
@@ -162,6 +165,10 @@ read_attribute(journal_debug);
 read_attribute(journal_pins);
 read_attribute(btree_updates);
 read_attribute(dirty_btree_nodes);
+read_attribute(btree_cache);
+read_attribute(btree_key_cache);
+read_attribute(btree_transactions);
+read_attribute(stripes_heap);
 
 read_attribute(internal_uuid);
 
@@ -188,21 +195,20 @@ sysfs_pd_controller_attribute(rebalance);
 read_attribute(rebalance_work);
 rw_attribute(promote_whole_extents);
 
+read_attribute(new_stripes);
+
 rw_attribute(pd_controllers_update_seconds);
 
 read_attribute(meta_replicas_have);
 read_attribute(data_replicas_have);
 
+read_attribute(io_timers_read);
+read_attribute(io_timers_write);
+
 #ifdef CONFIG_BCACHEFS_TESTS
 write_attribute(perf_test);
 #endif /* CONFIG_BCACHEFS_TESTS */
 
-#define BCH_DEBUG_PARAM(name, description)                             \
-       rw_attribute(name);
-
-       BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
 #define x(_name)                                               \
        static struct attribute sysfs_time_stat_##_name =               \
                { .name = #_name, .mode = S_IRUGO };
@@ -227,58 +233,45 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
        return ret;
 }
 
-static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
+static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
 {
-       struct printbuf out = _PBUF(buf, PAGE_SIZE);
-       struct bch_fs_usage stats = bch2_fs_usage_read(c);
-       unsigned replicas, type;
-
-       pr_buf(&out, "capacity:\t\t%llu\n", c->capacity);
-
-       for (replicas = 0; replicas < ARRAY_SIZE(stats.replicas); replicas++) {
-               pr_buf(&out, "%u replicas:\n", replicas + 1);
+       struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);
 
-               for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
-                       pr_buf(&out, "\t%s:\t\t%llu\n",
-                              bch2_data_types[type],
-                              stats.replicas[replicas].data[type]);
-               pr_buf(&out, "\treserved:\t%llu\n",
-                      stats.replicas[replicas].persistent_reserved);
-       }
-
-       pr_buf(&out, "bucket usage\n");
+       if (!fs_usage)
+               return -ENOMEM;
 
-       for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
-               pr_buf(&out, "\t%s:\t\t%llu\n",
-                      bch2_data_types[type],
-                      stats.buckets[type]);
+       bch2_fs_usage_to_text(out, c, fs_usage);
 
-       pr_buf(&out, "online reserved:\t%llu\n",
-              stats.online_reserved);
+       percpu_up_read(&c->mark_lock);
 
-       return out.pos - buf;
+       kfree(fs_usage);
+       return 0;
 }
 
-static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
+static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
 {
-       struct btree_iter iter;
+       struct btree_trans trans;
+       struct btree_iter *iter;
        struct bkey_s_c k;
        u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
            nr_compressed_extents = 0,
            compressed_sectors_compressed = 0,
            compressed_sectors_uncompressed = 0;
+       int ret;
 
-       if (!bch2_fs_running(c))
+       if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;
 
-       for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
-               if (k.k->type == BCH_EXTENT) {
+       bch2_trans_init(&trans, c, 0, 0);
+
+       for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
+               if (k.k->type == KEY_TYPE_extent) {
                        struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
                        const union bch_extent_entry *entry;
                        struct extent_ptr_decoded p;
 
                        extent_for_each_ptr_decode(e, p, entry) {
-                               if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
+                               if (!crc_is_compressed(p.crc)) {
                                        nr_uncompressed_extents++;
                                        uncompressed_sectors += e.k->size;
                                } else {
@@ -293,26 +286,31 @@ static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
                                break;
                        }
                }
-       bch2_btree_iter_unlock(&iter);
-
-       return scnprintf(buf, PAGE_SIZE,
-                       "uncompressed data:\n"
-                       "       nr extents:                     %llu\n"
-                       "       size (bytes):                   %llu\n"
-                       "compressed data:\n"
-                       "       nr extents:                     %llu\n"
-                       "       compressed size (bytes):        %llu\n"
-                       "       uncompressed size (bytes):      %llu\n",
-                       nr_uncompressed_extents,
-                       uncompressed_sectors << 9,
-                       nr_compressed_extents,
-                       compressed_sectors_compressed << 9,
-                       compressed_sectors_uncompressed << 9);
+
+       ret = bch2_trans_exit(&trans) ?: ret;
+       if (ret)
+               return ret;
+
+       pr_buf(out,
+              "uncompressed data:\n"
+              "        nr extents:                     %llu\n"
+              "        size (bytes):                   %llu\n"
+              "compressed data:\n"
+              "        nr extents:                     %llu\n"
+              "        compressed size (bytes):        %llu\n"
+              "        uncompressed size (bytes):      %llu\n",
+              nr_uncompressed_extents,
+              uncompressed_sectors << 9,
+              nr_compressed_extents,
+              compressed_sectors_compressed << 9,
+              compressed_sectors_uncompressed << 9);
+       return 0;
 }
 
 SHOW(bch2_fs)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
+       struct printbuf out = _PBUF(buf, PAGE_SIZE);
 
        sysfs_print(minor,                      c->minor);
        sysfs_printf(internal_uuid, "%pU",      c->sb.uuid.b);
@@ -340,43 +338,86 @@ SHOW(bch2_fs)
 
        sysfs_printf(rebalance_enabled,         "%i", c->rebalance.enabled);
        sysfs_pd_controller_show(rebalance,     &c->rebalance.pd); /* XXX */
+       sysfs_pd_controller_show(copy_gc,       &c->copygc_pd);
 
-       if (attr == &sysfs_rebalance_work)
-               return bch2_rebalance_work_show(c, buf);
+       if (attr == &sysfs_rebalance_work) {
+               bch2_rebalance_work_to_text(&out, c);
+               return out.pos - buf;
+       }
 
        sysfs_print(promote_whole_extents,      c->promote_whole_extents);
 
-       sysfs_printf(meta_replicas_have, "%u",  bch2_replicas_online(c, true));
-       sysfs_printf(data_replicas_have, "%u",  bch2_replicas_online(c, false));
+       sysfs_printf(meta_replicas_have, "%i",  bch2_replicas_online(c, true));
+       sysfs_printf(data_replicas_have, "%i",  bch2_replicas_online(c, false));
 
        /* Debugging: */
 
        if (attr == &sysfs_alloc_debug)
-               return show_fs_alloc_debug(c, buf);
+               return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;
 
-       if (attr == &sysfs_journal_debug)
-               return bch2_journal_print_debug(&c->journal, buf);
+       if (attr == &sysfs_journal_debug) {
+               bch2_journal_debug_to_text(&out, &c->journal);
+               return out.pos - buf;
+       }
 
-       if (attr == &sysfs_journal_pins)
-               return bch2_journal_print_pins(&c->journal, buf);
+       if (attr == &sysfs_journal_pins) {
+               bch2_journal_pins_to_text(&out, &c->journal);
+               return out.pos - buf;
+       }
 
-       if (attr == &sysfs_btree_updates)
-               return bch2_btree_updates_print(c, buf);
+       if (attr == &sysfs_btree_updates) {
+               bch2_btree_updates_to_text(&out, c);
+               return out.pos - buf;
+       }
 
-       if (attr == &sysfs_dirty_btree_nodes)
-               return bch2_dirty_btree_nodes_print(c, buf);
+       if (attr == &sysfs_dirty_btree_nodes) {
+               bch2_dirty_btree_nodes_to_text(&out, c);
+               return out.pos - buf;
+       }
+
+       if (attr == &sysfs_btree_cache) {
+               bch2_btree_cache_to_text(&out, c);
+               return out.pos - buf;
+       }
+
+       if (attr == &sysfs_btree_key_cache) {
+               bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
+               return out.pos - buf;
+       }
+
+       if (attr == &sysfs_btree_transactions) {
+               bch2_btree_trans_to_text(&out, c);
+               return out.pos - buf;
+       }
+
+       if (attr == &sysfs_stripes_heap) {
+               bch2_stripes_heap_to_text(&out, c);
+               return out.pos - buf;
+       }
 
-       if (attr == &sysfs_compression_stats)
-               return bch2_compression_stats(c, buf);
+       if (attr == &sysfs_compression_stats) {
+               bch2_compression_stats_to_text(&out, c);
+               return out.pos - buf;
+       }
 
-#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
-       BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
+       if (attr == &sysfs_new_stripes) {
+               bch2_new_stripes_to_text(&out, c);
+               return out.pos - buf;
+       }
+
+       if (attr == &sysfs_io_timers_read) {
+               bch2_io_timers_to_text(&out, &c->io_clock[READ]);
+               return out.pos - buf;
+       }
+       if (attr == &sysfs_io_timers_write) {
+               bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
+               return out.pos - buf;
+       }
 
        return 0;
 }
 
-STORE(__bch2_fs)
+STORE(bch2_fs)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
 
@@ -392,14 +433,11 @@ STORE(__bch2_fs)
        }
 
        if (attr == &sysfs_copy_gc_enabled) {
-               struct bch_dev *ca;
-               unsigned i;
                ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
                        ?: (ssize_t) size;
 
-               for_each_member_device(ca, c, i)
-                       if (ca->copygc_thread)
-                               wake_up_process(ca->copygc_thread);
+               if (c->copygc_thread)
+                       wake_up_process(c->copygc_thread);
                return ret;
        }
 
@@ -414,28 +452,35 @@ STORE(__bch2_fs)
        sysfs_strtoul(pd_controllers_update_seconds,
                      c->pd_controllers_update_seconds);
        sysfs_pd_controller_store(rebalance,    &c->rebalance.pd);
+       sysfs_pd_controller_store(copy_gc,      &c->copygc_pd);
 
        sysfs_strtoul(promote_whole_extents,    c->promote_whole_extents);
 
        /* Debugging: */
 
-#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
-       BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
-       if (!bch2_fs_running(c))
+       if (!test_bit(BCH_FS_STARTED, &c->flags))
                return -EPERM;
 
        /* Debugging: */
 
        if (attr == &sysfs_trigger_journal_flush)
-               bch2_journal_meta_async(&c->journal, NULL);
+               bch2_journal_meta(&c->journal);
 
        if (attr == &sysfs_trigger_btree_coalesce)
                bch2_coalesce(c);
 
-       if (attr == &sysfs_trigger_gc)
-               bch2_gc(c);
+       if (attr == &sysfs_trigger_gc) {
+               /*
+                * Full gc is currently incompatible with btree key cache:
+                */
+#if 0
+               down_read(&c->state_lock);
+               bch2_gc(c, NULL, false, false);
+               up_read(&c->state_lock);
+#else
+               bch2_gc_gens(c);
+#endif
+       }
 
        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;
@@ -444,6 +489,7 @@ STORE(__bch2_fs)
                sc.nr_to_scan = strtoul_or_return(buf);
                c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
        }
+
 #ifdef CONFIG_BCACHEFS_TESTS
        if (attr == &sysfs_perf_test) {
                char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
@@ -457,25 +503,15 @@ STORE(__bch2_fs)
                if (threads_str &&
                    !(ret = kstrtouint(threads_str, 10, &threads)) &&
                    !(ret = bch2_strtoull_h(nr_str, &nr)))
-                       bch2_btree_perf_test(c, test, nr, threads);
-               else
-                       size = ret;
+                       ret = bch2_btree_perf_test(c, test, nr, threads);
                kfree(tmp);
+
+               if (ret)
+                       size = ret;
        }
 #endif
        return size;
 }
-
-STORE(bch2_fs)
-{
-       struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
-       mutex_lock(&c->state_lock);
-       size = __bch2_fs_store(kobj, attr, buf, size);
-       mutex_unlock(&c->state_lock);
-
-       return size;
-}
 SYSFS_OPS(bch2_fs);
 
 struct attribute *bch2_fs_files[] = {
@@ -521,6 +557,10 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_journal_pins,
        &sysfs_btree_updates,
        &sysfs_dirty_btree_nodes,
+       &sysfs_btree_cache,
+       &sysfs_btree_key_cache,
+       &sysfs_btree_transactions,
+       &sysfs_stripes_heap,
 
        &sysfs_read_realloc_races,
        &sysfs_extent_migrate_done,
@@ -536,13 +576,14 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_rebalance_enabled,
        &sysfs_rebalance_work,
        sysfs_pd_controller_files(rebalance),
+       sysfs_pd_controller_files(copy_gc),
 
-       &sysfs_internal_uuid,
+       &sysfs_new_stripes,
 
-#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
-       BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
+       &sysfs_io_timers_read,
+       &sysfs_io_timers_write,
 
+       &sysfs_internal_uuid,
        NULL
 };
 
@@ -580,14 +621,9 @@ STORE(bch2_fs_opts_dir)
        if (ret < 0)
                return ret;
 
-       if (id == Opt_compression ||
-           id == Opt_background_compression) {
-               int ret = bch2_check_set_has_compressed_data(c, v);
-               if (ret) {
-                       mutex_unlock(&c->sb_lock);
-                       return ret;
-               }
-       }
+       ret = bch2_opt_check_may_set(c, id, v);
+       if (ret < 0)
+               return ret;
 
        if (opt->set_sb != SET_NO_SB_OPT) {
                mutex_lock(&c->sb_lock);
@@ -618,7 +654,7 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
        for (i = bch2_opt_table;
             i < bch2_opt_table + bch2_opts_nr;
             i++) {
-               if (i->mode == OPT_INTERNAL)
+               if (!(i->mode & (OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME)))
                        continue;
 
                ret = sysfs_create_file(kobj, &i->attr);
@@ -634,11 +670,13 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
 SHOW(bch2_fs_time_stats)
 {
        struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+       struct printbuf out = _PBUF(buf, PAGE_SIZE);
 
-#define x(name)                                                \
-       if (attr == &sysfs_time_stat_##name)                            \
-               return bch2_time_stats_print(&c->times[BCH_TIME_##name],\
-                                            buf, PAGE_SIZE);
+#define x(name)                                                                \
+       if (attr == &sysfs_time_stat_##name) {                          \
+               bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
+               return out.pos - buf;                                   \
+       }
        BCH_TIME_STATS()
 #undef x
 
@@ -685,19 +723,19 @@ static unsigned bucket_oldest_gen_fn(struct bch_fs *c, struct bch_dev *ca,
 
 static int unsigned_cmp(const void *_l, const void *_r)
 {
-       unsigned l = *((unsigned *) _l);
-       unsigned r = *((unsigned *) _r);
+       const unsigned *l = _l;
+       const unsigned *r = _r;
 
-       return (l > r) - (l < r);
+       return cmp_int(*l, *r);
 }
 
-static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
-                             char *buf, bucket_map_fn *fn, void *private)
+static int quantiles_to_text(struct printbuf *out,
+                            struct bch_fs *c, struct bch_dev *ca,
+                            bucket_map_fn *fn, void *private)
 {
        size_t i, n;
        /* Compute 31 quantiles */
        unsigned q[31], *p;
-       ssize_t ret = 0;
 
        down_read(&ca->bucket_lock);
        n = ca->mi.nbuckets;
@@ -724,86 +762,77 @@ static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
        vfree(p);
 
        for (i = 0; i < ARRAY_SIZE(q); i++)
-               ret += scnprintf(buf + ret, PAGE_SIZE - ret,
-                                "%u ", q[i]);
-       buf[ret - 1] = '\n';
-
-       return ret;
+               pr_buf(out, "%u ", q[i]);
+       pr_buf(out, "\n");
+       return 0;
 }
 
-static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
+static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
 {
-       struct printbuf out = _PBUF(buf, PAGE_SIZE);
        enum alloc_reserve i;
 
-       spin_lock(&ca->freelist_lock);
+       spin_lock(&ca->fs->freelist_lock);
 
-       pr_buf(&out, "free_inc:\t%zu\t%zu\n",
+       pr_buf(out, "free_inc:\t%zu\t%zu\n",
               fifo_used(&ca->free_inc),
               ca->free_inc.size);
 
        for (i = 0; i < RESERVE_NR; i++)
-               pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
+               pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
                       fifo_used(&ca->free[i]),
                       ca->free[i].size);
 
-       spin_unlock(&ca->freelist_lock);
-
-       return out.pos - buf;
+       spin_unlock(&ca->fs->freelist_lock);
 }
 
-static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
+static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
 {
        struct bch_fs *c = ca->fs;
-       struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
-
-       return scnprintf(buf, PAGE_SIZE,
-               "free_inc:               %zu/%zu\n"
-               "free[RESERVE_BTREE]:    %zu/%zu\n"
-               "free[RESERVE_MOVINGGC]: %zu/%zu\n"
-               "free[RESERVE_NONE]:     %zu/%zu\n"
-               "buckets:\n"
-               "    capacity:           %llu\n"
-               "    alloc:              %llu\n"
-               "    sb:                 %llu\n"
-               "    journal:            %llu\n"
-               "    meta:               %llu\n"
-               "    user:               %llu\n"
-               "    cached:             %llu\n"
-               "    available:          %lli\n"
-               "sectors:\n"
-               "    sb:                 %llu\n"
-               "    journal:            %llu\n"
-               "    meta:               %llu\n"
-               "    user:               %llu\n"
-               "    cached:             %llu\n"
-               "    fragmented:         %llu\n"
-               "    copygc threshold:   %llu\n"
-               "freelist_wait:          %s\n"
-               "open buckets:           %u/%u (reserved %u)\n"
-               "open_buckets_wait:      %s\n",
-               fifo_used(&ca->free_inc),               ca->free_inc.size,
-               fifo_used(&ca->free[RESERVE_BTREE]),    ca->free[RESERVE_BTREE].size,
-               fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
-               fifo_used(&ca->free[RESERVE_NONE]),     ca->free[RESERVE_NONE].size,
-               ca->mi.nbuckets - ca->mi.first_bucket,
-               stats.buckets_alloc,
-               stats.buckets[BCH_DATA_SB],
-               stats.buckets[BCH_DATA_JOURNAL],
-               stats.buckets[BCH_DATA_BTREE],
-               stats.buckets[BCH_DATA_USER],
-               stats.buckets[BCH_DATA_CACHED],
-               ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
-               stats.sectors[BCH_DATA_SB],
-               stats.sectors[BCH_DATA_JOURNAL],
-               stats.sectors[BCH_DATA_BTREE],
-               stats.sectors[BCH_DATA_USER],
-               stats.sectors[BCH_DATA_CACHED],
-               stats.sectors_fragmented,
-               ca->copygc_threshold,
-               c->freelist_wait.list.first             ? "waiting" : "empty",
-               c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
-               c->open_buckets_wait.list.first         ? "waiting" : "empty");
+       struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+       unsigned i, nr[BCH_DATA_NR];
+
+       memset(nr, 0, sizeof(nr));
+
+       for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
+               nr[c->open_buckets[i].type]++;
+
+       pr_buf(out,
+              "\t\t buckets\t sectors      fragmented\n"
+              "capacity%16llu\n",
+              ca->mi.nbuckets - ca->mi.first_bucket);
+
+       for (i = 1; i < BCH_DATA_NR; i++)
+               pr_buf(out, "%-8s%16llu%16llu%16llu\n",
+                      bch2_data_types[i], stats.d[i].buckets,
+                      stats.d[i].sectors, stats.d[i].fragmented);
+
+       pr_buf(out,
+              "ec\t%16llu\n"
+              "available%15llu\n"
+              "alloc\t%16llu\n"
+              "\n"
+              "free_inc\t\t%zu/%zu\n"
+              "free[RESERVE_MOVINGGC]\t%zu/%zu\n"
+              "free[RESERVE_NONE]\t%zu/%zu\n"
+              "freelist_wait\t\t%s\n"
+              "open buckets\t\t%u/%u (reserved %u)\n"
+              "open_buckets_wait\t%s\n"
+              "open_buckets_btree\t%u\n"
+              "open_buckets_user\t%u\n"
+              "btree reserve cache\t%u\n",
+              stats.buckets_ec,
+              __dev_buckets_available(ca, stats),
+              stats.buckets_alloc,
+              fifo_used(&ca->free_inc),                ca->free_inc.size,
+              fifo_used(&ca->free[RESERVE_MOVINGGC]),  ca->free[RESERVE_MOVINGGC].size,
+              fifo_used(&ca->free[RESERVE_NONE]),      ca->free[RESERVE_NONE].size,
+              c->freelist_wait.list.first              ? "waiting" : "empty",
+              c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
+              BTREE_NODE_OPEN_BUCKET_RESERVE,
+              c->open_buckets_wait.list.first          ? "waiting" : "empty",
+              nr[BCH_DATA_btree],
+              nr[BCH_DATA_user],
+              c->btree_reserve_cache_nr);
 }
 
 static const char * const bch2_rw[] = {
@@ -812,26 +841,18 @@ static const char * const bch2_rw[] = {
        NULL
 };
 
-static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
+static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
 {
-       struct printbuf out = _PBUF(buf, PAGE_SIZE);
-       int rw, i, cpu;
+       int rw, i;
 
        for (rw = 0; rw < 2; rw++) {
-               pr_buf(&out, "%s:\n", bch2_rw[rw]);
-
-               for (i = 1; i < BCH_DATA_NR; i++) {
-                       u64 n = 0;
+               pr_buf(out, "%s:\n", bch2_rw[rw]);
 
-                       for_each_possible_cpu(cpu)
-                               n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];
-
-                       pr_buf(&out, "%-12s:%12llu\n",
-                              bch2_data_types[i], n << 9);
-               }
+               for (i = 1; i < BCH_DATA_NR; i++)
+                       pr_buf(out, "%-12s:%12llu\n",
+                              bch2_data_types[i],
+                              percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
        }
-
-       return out.pos - buf;
 }
 
 SHOW(bch2_dev)
@@ -855,8 +876,6 @@ SHOW(bch2_dev)
                        bch2_disk_path_to_text(&out, &c->disk_sb,
                                               ca->mi.group - 1);
                        mutex_unlock(&c->sb_lock);
-               } else {
-                       pr_buf(&out, "none");
                }
 
                pr_buf(&out, "\n");
@@ -870,8 +889,6 @@ SHOW(bch2_dev)
                return out.pos - buf;
        }
 
-       sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);
-
        if (attr == &sysfs_cache_replacement_policy) {
                bch2_string_opt_to_text(&out,
                                        bch2_cache_replacement_policies,
@@ -887,34 +904,44 @@ SHOW(bch2_dev)
                return out.pos - buf;
        }
 
-       if (attr == &sysfs_iodone)
-               return show_dev_iodone(ca, buf);
+       if (attr == &sysfs_iodone) {
+               dev_iodone_to_text(&out, ca);
+               return out.pos - buf;
+       }
 
        sysfs_print(io_latency_read,            atomic64_read(&ca->cur_latency[READ]));
        sysfs_print(io_latency_write,           atomic64_read(&ca->cur_latency[WRITE]));
 
-       if (attr == &sysfs_io_latency_stats_read)
-               return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
-       if (attr == &sysfs_io_latency_stats_write)
-               return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);
+       if (attr == &sysfs_io_latency_stats_read) {
+               bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
+               return out.pos - buf;
+       }
+       if (attr == &sysfs_io_latency_stats_write) {
+               bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
+               return out.pos - buf;
+       }
 
        sysfs_printf(congested,                 "%u%%",
                     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
                     * 100 / CONGESTED_MAX);
 
        if (attr == &sysfs_bucket_quantiles_last_read)
-               return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
+               return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 0) ?: out.pos - buf;
        if (attr == &sysfs_bucket_quantiles_last_write)
-               return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
+               return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 1) ?: out.pos - buf;
        if (attr == &sysfs_bucket_quantiles_fragmentation)
-               return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
+               return quantiles_to_text(&out, c, ca, bucket_sectors_used_fn, NULL)  ?: out.pos - buf;
        if (attr == &sysfs_bucket_quantiles_oldest_gen)
-               return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);
+               return quantiles_to_text(&out, c, ca, bucket_oldest_gen_fn, NULL)    ?: out.pos - buf;
 
-       if (attr == &sysfs_reserve_stats)
-               return show_reserve_stats(ca, buf);
-       if (attr == &sysfs_alloc_debug)
-               return show_dev_alloc_debug(ca, buf);
+       if (attr == &sysfs_reserve_stats) {
+               reserve_stats_to_text(&out, ca);
+               return out.pos - buf;
+       }
+       if (attr == &sysfs_alloc_debug) {
+               dev_alloc_debug_to_text(&out, ca);
+               return out.pos - buf;
+       }
 
        return 0;
 }
@@ -925,8 +952,6 @@ STORE(bch2_dev)
        struct bch_fs *c = ca->fs;
        struct bch_member *mi;
 
-       sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);
-
        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);
 
@@ -1011,8 +1036,6 @@ struct attribute *bch2_dev_files[] = {
        /* debug: */
        &sysfs_alloc_debug,
        &sysfs_wake_allocator,
-
-       sysfs_pd_controller_files(copy_gc),
        NULL
 };
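
The recurring pattern in this diff is the conversion of the sysfs show helpers from formatting directly into the page buffer (scnprintf()/strcat() on buf, returning a byte count) to printbuf-based *_to_text() helpers: each SHOW handler wraps buf in a struct printbuf via _PBUF(buf, PAGE_SIZE), the helper appends with pr_buf(), and the handler returns out.pos - buf. A minimal sketch of that pattern, using only constructs visible in this diff; the attribute name "example_stat" and the helper below are hypothetical, for illustration only, not part of bcachefs:

/*
 * Sketch of the printbuf-based sysfs show pattern used throughout this diff.
 * example_stat_to_text() and sysfs_example_stat are hypothetical names;
 * _PBUF(), pr_buf(), SHOW() and read_attribute() come from the surrounding
 * bcachefs code.
 */
read_attribute(example_stat);

static void example_stat_to_text(struct printbuf *out, struct bch_fs *c)
{
	/* Helpers no longer track buffer lengths; they just append: */
	pr_buf(out, "capacity:\t%llu\n", c->capacity);
}

SHOW(example)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
	struct printbuf out = _PBUF(buf, PAGE_SIZE);	/* wraps the sysfs page buffer */

	if (attr == &sysfs_example_stat) {
		example_stat_to_text(&out, c);
		return out.pos - buf;	/* bytes written into buf */
	}

	return 0;
}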