#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
+#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#define sysfs_hprint(file, val) \
do { \
if (attr == &sysfs_ ## file) { \
- struct printbuf out = _PBUF(buf, PAGE_SIZE); \
bch2_hprint(&out, val); \
pr_buf(&out, "\n"); \
return out.pos - buf; \
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
-write_attribute(trigger_alloc_write);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
read_attribute(journal_pins);
read_attribute(btree_updates);
read_attribute(dirty_btree_nodes);
+read_attribute(btree_cache);
+read_attribute(btree_key_cache);
+read_attribute(btree_transactions);
+read_attribute(stripes_heap);
read_attribute(internal_uuid);
read_attribute(meta_replicas_have);
read_attribute(data_replicas_have);
+read_attribute(io_timers_read);
+read_attribute(io_timers_write);
+
#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
-#define BCH_DEBUG_PARAM(name, description) \
- rw_attribute(name);
-
- BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
#define x(_name) \
static struct attribute sysfs_time_stat_##_name = \
{ .name = #_name, .mode = S_IRUGO };
return ret;
}
-static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
+static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);
if (!fs_usage)
return -ENOMEM;
- bch2_fs_usage_to_text(&out, c, fs_usage);
+ bch2_fs_usage_to_text(out, c, fs_usage);
percpu_up_read(&c->mark_lock);
kfree(fs_usage);
-
- return out.pos - buf;
+ return 0;
}
-static ssize_t bch2_compression_stats(struct bch_fs *c, char *buf)
+static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter *iter;
struct extent_ptr_decoded p;
extent_for_each_ptr_decode(e, p, entry) {
- if (p.crc.compression_type == BCH_COMPRESSION_NONE) {
+ if (!crc_is_compressed(p.crc)) {
nr_uncompressed_extents++;
uncompressed_sectors += e.k->size;
} else {
if (ret)
return ret;
- return scnprintf(buf, PAGE_SIZE,
- "uncompressed data:\n"
- " nr extents: %llu\n"
- " size (bytes): %llu\n"
- "compressed data:\n"
- " nr extents: %llu\n"
- " compressed size (bytes): %llu\n"
- " uncompressed size (bytes): %llu\n",
- nr_uncompressed_extents,
- uncompressed_sectors << 9,
- nr_compressed_extents,
- compressed_sectors_compressed << 9,
- compressed_sectors_uncompressed << 9);
-}
-
-static ssize_t bch2_new_stripes(struct bch_fs *c, char *buf)
-{
- char *out = buf, *end = buf + PAGE_SIZE;
- struct ec_stripe_head *h;
- struct ec_stripe_new *s;
-
- mutex_lock(&c->ec_new_stripe_lock);
- list_for_each_entry(h, &c->ec_new_stripe_list, list) {
- out += scnprintf(out, end - out,
- "target %u algo %u redundancy %u:\n",
- h->target, h->algo, h->redundancy);
-
- if (h->s)
- out += scnprintf(out, end - out,
- "\tpending: blocks %u allocated %u\n",
- h->s->blocks.nr,
- bitmap_weight(h->s->blocks_allocated,
- h->s->blocks.nr));
-
- mutex_lock(&h->lock);
- list_for_each_entry(s, &h->stripes, list)
- out += scnprintf(out, end - out,
- "\tin flight: blocks %u allocated %u pin %u\n",
- s->blocks.nr,
- bitmap_weight(s->blocks_allocated,
- s->blocks.nr),
- atomic_read(&s->pin));
- mutex_unlock(&h->lock);
-
- }
- mutex_unlock(&c->ec_new_stripe_lock);
-
- return out - buf;
+ pr_buf(out,
+ "uncompressed data:\n"
+ " nr extents: %llu\n"
+ " size (bytes): %llu\n"
+ "compressed data:\n"
+ " nr extents: %llu\n"
+ " compressed size (bytes): %llu\n"
+ " uncompressed size (bytes): %llu\n",
+ nr_uncompressed_extents,
+ uncompressed_sectors << 9,
+ nr_compressed_extents,
+ compressed_sectors_compressed << 9,
+ compressed_sectors_uncompressed << 9);
+ return 0;
}
SHOW(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
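+	/* printbuf over the sysfs PAGE_SIZE buffer: handlers append with pr_buf() and return out.pos - buf */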
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
sysfs_print(minor, c->minor);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */
+ sysfs_pd_controller_show(copy_gc, &c->copygc_pd);
- if (attr == &sysfs_rebalance_work)
- return bch2_rebalance_work_show(c, buf);
+ if (attr == &sysfs_rebalance_work) {
+ bch2_rebalance_work_to_text(&out, c);
+ return out.pos - buf;
+ }
sysfs_print(promote_whole_extents, c->promote_whole_extents);
/* Debugging: */
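+	/* fs_alloc_debug_to_text() can fail (-ENOMEM); return the error, otherwise the number of bytes written */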
if (attr == &sysfs_alloc_debug)
- return show_fs_alloc_debug(c, buf);
+ return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;
+
+ if (attr == &sysfs_journal_debug) {
+ bch2_journal_debug_to_text(&out, &c->journal);
+ return out.pos - buf;
+ }
+
+ if (attr == &sysfs_journal_pins) {
+ bch2_journal_pins_to_text(&out, &c->journal);
+ return out.pos - buf;
+ }
+
+ if (attr == &sysfs_btree_updates) {
+ bch2_btree_updates_to_text(&out, c);
+ return out.pos - buf;
+ }
+
+ if (attr == &sysfs_dirty_btree_nodes) {
+ bch2_dirty_btree_nodes_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_journal_debug)
- return bch2_journal_print_debug(&c->journal, buf);
+ if (attr == &sysfs_btree_cache) {
+ bch2_btree_cache_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_journal_pins)
- return bch2_journal_print_pins(&c->journal, buf);
+ if (attr == &sysfs_btree_key_cache) {
+ bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_btree_updates)
- return bch2_btree_updates_print(c, buf);
+ if (attr == &sysfs_btree_transactions) {
+ bch2_btree_trans_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_dirty_btree_nodes)
- return bch2_dirty_btree_nodes_print(c, buf);
+ if (attr == &sysfs_stripes_heap) {
+ bch2_stripes_heap_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_compression_stats)
- return bch2_compression_stats(c, buf);
+ if (attr == &sysfs_compression_stats) {
+ bch2_compression_stats_to_text(&out, c);
+ return out.pos - buf;
+ }
- if (attr == &sysfs_new_stripes)
- return bch2_new_stripes(c, buf);
+ if (attr == &sysfs_new_stripes) {
+ bch2_new_stripes_to_text(&out, c);
+ return out.pos - buf;
+ }
-#define BCH_DEBUG_PARAM(name, description) sysfs_print(name, c->name);
- BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
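+	/* dump pending timers on the read and write io clocks (see clock.h) */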
+ if (attr == &sysfs_io_timers_read) {
+ bch2_io_timers_to_text(&out, &c->io_clock[READ]);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_io_timers_write) {
+ bch2_io_timers_to_text(&out, &c->io_clock[WRITE]);
+ return out.pos - buf;
+ }
return 0;
}
-STORE(__bch2_fs)
+STORE(bch2_fs)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
}
if (attr == &sysfs_copy_gc_enabled) {
- struct bch_dev *ca;
- unsigned i;
ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
?: (ssize_t) size;
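+		/* copygc now runs as a single per-filesystem thread, so there is only one thread to wake */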
- for_each_member_device(ca, c, i)
- if (ca->copygc_thread)
- wake_up_process(ca->copygc_thread);
+ if (c->copygc_thread)
+ wake_up_process(c->copygc_thread);
return ret;
}
sysfs_strtoul(pd_controllers_update_seconds,
c->pd_controllers_update_seconds);
sysfs_pd_controller_store(rebalance, &c->rebalance.pd);
+ sysfs_pd_controller_store(copy_gc, &c->copygc_pd);
sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);
/* Debugging: */
-#define BCH_DEBUG_PARAM(name, description) sysfs_strtoul(name, c->name);
- BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
-
if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
/* Debugging: */
if (attr == &sysfs_trigger_journal_flush)
- bch2_journal_meta_async(&c->journal, NULL);
+ bch2_journal_meta(&c->journal);
if (attr == &sysfs_trigger_btree_coalesce)
bch2_coalesce(c);
- if (attr == &sysfs_trigger_gc)
+ if (attr == &sysfs_trigger_gc) {
+ /*
+	 * Full gc is currently incompatible with the btree key cache:
+ */
+#if 0
+ down_read(&c->state_lock);
bch2_gc(c, NULL, false, false);
-
- if (attr == &sysfs_trigger_alloc_write) {
- bool wrote;
-
- bch2_alloc_write(c, 0, &wrote);
+ up_read(&c->state_lock);
+#else
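+		/* presumably a lighter-weight stand-in: gc_gens only collects bucket oldest-gen info */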
+ bch2_gc_gens(c);
+#endif
}
if (attr == &sysfs_prune_cache) {
sc.nr_to_scan = strtoul_or_return(buf);
c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
}
+
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
if (threads_str &&
!(ret = kstrtouint(threads_str, 10, &threads)) &&
!(ret = bch2_strtoull_h(nr_str, &nr)))
- bch2_btree_perf_test(c, test, nr, threads);
- else
- size = ret;
+ ret = bch2_btree_perf_test(c, test, nr, threads);
kfree(tmp);
+
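+		/* bch2_btree_perf_test() now returns an error code; propagate it as the result of the write */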
+ if (ret)
+ size = ret;
}
#endif
return size;
}
-
-STORE(bch2_fs)
-{
- struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
-
- mutex_lock(&c->state_lock);
- size = __bch2_fs_store(kobj, attr, buf, size);
- mutex_unlock(&c->state_lock);
-
- return size;
-}
SYSFS_OPS(bch2_fs);
struct attribute *bch2_fs_files[] = {
&sysfs_journal_pins,
&sysfs_btree_updates,
&sysfs_dirty_btree_nodes,
+ &sysfs_btree_cache,
+ &sysfs_btree_key_cache,
+ &sysfs_btree_transactions,
+ &sysfs_stripes_heap,
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,
&sysfs_trigger_journal_flush,
&sysfs_trigger_btree_coalesce,
&sysfs_trigger_gc,
- &sysfs_trigger_alloc_write,
&sysfs_prune_cache,
&sysfs_copy_gc_enabled,
&sysfs_rebalance_enabled,
&sysfs_rebalance_work,
sysfs_pd_controller_files(rebalance),
+ sysfs_pd_controller_files(copy_gc),
&sysfs_new_stripes,
- &sysfs_internal_uuid,
-
-#define BCH_DEBUG_PARAM(name, description) &sysfs_##name,
- BCH_DEBUG_PARAMS()
-#undef BCH_DEBUG_PARAM
+ &sysfs_io_timers_read,
+ &sysfs_io_timers_write,
+ &sysfs_internal_uuid,
NULL
};
SHOW(bch2_fs_time_stats)
{
struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
-#define x(name) \
- if (attr == &sysfs_time_stat_##name) \
- return bch2_time_stats_print(&c->times[BCH_TIME_##name],\
- buf, PAGE_SIZE);
+#define x(name) \
+ if (attr == &sysfs_time_stat_##name) { \
+ bch2_time_stats_to_text(&out, &c->times[BCH_TIME_##name]);\
+ return out.pos - buf; \
+ }
BCH_TIME_STATS()
#undef x
return cmp_int(*l, *r);
}
-static ssize_t show_quantiles(struct bch_fs *c, struct bch_dev *ca,
- char *buf, bucket_map_fn *fn, void *private)
+static int quantiles_to_text(struct printbuf *out,
+ struct bch_fs *c, struct bch_dev *ca,
+ bucket_map_fn *fn, void *private)
{
size_t i, n;
/* Compute 31 quantiles */
unsigned q[31], *p;
- ssize_t ret = 0;
down_read(&ca->bucket_lock);
n = ca->mi.nbuckets;
vfree(p);
for (i = 0; i < ARRAY_SIZE(q); i++)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret,
- "%u ", q[i]);
- buf[ret - 1] = '\n';
-
- return ret;
+ pr_buf(out, "%u ", q[i]);
+ pr_buf(out, "\n");
+ return 0;
}
-static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)
+static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
enum alloc_reserve i;
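+	/* freelist_lock has moved from the device to struct bch_fs, hence ca->fs */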
- spin_lock(&ca->freelist_lock);
+ spin_lock(&ca->fs->freelist_lock);
- pr_buf(&out, "free_inc:\t%zu\t%zu\n",
+ pr_buf(out, "free_inc:\t%zu\t%zu\n",
fifo_used(&ca->free_inc),
ca->free_inc.size);
for (i = 0; i < RESERVE_NR; i++)
- pr_buf(&out, "free[%u]:\t%zu\t%zu\n", i,
+ pr_buf(out, "free[%u]:\t%zu\t%zu\n", i,
fifo_used(&ca->free[i]),
ca->free[i].size);
- spin_unlock(&ca->freelist_lock);
-
- return out.pos - buf;
+ spin_unlock(&ca->fs->freelist_lock);
}
-static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
+static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
+ struct bch_dev_usage stats = bch2_dev_usage_read(ca);
unsigned i, nr[BCH_DATA_NR];
memset(nr, 0, sizeof(nr));
for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
nr[c->open_buckets[i].type]++;
- return scnprintf(buf, PAGE_SIZE,
- "free_inc: %zu/%zu\n"
- "free[RESERVE_BTREE]: %zu/%zu\n"
- "free[RESERVE_MOVINGGC]: %zu/%zu\n"
- "free[RESERVE_NONE]: %zu/%zu\n"
- "buckets:\n"
- " capacity: %llu\n"
- " alloc: %llu\n"
- " sb: %llu\n"
- " journal: %llu\n"
- " meta: %llu\n"
- " user: %llu\n"
- " cached: %llu\n"
- " erasure coded: %llu\n"
- " available: %lli\n"
- "sectors:\n"
- " sb: %llu\n"
- " journal: %llu\n"
- " meta: %llu\n"
- " user: %llu\n"
- " cached: %llu\n"
- " fragmented: %llu\n"
- " copygc threshold: %llu\n"
- "freelist_wait: %s\n"
- "open buckets: %u/%u (reserved %u)\n"
- "open_buckets_wait: %s\n"
- "open_buckets_btree: %u\n"
- "open_buckets_user: %u\n"
- "btree reserve cache: %u\n",
- fifo_used(&ca->free_inc), ca->free_inc.size,
- fifo_used(&ca->free[RESERVE_BTREE]), ca->free[RESERVE_BTREE].size,
- fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
- fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
- ca->mi.nbuckets - ca->mi.first_bucket,
- stats.buckets_alloc,
- stats.buckets[BCH_DATA_SB],
- stats.buckets[BCH_DATA_JOURNAL],
- stats.buckets[BCH_DATA_BTREE],
- stats.buckets[BCH_DATA_USER],
- stats.buckets[BCH_DATA_CACHED],
- stats.buckets_ec,
- ca->mi.nbuckets - ca->mi.first_bucket - stats.buckets_unavailable,
- stats.sectors[BCH_DATA_SB],
- stats.sectors[BCH_DATA_JOURNAL],
- stats.sectors[BCH_DATA_BTREE],
- stats.sectors[BCH_DATA_USER],
- stats.sectors[BCH_DATA_CACHED],
- stats.sectors_fragmented,
- ca->copygc_threshold,
- c->freelist_wait.list.first ? "waiting" : "empty",
- c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
- BTREE_NODE_OPEN_BUCKET_RESERVE,
- c->open_buckets_wait.list.first ? "waiting" : "empty",
- nr[BCH_DATA_BTREE],
- nr[BCH_DATA_USER],
- c->btree_reserve_cache_nr);
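+	/* per-data-type table: buckets, sectors and fragmented sectors for each entry in bch2_data_types[] */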
+ pr_buf(out,
+ "\t\t buckets\t sectors fragmented\n"
+ "capacity%16llu\n",
+ ca->mi.nbuckets - ca->mi.first_bucket);
+
+ for (i = 1; i < BCH_DATA_NR; i++)
+ pr_buf(out, "%-8s%16llu%16llu%16llu\n",
+ bch2_data_types[i], stats.d[i].buckets,
+ stats.d[i].sectors, stats.d[i].fragmented);
+
+ pr_buf(out,
+ "ec\t%16llu\n"
+ "available%15llu\n"
+ "alloc\t%16llu\n"
+ "\n"
+ "free_inc\t\t%zu/%zu\n"
+ "free[RESERVE_MOVINGGC]\t%zu/%zu\n"
+ "free[RESERVE_NONE]\t%zu/%zu\n"
+ "freelist_wait\t\t%s\n"
+ "open buckets\t\t%u/%u (reserved %u)\n"
+ "open_buckets_wait\t%s\n"
+ "open_buckets_btree\t%u\n"
+ "open_buckets_user\t%u\n"
+ "btree reserve cache\t%u\n",
+ stats.buckets_ec,
+ __dev_buckets_available(ca, stats),
+ stats.buckets_alloc,
+ fifo_used(&ca->free_inc), ca->free_inc.size,
+ fifo_used(&ca->free[RESERVE_MOVINGGC]), ca->free[RESERVE_MOVINGGC].size,
+ fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
+ c->freelist_wait.list.first ? "waiting" : "empty",
+ c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
+ BTREE_NODE_OPEN_BUCKET_RESERVE,
+ c->open_buckets_wait.list.first ? "waiting" : "empty",
+ nr[BCH_DATA_btree],
+ nr[BCH_DATA_user],
+ c->btree_reserve_cache_nr);
}
static const char * const bch2_rw[] = {
NULL
};
-static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
+static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
int rw, i;
for (rw = 0; rw < 2; rw++) {
- pr_buf(&out, "%s:\n", bch2_rw[rw]);
+ pr_buf(out, "%s:\n", bch2_rw[rw]);
for (i = 1; i < BCH_DATA_NR; i++)
- pr_buf(&out, "%-12s:%12llu\n",
+ pr_buf(out, "%-12s:%12llu\n",
bch2_data_types[i],
percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
}
-
- return out.pos - buf;
}
SHOW(bch2_dev)
bch2_disk_path_to_text(&out, &c->disk_sb,
ca->mi.group - 1);
mutex_unlock(&c->sb_lock);
- } else {
- pr_buf(&out, "none");
}
pr_buf(&out, "\n");
return out.pos - buf;
}
- sysfs_pd_controller_show(copy_gc, &ca->copygc_pd);
-
if (attr == &sysfs_cache_replacement_policy) {
bch2_string_opt_to_text(&out,
bch2_cache_replacement_policies,
return out.pos - buf;
}
- if (attr == &sysfs_iodone)
- return show_dev_iodone(ca, buf);
+ if (attr == &sysfs_iodone) {
+ dev_iodone_to_text(&out, ca);
+ return out.pos - buf;
+ }
sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));
- if (attr == &sysfs_io_latency_stats_read)
- return bch2_time_stats_print(&ca->io_latency[READ], buf, PAGE_SIZE);
- if (attr == &sysfs_io_latency_stats_write)
- return bch2_time_stats_print(&ca->io_latency[WRITE], buf, PAGE_SIZE);
+ if (attr == &sysfs_io_latency_stats_read) {
+ bch2_time_stats_to_text(&out, &ca->io_latency[READ]);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_io_latency_stats_write) {
+ bch2_time_stats_to_text(&out, &ca->io_latency[WRITE]);
+ return out.pos - buf;
+ }
sysfs_printf(congested, "%u%%",
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
* 100 / CONGESTED_MAX);
if (attr == &sysfs_bucket_quantiles_last_read)
- return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 0);
+ return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 0) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_last_write)
- return show_quantiles(c, ca, buf, bucket_last_io_fn, (void *) 1);
+ return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 1) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_fragmentation)
- return show_quantiles(c, ca, buf, bucket_sectors_used_fn, NULL);
+ return quantiles_to_text(&out, c, ca, bucket_sectors_used_fn, NULL) ?: out.pos - buf;
if (attr == &sysfs_bucket_quantiles_oldest_gen)
- return show_quantiles(c, ca, buf, bucket_oldest_gen_fn, NULL);
+ return quantiles_to_text(&out, c, ca, bucket_oldest_gen_fn, NULL) ?: out.pos - buf;
- if (attr == &sysfs_reserve_stats)
- return show_reserve_stats(ca, buf);
- if (attr == &sysfs_alloc_debug)
- return show_dev_alloc_debug(ca, buf);
+ if (attr == &sysfs_reserve_stats) {
+ reserve_stats_to_text(&out, ca);
+ return out.pos - buf;
+ }
+ if (attr == &sysfs_alloc_debug) {
+ dev_alloc_debug_to_text(&out, ca);
+ return out.pos - buf;
+ }
return 0;
}
struct bch_fs *c = ca->fs;
struct bch_member *mi;
- sysfs_pd_controller_store(copy_gc, &ca->copygc_pd);
-
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
/* debug: */
&sysfs_alloc_debug,
&sysfs_wake_allocator,
-
- sysfs_pd_controller_files(copy_gc),
NULL
};