/*
 * sysfs_hprint() - in a sysfs show() method, if the attribute being read is
 * sysfs_<file>, format @val in human-readable units into @buf and return the
 * byte count.  Expands in a context where `attr` and `buf` are in scope.
 *
 * Resolved leftover diff markers to the new printbuf-based implementation:
 * output is bounded to PAGE_SIZE via _PBUF(), unlike the old unbounded
 * strcat() into buf.
 */
#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file) {					\
		struct printbuf out = _PBUF(buf, PAGE_SIZE);		\
		bch2_hprint(&out, val);					\
		pr_buf(&out, "\n");					\
		/* bytes written == distance advanced into buf */	\
		return out.pos - buf;					\
	}								\
} while (0)
/* sysfs attribute declarations (write-only triggers plus one rw knob). */
write_attribute(trigger_journal_flush);
write_attribute(trigger_btree_coalesce);
write_attribute(trigger_gc);
write_attribute(trigger_alloc_write);	/* resolved: new attr from patch */
write_attribute(prune_cache);

rw_attribute(btree_gc_periodic);
static ssize_t show_fs_alloc_debug(struct bch_fs *c, char *buf)
{
struct printbuf out = _PBUF(buf, PAGE_SIZE);
- struct bch_fs_usage stats = bch2_fs_usage_read(c);
- unsigned replicas, type;
-
- pr_buf(&out, "capacity:\t\t%llu\n", c->capacity);
-
- for (replicas = 0; replicas < ARRAY_SIZE(stats.replicas); replicas++) {
- pr_buf(&out, "%u replicas:\n", replicas + 1);
-
- for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
- pr_buf(&out, "\t%s:\t\t%llu\n",
- bch2_data_types[type],
- stats.replicas[replicas].data[type]);
- pr_buf(&out, "\terasure coded:\t%llu\n",
- stats.replicas[replicas].ec_data);
- pr_buf(&out, "\treserved:\t%llu\n",
- stats.replicas[replicas].persistent_reserved);
+ struct bch_fs_usage *fs_usage = bch2_fs_usage_read(c);
+ unsigned i;
+
+ if (!fs_usage)
+ return -ENOMEM;
+
+ pr_buf(&out, "capacity:\t\t\t%llu\n", c->capacity);
+
+ pr_buf(&out, "hidden:\t\t\t\t%llu\n",
+ fs_usage->hidden);
+ pr_buf(&out, "data:\t\t\t\t%llu\n",
+ fs_usage->data);
+ pr_buf(&out, "cached:\t\t\t\t%llu\n",
+ fs_usage->cached);
+ pr_buf(&out, "reserved:\t\t\t%llu\n",
+ fs_usage->reserved);
+ pr_buf(&out, "nr_inodes:\t\t\t%llu\n",
+ fs_usage->nr_inodes);
+ pr_buf(&out, "online reserved:\t\t%llu\n",
+ fs_usage->online_reserved);
+
+ for (i = 0;
+ i < ARRAY_SIZE(fs_usage->persistent_reserved);
+ i++) {
+ pr_buf(&out, "%u replicas:\n", i + 1);
+ pr_buf(&out, "\treserved:\t\t%llu\n",
+ fs_usage->persistent_reserved[i]);
}
- pr_buf(&out, "bucket usage\n");
+ for (i = 0; i < c->replicas.nr; i++) {
+ struct bch_replicas_entry *e =
+ cpu_replicas_entry(&c->replicas, i);
- for (type = BCH_DATA_SB; type < BCH_DATA_NR; type++)
- pr_buf(&out, "\t%s:\t\t%llu\n",
- bch2_data_types[type],
- stats.buckets[type]);
+ pr_buf(&out, "\t");
+ bch2_replicas_entry_to_text(&out, e);
+ pr_buf(&out, ":\t%llu\n", fs_usage->replicas[i]);
+ }
+
+ percpu_up_read_preempt_enable(&c->mark_lock);
- pr_buf(&out, "online reserved:\t%llu\n",
- stats.s.online_reserved);
+ kfree(fs_usage);
return out.pos - buf;
}
compressed_sectors_compressed = 0,
compressed_sectors_uncompressed = 0;
- if (!bch2_fs_running(c))
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, k)
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
- if (!bch2_fs_running(c))
+ if (!test_bit(BCH_FS_STARTED, &c->flags))
return -EPERM;
/* Debugging: */
if (attr == &sysfs_trigger_gc)
bch2_gc(c, NULL, false);
+ if (attr == &sysfs_trigger_alloc_write) {
+ bool wrote;
+
+ bch2_alloc_write(c, false, &wrote);
+ }
+
if (attr == &sysfs_prune_cache) {
struct shrink_control sc;
&sysfs_trigger_journal_flush,
&sysfs_trigger_btree_coalesce,
&sysfs_trigger_gc,
+ &sysfs_trigger_alloc_write,
&sysfs_prune_cache,
&sysfs_copy_gc_enabled,
for (i = bch2_opt_table;
i < bch2_opt_table + bch2_opts_nr;
i++) {
- if (i->mode == OPT_INTERNAL)
+ if (!(i->mode & (OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME)))
continue;
ret = sysfs_create_file(kobj, &i->attr);
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
	unsigned i, nr[BCH_DATA_NR];

	/* Count open buckets by data type for the new stats lines. */
	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].type]++;

	/*
	 * NOTE(review): intermediate patch hunks were elided in this view —
	 * format lines between "free_inc" and "copygc threshold" (and their
	 * matching arguments) are not visible here; verify the format string
	 * and argument list line up against the full file.
	 */
	return scnprintf(buf, PAGE_SIZE,
		"free_inc:		%zu/%zu\n"
		"	copygc threshold:	%llu\n"
		"freelist_wait:		%s\n"
		"open buckets:		%u/%u (reserved %u)\n"
		"open_buckets_wait:	%s\n"
		"open_buckets_btree:	%u\n"
		"open_buckets_user:	%u\n"
		"btree reserve cache:	%u\n",
		fifo_used(&ca->free_inc),		ca->free_inc.size,
		fifo_used(&ca->free[RESERVE_BTREE]),	ca->free[RESERVE_BTREE].size,
		fifo_used(&ca->free[RESERVE_MOVINGGC]),	ca->free[RESERVE_MOVINGGC].size,
		stats.sectors_fragmented,
		ca->copygc_threshold,
		c->freelist_wait.list.first		? "waiting" : "empty",
		c->open_buckets_nr_free, OPEN_BUCKETS_COUNT,
		BTREE_NODE_OPEN_BUCKET_RESERVE,
		c->open_buckets_wait.list.first		? "waiting" : "empty",
		nr[BCH_DATA_BTREE],
		nr[BCH_DATA_USER],
		c->btree_reserve_cache_nr);
}
static const char * const bch2_rw[] = {
static ssize_t show_dev_iodone(struct bch_dev *ca, char *buf)
{
struct printbuf out = _PBUF(buf, PAGE_SIZE);
- int rw, i, cpu;
+ int rw, i;
for (rw = 0; rw < 2; rw++) {
pr_buf(&out, "%s:\n", bch2_rw[rw]);
- for (i = 1; i < BCH_DATA_NR; i++) {
- u64 n = 0;
-
- for_each_possible_cpu(cpu)
- n += per_cpu_ptr(ca->io_done, cpu)->sectors[rw][i];
-
+ for (i = 1; i < BCH_DATA_NR; i++)
pr_buf(&out, "%-12s:%12llu\n",
- bch2_data_types[i], n << 9);
- }
+ bch2_data_types[i],
+ percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
}
return out.pos - buf;