#include "bcachefs.h"
#include "alloc_background.h"
+#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
return strtoi_h(buf, &var) ?: (ssize_t) size; \
} while (0)
-write_attribute(trigger_journal_flush);
write_attribute(trigger_gc);
write_attribute(prune_cache);
rw_attribute(btree_gc_periodic);
read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
-read_attribute(block_size);
-read_attribute(btree_node_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
read_attribute(durability);
read_attribute(btree_avg_write_size);
-read_attribute(bucket_quantiles_last_read);
-read_attribute(bucket_quantiles_last_write);
-read_attribute(bucket_quantiles_fragmentation);
-read_attribute(bucket_quantiles_oldest_gen);
-
read_attribute(reserve_stats);
read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(extent_migrate_done);
read_attribute(extent_migrate_raced);
-rw_attribute(journal_write_delay_ms);
-rw_attribute(journal_reclaim_delay_ms);
-
rw_attribute(discard);
-rw_attribute(cache_replacement_policy);
rw_attribute(label);
rw_attribute(copy_gc_enabled);
read_attribute(io_timers_read);
read_attribute(io_timers_write);
+read_attribute(data_jobs);
+
#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */
return nr ? div64_u64(sectors, nr) : 0;
}
-static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
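+/*
+ * Print one line per in-flight data job on c->data_progress_list: the job
+ * name, the data type and btree it is working on, and its current position.
+ */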
+static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
{
- struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
+ long ret = 0;
+ struct bch_move_stats *stats;
- if (!fs_usage)
- return -ENOMEM;
-
- bch2_fs_usage_to_text(out, c, fs_usage);
-
- percpu_up_read(&c->mark_lock);
+ mutex_lock(&c->data_progress_lock);
+ list_for_each_entry(stats, &c->data_progress_list, list) {
+ pr_buf(out, "%s: data type %s btree_id %s position: ",
+ stats->name,
+ bch2_data_types[stats->data_type],
+ bch2_btree_ids[stats->btree_id]);
+ bch2_bpos_to_text(out, stats->pos);
+ pr_buf(out, "%s", "\n");
+ }
- kfree(fs_usage);
- return 0;
+ mutex_unlock(&c->data_progress_lock);
+ return ret;
}
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
struct btree_trans trans;
- struct btree_iter *iter;
+ struct btree_iter iter;
struct bkey_s_c k;
- u64 nr_uncompressed_extents = 0, uncompressed_sectors = 0,
+ enum btree_id id;
+ u64 nr_uncompressed_extents = 0,
nr_compressed_extents = 0,
+ nr_incompressible_extents = 0,
+ uncompressed_sectors = 0,
+ incompressible_sectors = 0,
compressed_sectors_compressed = 0,
compressed_sectors_uncompressed = 0;
int ret;
bch2_trans_init(&trans, c, 0, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
- if (k.k->type == KEY_TYPE_extent) {
- struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+ for (id = 0; id < BTREE_ID_NR; id++) {
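+ /* Skip btrees whose keys never carry extent pointers */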
+ if (!((1U << id) & BTREE_ID_HAS_PTRS))
+ continue;
+
+ for_each_btree_key(&trans, iter, id, POS_MIN,
+ BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+ struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
struct extent_ptr_decoded p;
-
- extent_for_each_ptr_decode(e, p, entry) {
- if (!crc_is_compressed(p.crc)) {
- nr_uncompressed_extents++;
- uncompressed_sectors += e.k->size;
- } else {
- nr_compressed_extents++;
+ bool compressed = false, uncompressed = false, incompressible = false;
+
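+ /* Classify the extent by the compression type of each of its pointers */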
+ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+ switch (p.crc.compression_type) {
+ case BCH_COMPRESSION_TYPE_none:
+ uncompressed = true;
+ uncompressed_sectors += k.k->size;
+ break;
+ case BCH_COMPRESSION_TYPE_incompressible:
+ incompressible = true;
+ incompressible_sectors += k.k->size;
+ break;
+ default:
compressed_sectors_compressed +=
p.crc.compressed_size;
compressed_sectors_uncompressed +=
p.crc.uncompressed_size;
+ compressed = true;
+ break;
}
-
- /* only looking at the first ptr */
- break;
}
+
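+ /* Count each extent once: incompressible > uncompressed > compressed */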
+ if (incompressible)
+ nr_incompressible_extents++;
+ else if (uncompressed)
+ nr_uncompressed_extents++;
+ else if (compressed)
+ nr_compressed_extents++;
}
+ bch2_trans_iter_exit(&trans, &iter);
+ }
+
+ bch2_trans_exit(&trans);
- ret = bch2_trans_exit(&trans) ?: ret;
if (ret)
return ret;
- pr_buf(out,
- "uncompressed data:\n"
- " nr extents: %llu\n"
- " size (bytes): %llu\n"
- "compressed data:\n"
- " nr extents: %llu\n"
- " compressed size (bytes): %llu\n"
- " uncompressed size (bytes): %llu\n",
- nr_uncompressed_extents,
- uncompressed_sectors << 9,
- nr_compressed_extents,
- compressed_sectors_compressed << 9,
- compressed_sectors_uncompressed << 9);
+ pr_buf(out, "uncompressed:\n");
+ pr_buf(out, " nr extents: %llu\n", nr_uncompressed_extents);
+ pr_buf(out, " size: ");
+ bch2_hprint(out, uncompressed_sectors << 9);
+ pr_buf(out, "\n");
+
+ pr_buf(out, "compressed:\n");
+ pr_buf(out, " nr extents: %llu\n", nr_compressed_extents);
+ pr_buf(out, " compressed size: ");
+ bch2_hprint(out, compressed_sectors_compressed << 9);
+ pr_buf(out, "\n");
+ pr_buf(out, " uncompressed size: ");
+ bch2_hprint(out, compressed_sectors_uncompressed << 9);
+ pr_buf(out, "\n");
+
+ pr_buf(out, "incompressible:\n");
+ pr_buf(out, " nr extents: %llu\n", nr_incompressible_extents);
+ pr_buf(out, " size: ");
+ bch2_hprint(out, incompressible_sectors << 9);
+ pr_buf(out, "\n");
return 0;
}
sysfs_print(minor, c->minor);
sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);
- sysfs_print(journal_write_delay_ms, c->journal.write_delay_ms);
- sysfs_print(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
-
- sysfs_print(block_size, block_bytes(c));
- sysfs_print(btree_node_size, btree_bytes(c));
sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
/* Debugging: */
- if (attr == &sysfs_alloc_debug)
- return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;
-
if (attr == &sysfs_journal_debug) {
bch2_journal_debug_to_text(&out, &c->journal);
return out.pos - buf;
return out.pos - buf;
}
+ if (attr == &sysfs_data_jobs) {
+ data_progress_to_text(&out, c);
+ return out.pos - buf;
+ }
+
return 0;
}
{
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
- sysfs_strtoul(journal_write_delay_ms, c->journal.write_delay_ms);
- sysfs_strtoul(journal_reclaim_delay_ms, c->journal.reclaim_delay_ms);
-
if (attr == &sysfs_btree_gc_periodic) {
ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
?: (ssize_t) size;
/* Debugging: */
- if (attr == &sysfs_trigger_journal_flush)
- bch2_journal_meta(&c->journal);
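+ /* The attributes handled below require the filesystem to be read-write */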
+ if (!test_bit(BCH_FS_RW, &c->flags))
+ return -EROFS;
+
+ if (attr == &sysfs_prune_cache) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
+ }
if (attr == &sysfs_trigger_gc) {
/*
#endif
}
- if (attr == &sysfs_prune_cache) {
- struct shrink_control sc;
-
- sc.gfp_mask = GFP_KERNEL;
- sc.nr_to_scan = strtoul_or_return(buf);
- c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
- }
-
#ifdef CONFIG_BCACHEFS_TESTS
if (attr == &sysfs_perf_test) {
char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
struct attribute *bch2_fs_files[] = {
&sysfs_minor,
- &sysfs_block_size,
- &sysfs_btree_node_size,
&sysfs_btree_cache_size,
&sysfs_btree_avg_write_size,
- &sysfs_journal_write_delay_ms,
- &sysfs_journal_reclaim_delay_ms,
-
&sysfs_promote_whole_extents,
&sysfs_compression_stats,
SYSFS_OPS(bch2_fs_internal);
struct attribute *bch2_fs_internal_files[] = {
- &sysfs_alloc_debug,
&sysfs_journal_debug,
&sysfs_journal_pins,
&sysfs_btree_updates,
&sysfs_btree_cache,
&sysfs_btree_key_cache,
&sysfs_btree_transactions,
+ &sysfs_new_stripes,
&sysfs_stripes_heap,
&sysfs_open_buckets,
+ &sysfs_io_timers_read,
+ &sysfs_io_timers_write,
+
+ &sysfs_trigger_gc,
+ &sysfs_prune_cache,
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,
&sysfs_extent_migrate_raced,
- &sysfs_trigger_journal_flush,
- &sysfs_trigger_gc,
&sysfs_gc_gens_pos,
- &sysfs_prune_cache,
&sysfs_copy_gc_enabled,
&sysfs_copy_gc_wait,
&sysfs_rebalance_work,
sysfs_pd_controller_files(rebalance),
- &sysfs_new_stripes,
-
- &sysfs_io_timers_read,
- &sysfs_io_timers_write,
+ &sysfs_data_jobs,
&sysfs_internal_uuid,
NULL
if (!tmp)
return -ENOMEM;
- ret = bch2_opt_parse(c, opt, strim(tmp), &v);
+ ret = bch2_opt_parse(c, NULL, opt, strim(tmp), &v);
kfree(tmp);
if (ret < 0)
if (ret < 0)
return ret;
- if (opt->set_sb != SET_NO_SB_OPT) {
- mutex_lock(&c->sb_lock);
- opt->set_sb(c->disk_sb.sb, v);
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
- }
-
+ bch2_opt_set_sb(c, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if ((id == Opt_background_target ||
for (i = bch2_opt_table;
i < bch2_opt_table + bch2_opts_nr;
i++) {
- if (!(i->mode & (OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME)))
+ if (!(i->flags & OPT_FS))
continue;
ret = sysfs_create_file(kobj, &i->attr);
NULL
};
-typedef unsigned (bucket_map_fn)(struct bch_fs *, struct bch_dev *,
- size_t, void *);
-
-static unsigned bucket_last_io_fn(struct bch_fs *c, struct bch_dev *ca,
- size_t b, void *private)
-{
- int rw = (private ? 1 : 0);
-
- return atomic64_read(&c->io_clock[rw].now) - bucket(ca, b)->io_time[rw];
-}
-
-static unsigned bucket_sectors_used_fn(struct bch_fs *c, struct bch_dev *ca,
- size_t b, void *private)
-{
- struct bucket *g = bucket(ca, b);
- return bucket_sectors_used(g->mark);
-}
-
-static unsigned bucket_oldest_gen_fn(struct bch_fs *c, struct bch_dev *ca,
- size_t b, void *private)
-{
- return bucket_gc_gen(bucket(ca, b));
-}
-
-static int unsigned_cmp(const void *_l, const void *_r)
-{
- const unsigned *l = _l;
- const unsigned *r = _r;
-
- return cmp_int(*l, *r);
-}
-
-static int quantiles_to_text(struct printbuf *out,
- struct bch_fs *c, struct bch_dev *ca,
- bucket_map_fn *fn, void *private)
-{
- size_t i, n;
- /* Compute 31 quantiles */
- unsigned q[31], *p;
-
- down_read(&ca->bucket_lock);
- n = ca->mi.nbuckets;
-
- p = vzalloc(n * sizeof(unsigned));
- if (!p) {
- up_read(&ca->bucket_lock);
- return -ENOMEM;
- }
-
- for (i = ca->mi.first_bucket; i < n; i++)
- p[i] = fn(c, ca, i, private);
-
- sort(p, n, sizeof(unsigned), unsigned_cmp, NULL);
- up_read(&ca->bucket_lock);
-
- while (n &&
- !p[n - 1])
- --n;
-
- for (i = 0; i < ARRAY_SIZE(q); i++)
- q[i] = p[n * (i + 1) / (ARRAY_SIZE(q) + 1)];
-
- vfree(p);
-
- for (i = 0; i < ARRAY_SIZE(q); i++)
- pr_buf(out, "%u ", q[i]);
- pr_buf(out, "\n");
- return 0;
-}
-
static void reserve_stats_to_text(struct printbuf *out, struct bch_dev *ca)
{
enum alloc_reserve i;
memset(nr, 0, sizeof(nr));
for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
- nr[c->open_buckets[i].type]++;
+ nr[c->open_buckets[i].data_type]++;
pr_buf(out,
"\t\t buckets\t sectors fragmented\n"
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
sysfs_print(bucket_size, bucket_bytes(ca));
- sysfs_print(block_size, block_bytes(c));
sysfs_print(first_bucket, ca->mi.first_bucket);
sysfs_print(nbuckets, ca->mi.nbuckets);
sysfs_print(durability, ca->mi.durability);
return out.pos - buf;
}
- if (attr == &sysfs_cache_replacement_policy) {
- bch2_string_opt_to_text(&out,
- bch2_cache_replacement_policies,
- ca->mi.replacement);
- pr_buf(&out, "\n");
- return out.pos - buf;
- }
-
if (attr == &sysfs_state_rw) {
bch2_string_opt_to_text(&out, bch2_member_states,
ca->mi.state);
clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
* 100 / CONGESTED_MAX);
- if (attr == &sysfs_bucket_quantiles_last_read)
- return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 0) ?: out.pos - buf;
- if (attr == &sysfs_bucket_quantiles_last_write)
- return quantiles_to_text(&out, c, ca, bucket_last_io_fn, (void *) 1) ?: out.pos - buf;
- if (attr == &sysfs_bucket_quantiles_fragmentation)
- return quantiles_to_text(&out, c, ca, bucket_sectors_used_fn, NULL) ?: out.pos - buf;
- if (attr == &sysfs_bucket_quantiles_oldest_gen)
- return quantiles_to_text(&out, c, ca, bucket_oldest_gen_fn, NULL) ?: out.pos - buf;
-
if (attr == &sysfs_reserve_stats) {
reserve_stats_to_text(&out, ca);
return out.pos - buf;
mutex_unlock(&c->sb_lock);
}
- if (attr == &sysfs_cache_replacement_policy) {
- ssize_t v = __sysfs_match_string(bch2_cache_replacement_policies, -1, buf);
-
- if (v < 0)
- return v;
-
- mutex_lock(&c->sb_lock);
- mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
-
- if ((unsigned) v != BCH_MEMBER_REPLACEMENT(mi)) {
- SET_BCH_MEMBER_REPLACEMENT(mi, v);
- bch2_write_super(c);
- }
- mutex_unlock(&c->sb_lock);
- }
-
if (attr == &sysfs_label) {
char *tmp;
int ret;
struct attribute *bch2_dev_files[] = {
&sysfs_uuid,
&sysfs_bucket_size,
- &sysfs_block_size,
&sysfs_first_bucket,
&sysfs_nbuckets,
&sysfs_durability,
/* settings: */
&sysfs_discard,
- &sysfs_cache_replacement_policy,
&sysfs_state_rw,
&sysfs_label,
&sysfs_io_latency_stats_write,
&sysfs_congested,
- /* alloc info - other stats: */
- &sysfs_bucket_quantiles_last_read,
- &sysfs_bucket_quantiles_last_write,
- &sysfs_bucket_quantiles_fragmentation,
- &sysfs_bucket_quantiles_oldest_gen,
-
&sysfs_reserve_stats,
/* debug: */