bch2_io_clock_exit(&c->io_clock[READ]);
bch2_fs_compress_exit(c);
percpu_free_rwsem(&c->mark_lock);
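+ /* may be NULL if allocation failed early; free_percpu() handles that */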
+ free_percpu(c->usage_scratch);
free_percpu(c->usage[0]);
free_percpu(c->pcpu);
mempool_exit(&c->btree_iters_pool);
{
struct bch_sb_field_members *mi;
struct bch_fs *c;
- unsigned i, iter_size;
+ unsigned i, iter_size, fs_usage_size;
const char *err;
pr_verbose_init(opts, "");
(btree_blocks(c) + 1) * 2 *
sizeof(struct btree_node_iter_set);
+ fs_usage_size = sizeof(struct bch_fs_usage) +
+ sizeof(u64) * c->replicas.nr;
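+ /* i.e. the struct plus one trailing u64 counter per entry in c->replicas */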
+
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
max(offsetof(struct btree_read_bio, bio),
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
- !(c->usage[0] = alloc_percpu(struct bch_fs_usage)) ||
+ !(c->usage[0] = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
+ !(c->usage_scratch = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||
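
For readers outside the tree: the hunk above swaps the type-based alloc_percpu() helper for the raw __alloc_percpu(size, align) call because the allocation size is no longer a compile-time constant. The sizing arithmetic suggests struct bch_fs_usage now ends in a runtime-sized tail of u64 counters, one per entry in c->replicas. Below is a minimal sketch of that kernel idiom; the names struct counters, alloc_counters() and free_counters() are illustrative and not part of this patch:

	#include <linux/percpu.h>
	#include <linux/types.h>

	/* fixed header followed by a tail whose length is chosen at runtime */
	struct counters {
		u64	gen;
		u64	vals[];		/* one entry per tracked item */
	};

	static struct counters __percpu *alloc_counters(unsigned nr)
	{
		size_t size = sizeof(struct counters) + sizeof(u64) * nr;

		/*
		 * alloc_percpu(T) only works for compile-time sizes;
		 * __alloc_percpu() takes an explicit size and alignment.
		 * u64 alignment keeps the trailing counters naturally aligned.
		 */
		return __alloc_percpu(size, sizeof(u64));
	}

	static void free_counters(struct counters __percpu *c)
	{
		free_percpu(c);		/* NULL-safe, so fine on error paths */
	}

The separate usage_scratch buffer, allocated with the same size, presumably gives the accounting paths a preallocated temporary of the same shape, so they can accumulate deltas without allocating in hot paths.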