#include "btree_gc.h"
#include "buckets.h"
#include "error.h"
+#include "movinggc.h"
#include <linux/preempt.h>
#include <trace/events/bcachefs.h>
__bch2_fs_usage_read(c);
unsigned i;
- for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
if ((s64) stats.s[i].data[S_META] < 0)
panic("replicas %u meta underflow: %lli\n",
i + 1, stats.s[i].data[S_META]);
struct bch_dev_usage stats =
__bch2_dev_usage_read(ca);
u64 n = ca->mi.nbuckets - ca->mi.first_bucket;
+ unsigned i;
- BUG_ON(stats.buckets[S_META] > n);
- BUG_ON(stats.buckets[S_DIRTY] > n);
- BUG_ON(stats.buckets_cached > n);
+ for (i = 0; i < ARRAY_SIZE(stats.buckets); i++)
+ BUG_ON(stats.buckets[i] > n);
BUG_ON(stats.buckets_alloc > n);
BUG_ON(stats.buckets_unavailable > n);
}
{
u16 last_seq_ondisk = c->journal.last_seq_ondisk;
struct bch_dev *ca;
+ struct bucket_array *buckets;
struct bucket *g;
struct bucket_mark m;
unsigned i;
- for_each_member_device(ca, c, i)
- for_each_bucket(g, ca) {
+ for_each_member_device(ca, c, i) {
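+ /* The bucket array can be resized; pin it while we walk it: */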
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets) {
bucket_cmpxchg(g, m, ({
if (!m.journal_seq_valid ||
bucket_needs_journal_commit(m, last_seq_ondisk))
m.journal_seq_valid = 0;
}));
}
+ up_read(&ca->bucket_lock);
+ }
}
#define bch2_usage_add(_acc, _stats) \
#define bch2_usage_read_raw(_stats) \
({ \
- typeof(*this_cpu_ptr(_stats)) _acc = { 0 }; \
+ typeof(*this_cpu_ptr(_stats)) _acc; \
int cpu; \
\
+ memset(&_acc, 0, sizeof(_acc)); \
+ \
for_each_possible_cpu(cpu) \
bch2_usage_add(&_acc, per_cpu_ptr((_stats), cpu)); \
\
c->usage_percpu);
}
-static inline int is_meta_bucket(struct bucket_mark m)
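+/*
+ * A sum of filesystem usage, weighted by replication: stats.s[i] tracks
+ * sectors stored with i + 1 replicas, so each entry is scaled by i + 1 to
+ * get sectors of disk consumed:
+ */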
+struct fs_usage_sum {
+ u64 data;
+ u64 reserved;
+};
+
+static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
+{
+ struct fs_usage_sum sum = { 0 };
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
+ sum.data += (stats.s[i].data[S_META] +
+ stats.s[i].data[S_DIRTY]) * (i + 1);
+ sum.reserved += stats.s[i].persistent_reserved * (i + 1);
+ }
+
+ sum.reserved += stats.online_reserved;
+ return sum;
+}
+
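+/*
+ * reserve_factor() inflates the sector count by ~1/2^RESERVE_FACTOR so some
+ * space is always held in reserve; avail_factor() is (approximately) its
+ * inverse, scaling free space by 2^RESERVE_FACTOR / (2^RESERVE_FACTOR + 1):
+ */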
+#define RESERVE_FACTOR 6
+
+static u64 reserve_factor(u64 r)
+{
+ return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
+}
+
+static u64 avail_factor(u64 r)
+{
+ return (r << RESERVE_FACTOR) / ((1 << RESERVE_FACTOR) + 1);
+}
+
+u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
{
- return m.data_type != BUCKET_DATA;
+ struct fs_usage_sum sum = __fs_usage_sum(stats);
+
+ return sum.data + reserve_factor(sum.reserved);
}
-static inline int is_dirty_bucket(struct bucket_mark m)
+u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
{
- return m.data_type == BUCKET_DATA && !!m.dirty_sectors;
+ return min(c->capacity, __bch2_fs_sectors_used(c, stats));
}
-static inline int is_cached_bucket(struct bucket_mark m)
+u64 bch2_fs_sectors_free(struct bch_fs *c, struct bch_fs_usage stats)
{
- return m.data_type == BUCKET_DATA &&
- !m.dirty_sectors && !!m.cached_sectors;
+ return avail_factor(c->capacity - bch2_fs_sectors_used(c, stats));
}
static inline int is_unavailable_bucket(struct bucket_mark m)
return !is_available_bucket(m);
}
-static inline enum s_alloc bucket_type(struct bucket_mark m)
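+/*
+ * Internal fragmentation: sectors of a live user data bucket not filled with
+ * data, reclaimable only by copygc:
+ */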
+static inline int is_fragmented_bucket(struct bucket_mark m,
+ struct bch_dev *ca)
+{
+ if (!m.owned_by_allocator &&
+ m.data_type == BCH_DATA_USER &&
+ bucket_sectors_used(m))
+ return max_t(int, 0, (int) ca->mi.bucket_size -
+ bucket_sectors_used(m));
+ return 0;
+}
+
+static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
- return is_meta_bucket(m) ? S_META : S_DIRTY;
+ return m.cached_sectors && !m.dirty_sectors
+ ? BCH_DATA_CACHED
+ : m.data_type;
}
static bool bucket_became_unavailable(struct bch_fs *c,
{
return is_available_bucket(old) &&
!is_available_bucket(new) &&
- c && c->gc_pos.phase == GC_PHASE_DONE;
+ (!c || c->gc_pos.phase == GC_PHASE_DONE);
}
void bch2_fs_usage_apply(struct bch_fs *c,
stats->online_reserved -= added;
}
- lg_local_lock(&c->usage_lock);
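+ /*
+  * usage_lock is a percpu_rw_semaphore: the read side also disables
+  * preemption, so the this_cpu_ptr() accesses below are stable:
+  */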
+ percpu_down_read_preempt_disable(&c->usage_lock);
/* online_reserved not subject to gc: */
this_cpu_ptr(c->usage_percpu)->online_reserved +=
stats->online_reserved;
bch2_usage_add(this_cpu_ptr(c->usage_percpu), stats);
bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
memset(stats, 0, sizeof(*stats));
}
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, struct bucket_mark old,
- struct bucket_mark new)
+ struct bucket_mark old, struct bucket_mark new)
{
struct bch_dev_usage *dev_usage;
- BUG_ON((g - ca->buckets) < ca->mi.first_bucket ||
- (g - ca->buckets) >= ca->mi.nbuckets);
+ if (c)
+ percpu_rwsem_assert_held(&c->usage_lock);
- bch2_fs_inconsistent_on(old.data_type && new.data_type &&
- old.data_type != new.data_type, c,
- "different types of metadata in same bucket: %u, %u",
- old.data_type, new.data_type);
+ if (old.data_type && new.data_type &&
+ old.data_type != new.data_type) {
+ BUG_ON(!c);
+ bch2_fs_inconsistent(c,
+ "different types of data in same bucket: %s, %s",
+ bch2_data_types[old.data_type],
+ bch2_data_types[new.data_type]);
+ }
- preempt_disable();
dev_usage = this_cpu_ptr(ca->usage_percpu);
- dev_usage->buckets[S_META] +=
- is_meta_bucket(new) - is_meta_bucket(old);
- dev_usage->buckets[S_DIRTY] +=
- is_dirty_bucket(new) - is_dirty_bucket(old);
- dev_usage->buckets_cached +=
- is_cached_bucket(new) - is_cached_bucket(old);
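+ /* Move the bucket from its old type's count to its new type's: */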
+ dev_usage->buckets[bucket_type(old)]--;
+ dev_usage->buckets[bucket_type(new)]++;
+
dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
dev_usage->buckets_unavailable +=
is_unavailable_bucket(new) - is_unavailable_bucket(old);
- dev_usage->sectors[bucket_type(old)] -= old.dirty_sectors;
- dev_usage->sectors[bucket_type(new)] += new.dirty_sectors;
- dev_usage->sectors_cached +=
+ dev_usage->sectors[old.data_type] -= old.dirty_sectors;
+ dev_usage->sectors[new.data_type] += new.dirty_sectors;
+ dev_usage->sectors[BCH_DATA_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
- preempt_enable();
+ dev_usage->sectors_fragmented +=
+ is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
if (!is_available_bucket(old) && is_available_bucket(new))
bch2_wake_allocator(ca);
({ \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
- bch2_dev_usage_update(c, ca, g, _old, new); \
+ bch2_dev_usage_update(c, ca, _old, new); \
_old; \
})
bool bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, struct bucket_mark *old)
+ size_t b, struct bucket_mark *old)
{
+ struct bucket *g;
struct bucket_mark new;
- lg_local_lock(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->usage_lock);
+
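+ /* The bucket array can move; only look buckets up while locked: */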
+ g = bucket(ca, b);
+
*old = bucket_data_cmpxchg(c, ca, g, new, ({
- if (!is_available_bucket(new))
+ if (!is_available_bucket(new)) {
+ percpu_up_read_preempt_enable(&c->usage_lock);
return false;
+ }
new.owned_by_allocator = 1;
- new.touched_this_mount = 1;
new.data_type = 0;
new.cached_sectors = 0;
new.dirty_sectors = 0;
new.gen++;
}));
- lg_local_unlock(&c->usage_lock);
if (!old->owned_by_allocator && old->cached_sectors)
- trace_invalidate(ca, bucket_to_sector(ca, g - ca->buckets),
+ trace_invalidate(ca, bucket_to_sector(ca, b),
old->cached_sectors);
return true;
}
-bool bch2_mark_alloc_bucket_startup(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g)
-{
- struct bucket_mark new, old;
-
- lg_local_lock(&c->usage_lock);
- old = bucket_data_cmpxchg(c, ca, g, new, ({
- if (new.touched_this_mount ||
- !is_available_bucket(new))
- return false;
-
- new.owned_by_allocator = 1;
- new.touched_this_mount = 1;
- }));
- lg_local_unlock(&c->usage_lock);
-
- return true;
-}
-
void bch2_mark_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, bool owned_by_allocator,
+ size_t b, bool owned_by_allocator,
struct gc_pos pos, unsigned flags)
{
+ struct bucket *g;
struct bucket_mark old, new;
- lg_local_lock(&c->usage_lock);
+ percpu_rwsem_assert_held(&c->usage_lock);
+ g = bucket(ca, b);
+
if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
- gc_will_visit(c, pos)) {
- lg_local_unlock(&c->usage_lock);
+ gc_will_visit(c, pos))
return;
- }
old = bucket_data_cmpxchg(c, ca, g, new, ({
- new.touched_this_mount = 1;
new.owned_by_allocator = owned_by_allocator;
}));
- lg_local_unlock(&c->usage_lock);
BUG_ON(!owned_by_allocator && !old.owned_by_allocator &&
c->gc_pos.phase == GC_PHASE_DONE);
} while (0)
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, enum bucket_data_type type,
- struct gc_pos pos, unsigned flags)
+ size_t b, enum bch_data_type type,
+ unsigned sectors, struct gc_pos pos,
+ unsigned flags)
{
+ struct bucket *g;
struct bucket_mark old, new;
BUG_ON(!type);
- lg_local_lock(&c->usage_lock);
- if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
- gc_will_visit(c, pos)) {
- lg_local_unlock(&c->usage_lock);
- return;
+ if (likely(c)) {
+ percpu_rwsem_assert_held(&c->usage_lock);
+
+ if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
+ gc_will_visit(c, pos))
+ return;
}
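+ /*
+  * When called without a filesystem (c == NULL, during device bringup),
+  * usage_lock isn't held; RCU pins the bucket array for the lookup instead:
+  */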
+ rcu_read_lock();
+
+ g = bucket(ca, b);
old = bucket_data_cmpxchg(c, ca, g, new, ({
- saturated_add(ca, new.dirty_sectors, ca->mi.bucket_size,
+ saturated_add(ca, new.dirty_sectors, sectors,
GC_MAX_SECTORS_USED);
new.data_type = type;
- new.touched_this_mount = 1;
}));
- lg_local_unlock(&c->usage_lock);
- if (old.data_type != type &&
- (old.data_type ||
- old.cached_sectors ||
- old.dirty_sectors))
- bch_err(c, "bucket %zu has multiple types of data (%u, %u)",
- g - ca->buckets, old.data_type, new.data_type);
+ rcu_read_unlock();
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
{
struct bucket_mark old, new;
unsigned saturated;
- struct bch_dev *ca = c->devs[ptr->dev];
- struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
- unsigned data_type = type == S_META
- ? BUCKET_BTREE : BUCKET_DATA;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+ struct bucket *g = PTR_BUCKET(ca, ptr);
+ enum bch_data_type data_type = type == S_META
+ ? BCH_DATA_BTREE : BCH_DATA_USER;
u64 v;
if (crc.compression_type) {
if (flags & BCH_BUCKET_MARK_GC_WILL_VISIT) {
if (journal_seq)
bucket_cmpxchg(g, new, ({
- new.touched_this_mount = 1;
new.journal_seq_valid = 1;
new.journal_seq = journal_seq;
}));
return;
}
- v = READ_ONCE(g->_mark.counter);
+ v = atomic64_read(&g->_mark.v);
do {
- new.counter = old.counter = v;
+ new.v.counter = old.v.counter = v;
saturated = 0;
/*
new.data_type = data_type;
}
- new.touched_this_mount = 1;
-
if (flags & BCH_BUCKET_MARK_NOATOMIC) {
g->_mark = new;
break;
}
- } while ((v = cmpxchg(&g->_mark.counter,
- old.counter,
- new.counter)) != old.counter);
-
- bch2_dev_usage_update(c, ca, g, old, new);
+ } while ((v = atomic64_cmpxchg(&g->_mark.v,
+ old.v.counter,
+ new.v.counter)) != old.v.counter);
- if (old.data_type != data_type &&
- (old.data_type ||
- old.cached_sectors ||
- old.dirty_sectors))
- bch_err(c, "bucket %zu has multiple types of data (%u, %u)",
- g - ca->buckets, old.data_type, new.data_type);
+ bch2_dev_usage_update(c, ca, old, new);
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
* (e.g. the btree node lock, or the relevant allocator lock).
*/
- lg_local_lock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->usage_lock);
if (!(flags & BCH_BUCKET_MARK_GC_LOCK_HELD) &&
gc_will_visit(c, pos))
flags |= BCH_BUCKET_MARK_GC_WILL_VISIT;
+ if (!stats)
+ stats = this_cpu_ptr(c->usage_percpu);
+
switch (k.k->type) {
case BCH_EXTENT:
case BCH_EXTENT_CACHED: {
replicas += !ptr->cached;
}
- BUG_ON(replicas >= BCH_REPLICAS_MAX);
-
- if (replicas)
+ if (replicas) {
+ BUG_ON(replicas - 1 >= ARRAY_SIZE(stats->s));
stats->s[replicas - 1].data[type] += sectors;
+ }
break;
}
case BCH_RESERVATION: {
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
- if (r.v->nr_replicas)
+ if (r.v->nr_replicas) {
+ BUG_ON(r.v->nr_replicas - 1 >= ARRAY_SIZE(stats->s));
stats->s[r.v->nr_replicas - 1].persistent_reserved += sectors;
+ }
break;
}
}
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
}
/* Disk reservations: */
static u64 __recalc_sectors_available(struct bch_fs *c)
{
- u64 avail;
int cpu;
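+ /* Discard each cpu's cached reservation so it gets recounted: */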
for_each_possible_cpu(cpu)
per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0;
- avail = c->capacity - bch2_fs_sectors_used(c);
-
- avail <<= RESERVE_FACTOR;
- avail /= (1 << RESERVE_FACTOR) + 1;
- return avail;
+ return bch2_fs_sectors_free(c, bch2_fs_usage_read(c));
}
/* Used by gc when it's starting: */
void bch2_recalc_sectors_available(struct bch_fs *c)
{
- lg_global_lock(&c->usage_lock);
+ percpu_down_write(&c->usage_lock);
atomic64_set(&c->sectors_available, __recalc_sectors_available(c));
- lg_global_unlock(&c->usage_lock);
+ percpu_up_write(&c->usage_lock);
}
void __bch2_disk_reservation_put(struct bch_fs *c, struct disk_reservation *res)
{
- lg_local_lock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->usage_lock);
this_cpu_sub(c->usage_percpu->online_reserved,
res->sectors);
bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
res->sectors = 0;
}
s64 sectors_available;
int ret;
- sectors *= res->nr_replicas;
-
- lg_local_lock(&c->usage_lock);
+ percpu_down_read_preempt_disable(&c->usage_lock);
stats = this_cpu_ptr(c->usage_percpu);
if (sectors <= stats->available_cache)
get = min((u64) sectors + SECTORS_CACHE, old);
if (get < sectors) {
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
goto recalculate;
}
} while ((v = atomic64_cmpxchg(&c->sectors_available,
bch2_disk_reservations_verify(c, flags);
bch2_fs_stats_verify(c);
- lg_local_unlock(&c->usage_lock);
+ percpu_up_read_preempt_enable(&c->usage_lock);
return 0;
recalculate:
else if (!down_read_trylock(&c->gc_lock))
return -EINTR;
}
- lg_global_lock(&c->usage_lock);
+ percpu_down_write(&c->usage_lock);
sectors_available = __recalc_sectors_available(c);
if (sectors <= sectors_available ||
}
bch2_fs_stats_verify(c);
- lg_global_unlock(&c->usage_lock);
+ percpu_up_write(&c->usage_lock);
+
if (!(flags & BCH_DISK_RESERVATION_GC_LOCK_HELD))
up_read(&c->gc_lock);
return ret;
}
-int bch2_disk_reservation_get(struct bch_fs *c,
- struct disk_reservation *res,
- unsigned sectors, int flags)
+/* Startup/shutdown: */
+
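+/*
+ * The bucket array is reached via RCU (or under bucket_lock), so old arrays
+ * are freed with call_rcu() after a resize:
+ */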
+static void buckets_free_rcu(struct rcu_head *rcu)
{
- res->sectors = 0;
- res->gen = c->capacity_gen;
- res->nr_replicas = (flags & BCH_DISK_RESERVATION_METADATA)
- ? c->opts.metadata_replicas
- : c->opts.data_replicas;
+ struct bucket_array *buckets =
+ container_of(rcu, struct bucket_array, rcu);
+
+ kvpfree(buckets,
+ sizeof(struct bucket_array) +
+ buckets->nbuckets * sizeof(struct bucket));
+}
+
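+/*
+ * Resize a device's bucket array and the allocator structures sized from it:
+ * allocate new copies up front, swap them in under the relevant locks, then
+ * free the old versions (the bucket array itself via RCU):
+ */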
+int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
+{
+ struct bucket_array *buckets = NULL, *old_buckets = NULL;
+ unsigned long *buckets_dirty = NULL;
+ u8 *oldest_gens = NULL;
+ alloc_fifo free[RESERVE_NR];
+ alloc_fifo free_inc;
+ alloc_heap alloc_heap;
+ copygc_heap copygc_heap;
+
+ size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
+ ca->mi.bucket_size / c->opts.btree_node_size);
+ /* XXX: these should be tunable */
+ size_t reserve_none = max_t(size_t, 4, ca->mi.nbuckets >> 9);
+ size_t copygc_reserve = max_t(size_t, 16, ca->mi.nbuckets >> 7);
+ size_t free_inc_reserve = copygc_reserve / 2;
+ bool resize = ca->buckets != NULL,
+ start_copygc = ca->copygc_thread != NULL;
+ int ret = -ENOMEM;
+ unsigned i;
+
+ memset(&free, 0, sizeof(free));
+ memset(&free_inc, 0, sizeof(free_inc));
+ memset(&alloc_heap, 0, sizeof(alloc_heap));
+ memset(&copygc_heap, 0, sizeof(copygc_heap));
+
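+ /* Allocate everything before taking any locks: */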
+ if (!(buckets = kvpmalloc(sizeof(struct bucket_array) +
+ nbuckets * sizeof(struct bucket),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(oldest_gens = kvpmalloc(nbuckets * sizeof(u8),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !(buckets_dirty = kvpmalloc(BITS_TO_LONGS(nbuckets) *
+ sizeof(unsigned long),
+ GFP_KERNEL|__GFP_ZERO)) ||
+ !init_fifo(&free[RESERVE_BTREE], btree_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_MOVINGGC],
+ copygc_reserve, GFP_KERNEL) ||
+ !init_fifo(&free[RESERVE_NONE], reserve_none, GFP_KERNEL) ||
+ !init_fifo(&free_inc, free_inc_reserve, GFP_KERNEL) ||
+ !init_heap(&alloc_heap, free_inc_reserve, GFP_KERNEL) ||
+ !init_heap(&copygc_heap, copygc_reserve, GFP_KERNEL))
+ goto err;
+
+ buckets->first_bucket = ca->mi.first_bucket;
+ buckets->nbuckets = nbuckets;
+
+ bch2_copygc_stop(ca);
+
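+ /*
+  * When resizing an existing device, exclude gc, bucket array readers and
+  * percpu usage updates while we swap in the new arrays:
+  */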
+ if (resize) {
+ down_write(&c->gc_lock);
+ down_write(&ca->bucket_lock);
+ percpu_down_write(&c->usage_lock);
+ }
+
+ old_buckets = bucket_array(ca);
+
+ if (resize) {
+ size_t n = min(buckets->nbuckets, old_buckets->nbuckets);
+
+ memcpy(buckets->b,
+ old_buckets->b,
+ n * sizeof(struct bucket));
+ memcpy(oldest_gens,
+ ca->oldest_gens,
+ n * sizeof(u8));
+ memcpy(buckets_dirty,
+ ca->buckets_dirty,
+ BITS_TO_LONGS(n) * sizeof(unsigned long));
+ }
+
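+ /*
+  * Publish the new array; stash the old one in @buckets so the common
+  * exit path below frees it via call_rcu():
+  */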
+ rcu_assign_pointer(ca->buckets, buckets);
+ buckets = old_buckets;
+
+ swap(ca->oldest_gens, oldest_gens);
+ swap(ca->buckets_dirty, buckets_dirty);
+
+ if (resize)
+ percpu_up_write(&c->usage_lock);
+
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++) {
+ fifo_move(&free[i], &ca->free[i]);
+ swap(ca->free[i], free[i]);
+ }
+ fifo_move(&free_inc, &ca->free_inc);
+ swap(ca->free_inc, free_inc);
+ spin_unlock(&c->freelist_lock);
+
+ /* with gc lock held, alloc_heap can't be in use: */
+ swap(ca->alloc_heap, alloc_heap);
+
+ /* and we shut down copygc: */
+ swap(ca->copygc_heap, copygc_heap);
+
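+ /* The swapped-out arrays are the old size; free them as such below: */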
+ nbuckets = ca->mi.nbuckets;
+
+ if (resize) {
+ up_write(&ca->bucket_lock);
+ up_write(&c->gc_lock);
+ }
+
+ if (start_copygc &&
+ bch2_copygc_start(c, ca))
+ bch_err(ca, "error restarting copygc thread");
+
+ ret = 0;
+err:
+ free_heap(&copygc_heap);
+ free_heap(&alloc_heap);
+ free_fifo(&free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&free[i]);
+ kvpfree(buckets_dirty,
+ BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+ kvpfree(oldest_gens,
+ nbuckets * sizeof(u8));
+ if (buckets)
+ call_rcu(&buckets->rcu, buckets_free_rcu);
+
+ return ret;
+}
+
+void bch2_dev_buckets_free(struct bch_dev *ca)
+{
+ unsigned i;
+
+ free_heap(&ca->copygc_heap);
+ free_heap(&ca->alloc_heap);
+ free_fifo(&ca->free_inc);
+ for (i = 0; i < RESERVE_NR; i++)
+ free_fifo(&ca->free[i]);
+ kvpfree(ca->buckets_dirty,
+ BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
+ kvpfree(ca->oldest_gens, ca->mi.nbuckets * sizeof(u8));
+ kvpfree(rcu_dereference_protected(ca->buckets, 1),
+ sizeof(struct bucket_array) +
+ ca->mi.nbuckets * sizeof(struct bucket));
+
+ free_percpu(ca->usage_percpu);
+}
+
+int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+ if (!(ca->usage_percpu = alloc_percpu(struct bch_dev_usage)))
+ return -ENOMEM;
- return bch2_disk_reservation_add(c, res, sectors, flags);
+ return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}