X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Falloc_foreground.c;h=c7db89b92dbffede6ccada0f07ecad57f9580576;hb=cfa816bf3f823a3bedfedd8e214ea929c5c755fe;hp=3a67ac0d913512f89e2ff61d2934af27155076cd;hpb=0206d42daf4c4bd3bbcfa15a2bef34319524db49;p=bcachefs-tools-debian

diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index 3a67ac0..c7db89b 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -29,15 +29,24 @@
 #include "journal.h"
 #include "movinggc.h"
 #include "nocow_locking.h"
+#include "trace.h"
 
 #include <linux/math64.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
 
-const char * const bch2_alloc_reserves[] = {
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+					   struct mutex *lock)
+{
+	if (!mutex_trylock(lock)) {
+		bch2_trans_unlock(trans);
+		mutex_lock(lock);
+	}
+}
+
+const char * const bch2_watermarks[] = {
 #define x(t) #t,
-	BCH_ALLOC_RESERVES()
+	BCH_WATERMARKS()
 #undef x
 	NULL
 };
@@ -97,7 +106,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
 	if (ob->ec) {
-		ec_stripe_new_put(c, ob->ec);
+		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
 		return;
 	}
 
@@ -150,9 +159,7 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
 	return ob;
 }
 
-static void open_bucket_free_unused(struct bch_fs *c,
-				    struct write_point *wp,
-				    struct open_bucket *ob)
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
 	BUG_ON(c->open_buckets_partial_nr >=
 	       ARRAY_SIZE(c->open_buckets_partial));
@@ -181,13 +188,13 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 	return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-	switch (reserve) {
-	case RESERVE_btree:
-	case RESERVE_btree_movinggc:
+	switch (watermark) {
+	case BCH_WATERMARK_btree:
+	case BCH_WATERMARK_btree_copygc:
 		return 0;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -196,7 +203,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 					      u64 bucket,
-					      enum alloc_reserve reserve,
+					      enum bch_watermark watermark,
 					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)
@@ -226,7 +233,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 	spin_lock(&c->freelist_lock);
 
-	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
 		if (cl)
 			closure_wait(&c->open_buckets_wait, cl);
 
@@ -277,7 +284,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 }
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
-					    enum alloc_reserve reserve, u64 free_entry,
+					    enum bch_watermark watermark, u64 free_entry,
 					    struct bucket_alloc_state *s,
 					    struct bkey_s_c freespace_k,
 					    struct closure *cl)
@@ -303,8 +310,9 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		goto err;
 	}
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
-	k = bch2_btree_iter_peek_slot(&iter);
+	k = bch2_bkey_get_iter(trans, &iter,
+			       BTREE_ID_alloc, POS(ca->dev_idx, b),
+			       BTREE_ITER_CACHED);
 	ret = bkey_err(k);
 	if (ret) {
 		ob = ERR_PTR(ret);
@@ -345,17 +353,17 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 
 	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
 		struct bch_backpointer bp;
-		u64 bp_offset = 0;
+		struct bpos bp_pos = POS_MIN;
 
 		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
-						&bp_offset, &bp,
+						&bp_pos, &bp,
 						BTREE_ITER_NOPRESERVE);
 		if (ret) {
 			ob = ERR_PTR(ret);
 			goto err;
 		}
 
-		if (bp_offset != U64_MAX) {
+		if (!bkey_eq(bp_pos, POS_MAX)) {
 			/*
 			 * Bucket may have data in it - we don't call
 			 * bch2_trans_inconsistent() because fsck hasn't
@@ -366,11 +374,12 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:
-	set_btree_iter_dontneed(&iter);
+	if (iter.trans && iter.path)
+		set_btree_iter_dontneed(&iter);
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
 	return ob;
@@ -385,7 +394,7 @@ err:
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
 			struct bch_dev *ca,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct bucket_alloc_state *s,
 			struct closure *cl)
 {
@@ -415,7 +424,7 @@ again:
 
 		s->buckets_seen++;
 
-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
 		if (ob)
 			break;
 	}
@@ -436,7 +445,7 @@ again:
 
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 						      struct bch_dev *ca,
-						      enum alloc_reserve reserve,
+						      enum bch_watermark watermark,
 						      struct bucket_alloc_state *s,
 						      struct closure *cl)
 {
@@ -465,7 +474,7 @@ again:
 
 		s->buckets_seen++;
 
-		ob = try_alloc_bucket(trans, ca, reserve,
+		ob = try_alloc_bucket(trans, ca, watermark,
 				      alloc_cursor, s, k, cl);
 		if (ob) {
 			iter.path->preserve = false;
@@ -498,7 +507,7 @@ again:
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 				      struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl,
 				      struct bch_dev_usage *usage)
 {
@@ -510,7 +519,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	bool waiting = false;
 again:
 	bch2_dev_usage_read_fast(ca, usage);
-	avail = dev_buckets_free(ca, *usage, reserve);
+	avail = dev_buckets_free(ca, *usage, watermark);
 
 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);
@@ -539,8 +548,8 @@ again:
 		closure_wake_up(&c->freelist_wait);
 alloc:
 	ob = likely(freespace)
-		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
-		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
 
 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);
@@ -555,7 +564,7 @@ err:
 
 	if (!IS_ERR(ob))
 		trace_and_count(c, bucket_alloc, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				ob->bucket,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -566,7 +575,7 @@ err:
 				"");
 	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
 		trace_and_count(c, bucket_alloc_fail, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				0,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -580,14 +589,14 @@ err:
 }
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl)
 {
 	struct bch_dev_usage usage;
 	struct open_bucket *ob;
 
 	bch2_trans_do(c, NULL, NULL, 0,
-		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark,
 							cl, &usage)));
 	return ob;
 }
@@ -620,7 +629,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
 			       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_none);
+	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
@@ -658,9 +667,11 @@ static int add_new_bucket(struct bch_fs *c,
 		bch_dev_bkey_exists(c, ob->dev)->mi.durability;
 
 	BUG_ON(*nr_effective >= nr_replicas);
+	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
 
 	__clear_bit(ob->dev, devs_may_alloc->d);
-	*nr_effective	+= durability;
+	*nr_effective	+= (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
+		? durability : 1;
 	*have_cache	|= !durability;
 
 	ob_push(c, ptrs, ob);
@@ -679,8 +690,9 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 		      unsigned nr_replicas,
 		      unsigned *nr_effective,
 		      bool *have_cache,
+		      unsigned flags,
 		      enum bch_data_type data_type,
-		      enum alloc_reserve reserve,
+		      enum bch_watermark watermark,
 		      struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
@@ -713,7 +725,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}
 
-		ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);
@@ -729,7 +741,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 
 		if (add_new_bucket(c, ptrs, devs_may_alloc,
 				   nr_replicas, nr_effective,
-				   have_cache, 0, ob)) {
+				   have_cache, flags, ob)) {
 			ret = 0;
 			break;
 		}
@@ -754,7 +766,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 unsigned nr_replicas,
 			 unsigned *nr_effective,
 			 bool *have_cache,
-			 enum alloc_reserve reserve,
+			 enum bch_watermark watermark,
 			 unsigned flags,
 			 struct closure *cl)
 {
@@ -772,7 +784,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	if (ec_open_bucket(c, ptrs))
 		return 0;
 
-	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
 	if (IS_ERR(h))
 		return PTR_ERR(h);
 	if (!h)
@@ -796,7 +808,7 @@ got_bucket:
 
 	ob->ec_idx	= ec_idx;
 	ob->ec		= h->s;
-	ec_stripe_new_get(h->s);
+	ec_stripe_new_get(h->s, STRIPE_REF_io);
 
 	ret = add_new_bucket(c, ptrs, devs_may_alloc,
 			     nr_replicas, nr_effective,
@@ -823,7 +835,7 @@ static bool want_bucket(struct bch_fs *c,
 		return false;
 
 	if (!ca->mi.durability &&
-	    (wp->data_type != BCH_DATA_user || !*have_cache))
+	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
 		return false;
 
 	if (ec != (ob->ec != NULL))
@@ -867,7 +879,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 				    unsigned nr_replicas,
 				    unsigned *nr_effective,
 				    bool *have_cache, bool ec,
-				    enum alloc_reserve reserve,
+				    enum bch_watermark watermark,
 				    unsigned flags)
 {
 	int i, ret = 0;
@@ -877,6 +889,9 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 
 	spin_lock(&c->freelist_lock);
 
+	if (!c->open_buckets_partial_nr)
+		goto unlock;
+
 	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
 		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
 
@@ -886,7 +901,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			u64 avail;
 
 			bch2_dev_usage_read_fast(ca, &usage);
-			avail = dev_buckets_free(ca, usage, reserve);
+			avail = dev_buckets_free(ca, usage, watermark);
 			if (!avail)
 				continue;
 
@@ -902,7 +917,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 				break;
 		}
 	}
-
+unlock:
 	spin_unlock(&c->freelist_lock);
 	return ret;
 }
@@ -916,7 +931,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *_cl)
 {
@@ -927,9 +942,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 	unsigned i;
 	int ret;
 
-	rcu_read_lock();
 	devs = target_rw_devs(c, wp->data_type, target);
-	rcu_read_unlock();
 
 	/* Don't allocate from devices we already have pointers to: */
 	for (i = 0; i < devs_have->nr; i++)
@@ -949,7 +962,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 
 	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
 				       nr_replicas, nr_effective,
-				       have_cache, erasure_code, reserve, flags);
+				       have_cache, erasure_code, watermark, flags);
 	if (ret)
 		return ret;
 
@@ -958,7 +971,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 					     target,
 					     nr_replicas, nr_effective,
 					     have_cache,
-					     reserve, flags, _cl);
+					     watermark, flags, _cl);
 	} else {
retry_blocking:
 		/*
@@ -967,7 +980,7 @@ retry_blocking:
 		 */
 		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					nr_replicas, nr_effective, have_cache,
-					wp->data_type, reserve, cl);
+					flags, wp->data_type, watermark, cl);
 		if (ret &&
 		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
@@ -990,7 +1003,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *cl)
 {
@@ -1000,7 +1013,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 		ret = __open_bucket_add_buckets(trans, ptrs, wp,
 				devs_have, target, erasure_code,
 				nr_replicas, nr_effective, have_cache,
-				reserve, flags, cl);
+				watermark, flags, cl);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
 		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
@@ -1013,49 +1026,100 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 	ret = __open_bucket_add_buckets(trans, ptrs, wp,
 			devs_have, target, false,
 			nr_replicas, nr_effective, have_cache,
-			reserve, flags, cl);
+			watermark, flags, cl);
 	return ret < 0 ? ret : 0;
 }
 
-void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
-				struct open_buckets *obs)
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+			       struct bch_dev *ca, bool ec)
 {
-	struct open_buckets ptrs = { .nr = 0 };
-	struct open_bucket *ob, *ob2;
-	unsigned i, j;
-
-	open_bucket_for_each(c, obs, ob, i) {
-		bool drop = !ca || ob->dev == ca->dev_idx;
+	if (ec) {
+		return ob->ec != NULL;
+	} else if (ca) {
+		bool drop = ob->dev == ca->dev_idx;
+		struct open_bucket *ob2;
+		unsigned i;
 
 		if (!drop && ob->ec) {
 			mutex_lock(&ob->ec->lock);
-			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
-				if (!ob->ec->blocks[j])
+			for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) {
+				if (!ob->ec->blocks[i])
 					continue;
 
-				ob2 = c->open_buckets + ob->ec->blocks[j];
+				ob2 = c->open_buckets + ob->ec->blocks[i];
 				drop |= ob2->dev == ca->dev_idx;
 			}
 			mutex_unlock(&ob->ec->lock);
 		}
 
-		if (drop)
-			bch2_open_bucket_put(c, ob);
-		else
-			ob_push(c, &ptrs, ob);
+		return drop;
+	} else {
+		return true;
 	}
-
-	*obs = ptrs;
 }
 
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
-			  struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+				 bool ec, struct write_point *wp)
 {
+	struct open_buckets ptrs = { .nr = 0 };
+	struct open_bucket *ob;
+	unsigned i;
+
 	mutex_lock(&wp->lock);
-	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
+	open_bucket_for_each(c, &wp->ptrs, ob, i)
+		if (should_drop_bucket(ob, c, ca, ec))
+			bch2_open_bucket_put(c, ob);
+		else
+			ob_push(c, &ptrs, ob);
+	wp->ptrs = ptrs;
 	mutex_unlock(&wp->lock);
 }
 
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+			    bool ec)
+{
+	unsigned i;
+
+	/* Next, close write points that point to this device... */
+	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+	mutex_lock(&c->btree_reserve_cache_lock);
+	while (c->btree_reserve_cache_nr) {
+		struct btree_alloc *a =
+			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+		bch2_open_buckets_put(c, &a->ob);
+	}
+	mutex_unlock(&c->btree_reserve_cache_lock);
+
+	spin_lock(&c->freelist_lock);
+	i = 0;
+	while (i < c->open_buckets_partial_nr) {
+		struct open_bucket *ob =
+			c->open_buckets + c->open_buckets_partial[i];
+
+		if (should_drop_bucket(ob, c, ca, ec)) {
+			--c->open_buckets_partial_nr;
+			swap(c->open_buckets_partial[i],
+			     c->open_buckets_partial[c->open_buckets_partial_nr]);
+			ob->on_partial_list = false;
+			spin_unlock(&c->freelist_lock);
+			bch2_open_bucket_put(c, ob);
+			spin_lock(&c->freelist_lock);
+		} else {
+			i++;
+		}
+	}
+	spin_unlock(&c->freelist_lock);
+
+	bch2_ec_stop_dev(c, ca);
+}
+
 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
 						 unsigned long write_point)
 {
@@ -1101,10 +1165,12 @@ static bool try_increase_writepoints(struct bch_fs *c)
 	return true;
 }
 
-static bool try_decrease_writepoints(struct bch_fs *c,
-				     unsigned old_nr)
+static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
 {
+	struct bch_fs *c = trans->c;
 	struct write_point *wp;
+	struct open_bucket *ob;
+	unsigned i;
 
 	mutex_lock(&c->write_points_hash_lock);
 	if (c->write_points_nr < old_nr) {
@@ -1123,19 +1189,13 @@ static bool try_decrease_writepoints(struct bch_fs *c,
 	hlist_del_rcu(&wp->node);
 	mutex_unlock(&c->write_points_hash_lock);
 
-	bch2_writepoint_stop(c, NULL, wp);
+	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+	open_bucket_for_each(c, &wp->ptrs, ob, i)
+		open_bucket_free_unused(c, ob);
+	mutex_unlock(&wp->lock);
 	return true;
 }
 
-static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
-					   struct mutex *lock)
-{
-	if (!mutex_trylock(lock)) {
-		bch2_trans_unlock(trans);
-		mutex_lock(lock);
-	}
-}
-
 static struct write_point *writepoint_find(struct btree_trans *trans,
 					   unsigned long write_point)
 {
@@ -1203,7 +1263,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 			     struct bch_devs_list *devs_have,
 			     unsigned nr_replicas,
 			     unsigned nr_replicas_required,
-			     enum alloc_reserve reserve,
+			     enum bch_watermark watermark,
 			     unsigned flags,
 			     struct closure *cl,
 			     struct write_point **wp_ret)
@@ -1217,6 +1277,8 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 	int ret;
 	int i;
 
+	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
+
 	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
 	ptrs.nr = 0;
@@ -1230,26 +1292,37 @@ retry:
 	if (wp->data_type != BCH_DATA_user)
 		have_cache = true;
 
-	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
-		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
-					      target, erasure_code,
-					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
-					      flags, cl);
-	} else {
+	if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      target, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
 					      flags, NULL);
 		if (!ret ||
 		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			goto alloc_done;
 
+		/* Don't retry from all devices if we're out of open buckets: */
+		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+			goto allocate_blocking;
+
+		/*
+		 * Only try to allocate cache (durability = 0 devices) from the
+		 * specified target:
+		 */
+		have_cache = true;
+
 		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 					      0, erasure_code,
 					      nr_replicas, &nr_effective,
-					      &have_cache, reserve,
+					      &have_cache, watermark,
+					      flags, cl);
+	} else {
+allocate_blocking:
+		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+					      target, erasure_code,
+					      nr_replicas, &nr_effective,
+					      &have_cache, watermark,
 					      flags, cl);
 	}
 alloc_done:
@@ -1267,7 +1340,7 @@ alloc_done:
 
 	/* Free buckets we didn't use: */
 	open_bucket_for_each(c, &wp->ptrs, ob, i)
-		open_bucket_free_unused(c, wp, ob);
+		open_bucket_free_unused(c, ob);
 
 	wp->ptrs = ptrs;
 
@@ -1284,13 +1357,13 @@ err:
 		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
 			ob_push(c, &ptrs, ob);
 		else
-			open_bucket_free_unused(c, wp, ob);
+			open_bucket_free_unused(c, ob);
 	wp->ptrs = ptrs;
 
 	mutex_unlock(&wp->lock);
 
 	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
-	    try_decrease_writepoints(c, write_points_nr))
+	    try_decrease_writepoints(trans, write_points_nr))
 		goto retry;
 
 	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
@@ -1380,14 +1453,16 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 
 static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
 {
+	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 	unsigned data_type = ob->data_type;
 	barrier(); /* READ_ONCE() doesn't work on bitfields */
 
-	prt_printf(out, "%zu ref %u %s %u:%llu gen %u",
+	prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
 		   ob - c->open_buckets,
 		   atomic_read(&ob->pin),
 		   data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
-		   ob->dev, ob->bucket, ob->gen);
+		   ob->dev, ob->bucket, ob->gen,
+		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
 	if (ob->ec)
 		prt_printf(out, " ec idx %llu", ob->ec->idx);
 	if (ob->on_partial_list