X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Falloc_foreground.c;h=a961df74d421707468ed0868932d25a59fc218fb;hb=e3e7f67b3ed89f5b3158142c29e66bb98f868ce2;hp=a32aaf51bb3e36dac0d1c13efbd3f19e3e3df58c;hpb=934a84dfaf719af82dadbbe0e2480baff03c905b;p=bcachefs-tools-debian
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index a32aaf5..a961df7 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -25,18 +25,28 @@
 #include "disk_groups.h"
 #include "ec.h"
 #include "error.h"
-#include "io.h"
+#include "io_write.h"
 #include "journal.h"
 #include "movinggc.h"
+#include "nocow_locking.h"
+#include "trace.h"
 
 #include <linux/math64.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
 
-const char * const bch2_alloc_reserves[] = {
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+					   struct mutex *lock)
+{
+	if (!mutex_trylock(lock)) {
+		bch2_trans_unlock(trans);
+		mutex_lock(lock);
+	}
+}
+
+const char * const bch2_watermarks[] = {
 #define x(t) #t,
-	BCH_ALLOC_RESERVES()
+	BCH_WATERMARKS()
 #undef x
 	NULL
 };
@@ -57,6 +67,17 @@ const char * const bch2_alloc_reserves[] = {
  * reference _after_ doing the index update that makes its allocation reachable.
  */
 
+void bch2_reset_alloc_cursors(struct bch_fs *c)
+{
+	struct bch_dev *ca;
+	unsigned i;
+
+	rcu_read_lock();
+	for_each_member_device_rcu(ca, c, i, NULL)
+		ca->alloc_cursor = 0;
+	rcu_read_unlock();
+}
+
 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
 {
 	open_bucket_idx_t idx = ob - c->open_buckets;
@@ -85,7 +106,7 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
 	if (ob->ec) {
-		bch2_ec_bucket_written(c, ob);
+		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
 		return;
 	}
 
@@ -138,30 +159,19 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
 	return ob;
 }
 
-static void open_bucket_free_unused(struct bch_fs *c,
-				    struct write_point *wp,
-				    struct open_bucket *ob)
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
-	bool may_realloc = wp->data_type == BCH_DATA_user;
-
-	BUG_ON(ca->open_buckets_partial_nr >
-	       ARRAY_SIZE(ca->open_buckets_partial));
-
-	if (ca->open_buckets_partial_nr <
-	    ARRAY_SIZE(ca->open_buckets_partial) &&
-	    may_realloc) {
-		spin_lock(&c->freelist_lock);
-		ob->on_partial_list = true;
-		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
-			ob - c->open_buckets;
-		spin_unlock(&c->freelist_lock);
+	BUG_ON(c->open_buckets_partial_nr >=
+	       ARRAY_SIZE(c->open_buckets_partial));
 
-		closure_wake_up(&c->open_buckets_wait);
-		closure_wake_up(&c->freelist_wait);
-	} else {
-		bch2_open_bucket_put(c, ob);
-	}
+	spin_lock(&c->freelist_lock);
+	ob->on_partial_list = true;
+	c->open_buckets_partial[c->open_buckets_partial_nr++] =
+		ob - c->open_buckets;
+	spin_unlock(&c->freelist_lock);
+
+	closure_wake_up(&c->open_buckets_wait);
+	closure_wake_up(&c->freelist_wait);
 }
 
 /* _only_ for allocating the journal on a new device: */
@@ -178,14 +188,16 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 	return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-	switch (reserve) {
-	case RESERVE_btree:
-	case RESERVE_btree_movinggc:
+	switch (watermark) {
+	case BCH_WATERMARK_reclaim:
 		return 0;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_btree:
+	case BCH_WATERMARK_btree_copygc:
 		return 
OPEN_BUCKETS_COUNT / 4; + case BCH_WATERMARK_copygc: + return OPEN_BUCKETS_COUNT / 3; default: return OPEN_BUCKETS_COUNT / 2; } @@ -193,8 +205,8 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve) static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, u64 bucket, - enum alloc_reserve reserve, - struct bch_alloc_v4 *a, + enum bch_watermark watermark, + const struct bch_alloc_v4 *a, struct bucket_alloc_state *s, struct closure *cl) { @@ -223,7 +235,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * spin_lock(&c->freelist_lock); - if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) { + if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) { if (cl) closure_wait(&c->open_buckets_wait, cl); @@ -247,7 +259,6 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * ob->valid = true; ob->sectors_free = ca->mi.bucket_size; - ob->alloc_reserve = reserve; ob->dev = ca->dev_idx; ob->gen = a->gen; ob->bucket = bucket; @@ -271,12 +282,11 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * } spin_unlock(&c->freelist_lock); - return ob; } static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, - enum alloc_reserve reserve, u64 free_entry, + enum bch_watermark watermark, u64 free_entry, struct bucket_alloc_state *s, struct bkey_s_c freespace_k, struct closure *cl) @@ -285,7 +295,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc struct btree_iter iter = { NULL }; struct bkey_s_c k; struct open_bucket *ob; - struct bch_alloc_v4 a; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a; u64 b = free_entry & ~(~0ULL << 56); unsigned genbits = free_entry >> 56; struct printbuf buf = PRINTBUF; @@ -301,32 +312,38 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc goto err; } - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED); - k = bch2_btree_iter_peek_slot(&iter); + k = bch2_bkey_get_iter(trans, &iter, + BTREE_ID_alloc, POS(ca->dev_idx, b), + BTREE_ITER_CACHED); ret = bkey_err(k); if (ret) { ob = ERR_PTR(ret); goto err; } - bch2_alloc_to_v4(k, &a); + a = bch2_alloc_to_v4(k, &a_convert); - if (genbits != (alloc_freespace_genbits(a) >> 56)) { - prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" - " freespace key ", - genbits, alloc_freespace_genbits(a) >> 56); + if (a->data_type != BCH_DATA_free) { + if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) { + ob = NULL; + goto err; + } + + prt_printf(&buf, "non free bucket in freespace btree\n" + " freespace key "); bch2_bkey_val_to_text(&buf, c, freespace_k); prt_printf(&buf, "\n "); bch2_bkey_val_to_text(&buf, c, k); bch2_trans_inconsistent(trans, "%s", buf.buf); ob = ERR_PTR(-EIO); goto err; - } - if (a.data_type != BCH_DATA_free) { - prt_printf(&buf, "non free bucket in freespace btree\n" - " freespace key "); + if (genbits != (alloc_freespace_genbits(*a) >> 56) && + c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) { + prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" + " freespace key ", + genbits, alloc_freespace_genbits(*a) >> 56); bch2_bkey_val_to_text(&buf, c, freespace_k); prt_printf(&buf, "\n "); bch2_bkey_val_to_text(&buf, c, k); @@ -335,19 +352,19 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans 
*trans, struct bc
 		goto err;
 	}
 
-	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
 		struct bch_backpointer bp;
-		u64 bp_offset = 0;
+		struct bpos bp_pos = POS_MIN;
 
 		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
-						&bp_offset, &bp,
+						&bp_pos, &bp,
 						BTREE_ITER_NOPRESERVE);
 		if (ret) {
 			ob = ERR_PTR(ret);
 			goto err;
 		}
 
-		if (bp_offset != U64_MAX) {
+		if (!bkey_eq(bp_pos, POS_MAX)) {
 			/*
 			 * Bucket may have data in it - we don't call
 			 * bch2_trans_inconsistent() because fsck hasn't
@@ -358,42 +375,17 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 			goto err;
 		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:
-	set_btree_iter_dontneed(&iter);
+	if (iter.trans && iter.path)
+		set_btree_iter_dontneed(&iter);
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
 	return ob;
 }
 
-static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
-						    enum alloc_reserve reserve)
-{
-	struct open_bucket *ob;
-	int i;
-
-	spin_lock(&c->freelist_lock);
-
-	for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
-		ob = c->open_buckets + ca->open_buckets_partial[i];
-
-		if (reserve <= ob->alloc_reserve) {
-			array_remove_item(ca->open_buckets_partial,
-					  ca->open_buckets_partial_nr,
-					  i);
-			ob->on_partial_list = false;
-			ob->alloc_reserve = reserve;
-			spin_unlock(&c->freelist_lock);
-			return ob;
-		}
-	}
-
-	spin_unlock(&c->freelist_lock);
-	return NULL;
-}
-
 /*
  * This path is for before the freespace btree is initialized:
  *
@@ -403,21 +395,32 @@ static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
 			struct bch_dev *ca,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct bucket_alloc_state *s,
 			struct closure *cl)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
+	struct btree_iter iter, citer;
+	struct bkey_s_c k, ck;
 	struct open_bucket *ob = NULL;
+	u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+	u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+	u64 alloc_cursor = alloc_start;
 	int ret;
 
-	s->cur_bucket = max_t(u64, s->cur_bucket, ca->mi.first_bucket);
-	s->cur_bucket = max_t(u64, s->cur_bucket, ca->new_fs_bucket_idx);
-
-	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
+	/*
+	 * Scan with an uncached iterator to avoid polluting the key cache. An
+	 * uncached iter will return a cached key if one exists, but if not
+	 * there is no other underlying protection for the associated key cache
	 * slot. To avoid racing bucket allocations, look up the cached key slot
+	 * of any likely allocation candidate before attempting to proceed with
+	 * the allocation. This provides proper exclusion on the associated
+	 * bucket. 
+ */ +again: + for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor), BTREE_ITER_SLOTS, k, ret) { - struct bch_alloc_v4 a; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a; if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) break; @@ -426,60 +429,82 @@ bch2_bucket_alloc_early(struct btree_trans *trans, is_superblock_bucket(ca, k.k->p.offset)) continue; - bch2_alloc_to_v4(k, &a); - - if (a.data_type != BCH_DATA_free) + a = bch2_alloc_to_v4(k, &a_convert); + if (a->data_type != BCH_DATA_free) continue; + /* now check the cached key to serialize concurrent allocs of the bucket */ + ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED); + ret = bkey_err(ck); + if (ret) + break; + + a = bch2_alloc_to_v4(ck, &a_convert); + if (a->data_type != BCH_DATA_free) + goto next; + s->buckets_seen++; - ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl); + ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl); +next: + citer.path->preserve = false; + bch2_trans_iter_exit(trans, &citer); if (ob) break; } bch2_trans_iter_exit(trans, &iter); - s->cur_bucket = iter.pos.offset; + alloc_cursor = iter.pos.offset; + ca->alloc_cursor = alloc_cursor; + + if (!ob && ret) + ob = ERR_PTR(ret); - return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found); + if (!ob && alloc_start > first_bucket) { + alloc_cursor = alloc_start = first_bucket; + goto again; + } + + return ob; } static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, struct bch_dev *ca, - enum alloc_reserve reserve, + enum bch_watermark watermark, struct bucket_alloc_state *s, struct closure *cl) { struct btree_iter iter; struct bkey_s_c k; struct open_bucket *ob = NULL; + u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor)); + u64 alloc_cursor = alloc_start; int ret; BUG_ON(ca->new_fs_bucket_idx); - - /* - * XXX: - * On transaction restart, we'd like to restart from the bucket we were - * at previously - */ +again: for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace, - POS(ca->dev_idx, s->cur_bucket), 0, k, ret) { + POS(ca->dev_idx, alloc_cursor), 0, k, ret) { if (k.k->p.inode != ca->dev_idx) break; - for (s->cur_bucket = max(s->cur_bucket, bkey_start_offset(k.k)); - s->cur_bucket < k.k->p.offset; - s->cur_bucket++) { + for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k)); + alloc_cursor < k.k->p.offset; + alloc_cursor++) { ret = btree_trans_too_many_iters(trans); - if (ret) + if (ret) { + ob = ERR_PTR(ret); break; + } s->buckets_seen++; - ob = try_alloc_bucket(trans, ca, reserve, - s->cur_bucket, s, k, cl); - if (ob) + ob = try_alloc_bucket(trans, ca, watermark, + alloc_cursor, s, k, cl); + if (ob) { + iter.path->preserve = false; break; + } } if (ob || ret) @@ -487,31 +512,44 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans, } bch2_trans_iter_exit(trans, &iter); - return ob ?: ERR_PTR(ret); + ca->alloc_cursor = alloc_cursor; + + if (!ob && ret) + ob = ERR_PTR(ret); + + if (!ob && alloc_start > ca->mi.first_bucket) { + alloc_cursor = alloc_start = ca->mi.first_bucket; + goto again; + } + + return ob; } /** - * bch_bucket_alloc - allocate a single bucket from a specific device + * bch2_bucket_alloc_trans - allocate a single bucket from a specific device + * @trans: transaction object + * @ca: device to allocate from + * @watermark: how important is this allocation? 
+ * @cl:		if not NULL, closure to be used to wait if buckets not available
+ * @usage:	for additionally returning the current device usage
  *
- * Returns index of bucket on success, 0 on failure
+ * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 				      struct bch_dev *ca,
-				      enum alloc_reserve reserve,
-				      bool may_alloc_partial,
+				      enum bch_watermark watermark,
 				      struct closure *cl,
 				      struct bch_dev_usage *usage)
 {
 	struct bch_fs *c = trans->c;
 	struct open_bucket *ob = NULL;
-	bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
-	u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
+	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
 	u64 avail;
-	struct bucket_alloc_state s = { .cur_bucket = start };
+	struct bucket_alloc_state s = { 0 };
 	bool waiting = false;
 again:
 	bch2_dev_usage_read_fast(ca, usage);
-	avail = dev_buckets_free(ca, *usage, reserve);
+	avail = dev_buckets_free(ca, *usage, watermark);
 
 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);
@@ -538,37 +576,37 @@ again:
 
 	if (waiting)
 		closure_wake_up(&c->freelist_wait);
-
-	if (may_alloc_partial) {
-		ob = try_alloc_partial_bucket(c, ca, reserve);
-		if (ob)
-			return ob;
-	}
-
-	ob = likely(ca->mi.freespace_initialized)
-		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
-		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+alloc:
+	ob = likely(freespace)
+		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
 
 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);
 
-	if (!ob && !freespace_initialized && start) {
-		start = s.cur_bucket = 0;
-		goto again;
+	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+		freespace = false;
+		goto alloc;
 	}
-
-	if (!freespace_initialized)
-		ca->bucket_alloc_trans_early_cursor = s.cur_bucket;
 err:
 	if (!ob)
 		ob = ERR_PTR(-BCH_ERR_no_buckets_found);
 
 	if (!IS_ERR(ob))
-		trace_and_count(c, bucket_alloc, ca, bch2_alloc_reserves[reserve],
-				may_alloc_partial, ob->bucket);
+		trace_and_count(c, bucket_alloc, ca,
+				bch2_watermarks[watermark],
+				ob->bucket,
+				usage->d[BCH_DATA_free].buckets,
+				avail,
+				bch2_copygc_wait_amount(c),
+				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+				&s,
+				cl == NULL,
+				"");
 	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
-		trace_and_count(c, bucket_alloc_fail,
-				ca, bch2_alloc_reserves[reserve],
+		trace_and_count(c, bucket_alloc_fail, ca,
+				bch2_watermarks[watermark],
+				0,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
 				bch2_copygc_wait_amount(c),
@@ -581,16 +619,15 @@ err:
 }
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-				      enum alloc_reserve reserve,
-				      bool may_alloc_partial,
+				      enum bch_watermark watermark,
 				      struct closure *cl)
 {
 	struct bch_dev_usage usage;
 	struct open_bucket *ob;
 
 	bch2_trans_do(c, NULL, NULL, 0,
-		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
-							may_alloc_partial, cl, &usage)));
+		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
+							cl, &usage)));
 	return ob;
 }
 
@@ -622,7 +659,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
 			       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_none);
+	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
@@ -647,12 +684,10 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
 	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 }
 
-#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
-#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)
-
-static void add_new_bucket(struct bch_fs *c,
+static int add_new_bucket(struct bch_fs *c,
 			   struct open_buckets *ptrs,
 			   struct bch_devs_mask *devs_may_alloc,
+			   unsigned nr_replicas,
 			   unsigned *nr_effective,
 			   bool *have_cache,
 			   unsigned flags,
@@ -661,23 +696,31 @@ static void add_new_bucket(struct bch_fs *c,
 	unsigned durability = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
 
+	BUG_ON(*nr_effective >= nr_replicas);
+
 	__clear_bit(ob->dev, devs_may_alloc->d);
-	*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
-		? durability : 1;
+	*nr_effective += durability;
 	*have_cache |= !durability;
 
 	ob_push(c, ptrs, ob);
+
+	if (*nr_effective >= nr_replicas)
+		return 1;
+	if (ob->ec)
+		return 1;
+	return 0;
 }
 
-static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			struct open_buckets *ptrs,
 			struct dev_stripe_state *stripe,
 			struct bch_devs_mask *devs_may_alloc,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve, unsigned flags,
+			unsigned flags,
+			enum bch_data_type data_type,
+			enum bch_watermark watermark,
 			struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
@@ -710,8 +753,7 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}
 
-		ob = bch2_bucket_alloc_trans(trans, ca, reserve,
-					     flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);
@@ -723,10 +765,11 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}
 
-		add_new_bucket(c, ptrs, devs_may_alloc,
-			       nr_effective, have_cache, flags, ob);
+		ob->data_type = data_type;
 
-		if (*nr_effective >= nr_replicas) {
+		if (add_new_bucket(c, ptrs, devs_may_alloc,
+				   nr_replicas, nr_effective,
+				   have_cache, flags, ob)) {
 			ret = 0;
 			break;
 		}
@@ -735,24 +778,6 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 	return ret;
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *c,
-		      struct open_buckets *ptrs,
-		      struct dev_stripe_state *stripe,
-		      struct bch_devs_mask *devs_may_alloc,
-		      unsigned nr_replicas,
-		      unsigned *nr_effective,
-		      bool *have_cache,
-		      enum alloc_reserve reserve,
-		      unsigned flags,
-		      struct closure *cl)
-{
-	return bch2_trans_do(c, NULL, NULL, 0,
-		      bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
-					      devs_may_alloc, nr_replicas,
-					      nr_effective, have_cache, reserve,
-					      flags, cl));
-}
-
 /* Allocate from stripes: */
 
 /*
@@ -761,26 +786,24 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
  * it's to a device we don't want:
  */
 
-static int bucket_alloc_from_stripe(struct bch_fs *c,
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 struct open_buckets *ptrs,
 			 struct write_point *wp,
 			 struct bch_devs_mask *devs_may_alloc,
 			 u16 target,
-			 unsigned erasure_code,
 			 unsigned nr_replicas,
 			 unsigned *nr_effective,
 			 bool *have_cache,
+			 enum bch_watermark watermark,
 			 unsigned flags,
 			 struct closure *cl)
 {
+	struct bch_fs *c = trans->c;
 	struct dev_alloc_list devs_sorted;
 	struct ec_stripe_head *h;
 	struct open_bucket *ob;
-	struct bch_dev *ca;
 	unsigned i, ec_idx;
-
-	if (!erasure_code)
-		return 0;
+	int ret = 0;
 
 	if (nr_replicas < 2)
 		return 0;
 
@@ -788,11 +811,9 @@ static int bucket_alloc_from_stripe(struct 
bch_fs *c, if (ec_open_bucket(c, ptrs)) return 0; - h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1, - wp == &c->copygc_write_point, - cl); + h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl); if (IS_ERR(h)) - return -PTR_ERR(h); + return PTR_ERR(h); if (!h) return 0; @@ -810,63 +831,132 @@ static int bucket_alloc_from_stripe(struct bch_fs *c, } goto out_put_head; got_bucket: - ca = bch_dev_bkey_exists(c, ob->dev); - ob->ec_idx = ec_idx; ob->ec = h->s; + ec_stripe_new_get(h->s, STRIPE_REF_io); - add_new_bucket(c, ptrs, devs_may_alloc, - nr_effective, have_cache, flags, ob); - atomic_inc(&h->s->pin); + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); out_put_head: bch2_ec_stripe_head_put(c, h); - return 0; + return ret; } /* Sector allocator */ -static void get_buckets_from_writepoint(struct bch_fs *c, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - unsigned flags, - bool need_ec) +static bool want_bucket(struct bch_fs *c, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + bool *have_cache, bool ec, + struct open_bucket *ob) +{ + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + + if (!test_bit(ob->dev, devs_may_alloc->d)) + return false; + + if (ob->data_type != wp->data_type) + return false; + + if (!ca->mi.durability && + (wp->data_type == BCH_DATA_btree || ec || *have_cache)) + return false; + + if (ec != (ob->ec != NULL)) + return false; + + return true; +} + +static int bucket_alloc_set_writepoint(struct bch_fs *c, + struct open_buckets *ptrs, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + unsigned nr_replicas, + unsigned *nr_effective, + bool *have_cache, + bool ec, unsigned flags) { struct open_buckets ptrs_skip = { .nr = 0 }; struct open_bucket *ob; unsigned i; + int ret = 0; open_bucket_for_each(c, &wp->ptrs, ob, i) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); - - if (*nr_effective < nr_replicas && - test_bit(ob->dev, devs_may_alloc->d) && - (ca->mi.durability || - (wp->data_type == BCH_DATA_user && !*have_cache)) && - (ob->ec || !need_ec)) { - add_new_bucket(c, ptrs, devs_may_alloc, - nr_effective, have_cache, - flags, ob); - } else { + if (!ret && want_bucket(c, wp, devs_may_alloc, + have_cache, ec, ob)) + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); + else ob_push(c, &ptrs_skip, ob); - } } wp->ptrs = ptrs_skip; + + return ret; } -static int open_bucket_add_buckets(struct btree_trans *trans, +static int bucket_alloc_set_partial(struct bch_fs *c, + struct open_buckets *ptrs, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + unsigned nr_replicas, + unsigned *nr_effective, + bool *have_cache, bool ec, + enum bch_watermark watermark, + unsigned flags) +{ + int i, ret = 0; + + if (!c->open_buckets_partial_nr) + return 0; + + spin_lock(&c->freelist_lock); + + if (!c->open_buckets_partial_nr) + goto unlock; + + for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) { + struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i]; + + if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) { + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + struct bch_dev_usage usage; + u64 avail; + + bch2_dev_usage_read_fast(ca, &usage); + avail = dev_buckets_free(ca, usage, watermark); + if (!avail) + continue; + + 
array_remove_item(c->open_buckets_partial, + c->open_buckets_partial_nr, + i); + ob->on_partial_list = false; + + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); + if (ret) + break; + } + } +unlock: + spin_unlock(&c->freelist_lock); + return ret; +} + +static int __open_bucket_add_buckets(struct btree_trans *trans, struct open_buckets *ptrs, struct write_point *wp, struct bch_devs_list *devs_have, u16 target, - unsigned erasure_code, + bool erasure_code, unsigned nr_replicas, unsigned *nr_effective, bool *have_cache, - enum alloc_reserve reserve, + enum bch_watermark watermark, unsigned flags, struct closure *_cl) { @@ -874,12 +964,10 @@ static int open_bucket_add_buckets(struct btree_trans *trans, struct bch_devs_mask devs; struct open_bucket *ob; struct closure *cl = NULL; - int ret; unsigned i; + int ret; - rcu_read_lock(); devs = target_rw_devs(c, wp->data_type, target); - rcu_read_unlock(); /* Don't allocate from devices we already have pointers to: */ for (i = 0; i < devs_have->nr; i++) @@ -888,93 +976,191 @@ static int open_bucket_add_buckets(struct btree_trans *trans, open_bucket_for_each(c, ptrs, ob, i) __clear_bit(ob->dev, devs.d); - if (erasure_code) { - if (!ec_open_bucket(c, ptrs)) { - get_buckets_from_writepoint(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, flags, true); - if (*nr_effective >= nr_replicas) - return 0; - } + if (erasure_code && ec_open_bucket(c, ptrs)) + return 0; - if (!ec_open_bucket(c, ptrs)) { - ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs, - target, erasure_code, - nr_replicas, nr_effective, - have_cache, flags, _cl); - if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || - bch2_err_matches(ret, BCH_ERR_freelist_empty) || - bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) - return ret; - if (*nr_effective >= nr_replicas) - return 0; - } - } + ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs, + nr_replicas, nr_effective, + have_cache, erasure_code, flags); + if (ret) + return ret; - get_buckets_from_writepoint(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, flags, false); - if (*nr_effective >= nr_replicas) - return 0; + ret = bucket_alloc_set_partial(c, ptrs, wp, &devs, + nr_replicas, nr_effective, + have_cache, erasure_code, watermark, flags); + if (ret) + return ret; + if (erasure_code) { + ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs, + target, + nr_replicas, nr_effective, + have_cache, + watermark, flags, _cl); + } else { retry_blocking: - /* - * Try nonblocking first, so that if one device is full we'll try from - * other devices: - */ - ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, - nr_replicas, nr_effective, have_cache, - reserve, flags, cl); - if (ret && - !bch2_err_matches(ret, BCH_ERR_transaction_restart) && - !bch2_err_matches(ret, BCH_ERR_insufficient_devices) && - !cl && _cl) { - cl = _cl; - goto retry_blocking; + /* + * Try nonblocking first, so that if one device is full we'll try from + * other devices: + */ + ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, + nr_replicas, nr_effective, have_cache, + flags, wp->data_type, watermark, cl); + if (ret && + !bch2_err_matches(ret, BCH_ERR_transaction_restart) && + !bch2_err_matches(ret, BCH_ERR_insufficient_devices) && + !cl && _cl) { + cl = _cl; + goto retry_blocking; + } } return ret; } -void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca, - struct open_buckets *obs) +static int open_bucket_add_buckets(struct btree_trans *trans, 
+				   struct open_buckets *ptrs,
+				   struct write_point *wp,
+				   struct bch_devs_list *devs_have,
+				   u16 target,
+				   unsigned erasure_code,
+				   unsigned nr_replicas,
+				   unsigned *nr_effective,
+				   bool *have_cache,
+				   enum bch_watermark watermark,
+				   unsigned flags,
+				   struct closure *cl)
 {
-	struct open_buckets ptrs = { .nr = 0 };
-	struct open_bucket *ob, *ob2;
-	unsigned i, j;
+	int ret;
 
-	open_bucket_for_each(c, obs, ob, i) {
-		bool drop = !ca || ob->dev == ca->dev_idx;
+	if (erasure_code) {
+		ret = __open_bucket_add_buckets(trans, ptrs, wp,
+				devs_have, target, erasure_code,
+				nr_replicas, nr_effective, have_cache,
+				watermark, flags, cl);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+			return ret;
+		if (*nr_effective >= nr_replicas)
+			return 0;
+	}
+
+	ret = __open_bucket_add_buckets(trans, ptrs, wp,
+			devs_have, target, false,
+			nr_replicas, nr_effective, have_cache,
+			watermark, flags, cl);
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ * should_drop_bucket - check if this open_bucket should go away
+ * @ob:		open_bucket to check
+ * @c:		filesystem handle
+ * @ca:		if set, we're killing buckets for a particular device
+ * @ec:		if true, we're shutting down erasure coding and killing all ec
+ *		open_buckets; if neither @ca nor @ec is set, every open_bucket
+ *		matches
+ * Returns: true if we should kill this open_bucket
+ *
+ * We're killing open_buckets because we're shutting down a device, erasure
+ * coding, or the entire filesystem - check if this open_bucket matches:
+ */
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+			       struct bch_dev *ca, bool ec)
+{
+	if (ec) {
+		return ob->ec != NULL;
+	} else if (ca) {
+		bool drop = ob->dev == ca->dev_idx;
+		struct open_bucket *ob2;
+		unsigned i;
 
 		if (!drop && ob->ec) {
+			unsigned nr_blocks;
+
 			mutex_lock(&ob->ec->lock);
-			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
-				if (!ob->ec->blocks[j])
+			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+			for (i = 0; i < nr_blocks; i++) {
+				if (!ob->ec->blocks[i])
 					continue;
 
-				ob2 = c->open_buckets + ob->ec->blocks[j];
+				ob2 = c->open_buckets + ob->ec->blocks[i];
 				drop |= ob2->dev == ca->dev_idx;
 			}
 			mutex_unlock(&ob->ec->lock);
 		}
 
-		if (drop)
-			bch2_open_bucket_put(c, ob);
-		else
-			ob_push(c, &ptrs, ob);
+		return drop;
+	} else {
+		return true;
 	}
-
-	*obs = ptrs;
 }
 
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
-			  struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+				 bool ec, struct write_point *wp)
 {
+	struct open_buckets ptrs = { .nr = 0 };
+	struct open_bucket *ob;
+	unsigned i;
+
 	mutex_lock(&wp->lock);
-	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
+	open_bucket_for_each(c, &wp->ptrs, ob, i)
+		if (should_drop_bucket(ob, c, ca, ec))
+			bch2_open_bucket_put(c, ob);
+		else
+			ob_push(c, &ptrs, ob);
+	wp->ptrs = ptrs;
 	mutex_unlock(&wp->lock);
 }
 
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+			    bool ec)
+{
+	unsigned i;
+
+	/* Next, close write points that point to this device... 
*/ + for (i = 0; i < ARRAY_SIZE(c->write_points); i++) + bch2_writepoint_stop(c, ca, ec, &c->write_points[i]); + + bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point); + bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point); + bch2_writepoint_stop(c, ca, ec, &c->btree_write_point); + + mutex_lock(&c->btree_reserve_cache_lock); + while (c->btree_reserve_cache_nr) { + struct btree_alloc *a = + &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; + + bch2_open_buckets_put(c, &a->ob); + } + mutex_unlock(&c->btree_reserve_cache_lock); + + spin_lock(&c->freelist_lock); + i = 0; + while (i < c->open_buckets_partial_nr) { + struct open_bucket *ob = + c->open_buckets + c->open_buckets_partial[i]; + + if (should_drop_bucket(ob, c, ca, ec)) { + --c->open_buckets_partial_nr; + swap(c->open_buckets_partial[i], + c->open_buckets_partial[c->open_buckets_partial_nr]); + ob->on_partial_list = false; + spin_unlock(&c->freelist_lock); + bch2_open_bucket_put(c, ob); + spin_lock(&c->freelist_lock); + } else { + i++; + } + } + spin_unlock(&c->freelist_lock); + + bch2_ec_stop_dev(c, ca); +} + static inline struct hlist_head *writepoint_hash(struct bch_fs *c, unsigned long write_point) { @@ -1020,10 +1206,12 @@ static bool try_increase_writepoints(struct bch_fs *c) return true; } -static bool try_decrease_writepoints(struct bch_fs *c, - unsigned old_nr) +static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr) { + struct bch_fs *c = trans->c; struct write_point *wp; + struct open_bucket *ob; + unsigned i; mutex_lock(&c->write_points_hash_lock); if (c->write_points_nr < old_nr) { @@ -1042,19 +1230,14 @@ static bool try_decrease_writepoints(struct bch_fs *c, hlist_del_rcu(&wp->node); mutex_unlock(&c->write_points_hash_lock); - bch2_writepoint_stop(c, NULL, wp); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); + open_bucket_for_each(c, &wp->ptrs, ob, i) + open_bucket_free_unused(c, ob); + wp->ptrs.nr = 0; + mutex_unlock(&wp->lock); return true; } -static void bch2_trans_mutex_lock(struct btree_trans *trans, - struct mutex *lock) -{ - if (!mutex_trylock(lock)) { - bch2_trans_unlock(trans); - mutex_lock(lock); - } -} - static struct write_point *writepoint_find(struct btree_trans *trans, unsigned long write_point) { @@ -1064,7 +1247,7 @@ static struct write_point *writepoint_find(struct btree_trans *trans, if (!(write_point & 1UL)) { wp = (struct write_point *) write_point; - bch2_trans_mutex_lock(trans, &wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); return wp; } @@ -1073,7 +1256,7 @@ restart_find: wp = __writepoint_find(head, write_point); if (wp) { lock_wp: - bch2_trans_mutex_lock(trans, &wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); if (wp->write_point == write_point) goto out; mutex_unlock(&wp->lock); @@ -1086,8 +1269,8 @@ restart_find_oldest: if (!oldest || time_before64(wp->last_used, oldest->last_used)) oldest = wp; - bch2_trans_mutex_lock(trans, &oldest->lock); - bch2_trans_mutex_lock(trans, &c->write_points_hash_lock); + bch2_trans_mutex_lock_norelock(trans, &oldest->lock); + bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock); if (oldest >= c->write_points + c->write_points_nr || try_increase_writepoints(c)) { mutex_unlock(&c->write_points_hash_lock); @@ -1112,33 +1295,58 @@ out: return wp; } +static noinline void +deallocate_extra_replicas(struct bch_fs *c, + struct open_buckets *ptrs, + struct open_buckets *ptrs_no_use, + unsigned extra_replicas) +{ + struct open_buckets ptrs2 = { 0 }; + struct open_bucket *ob; + unsigned 
i; + + open_bucket_for_each(c, ptrs, ob, i) { + unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability; + + if (d && d <= extra_replicas) { + extra_replicas -= d; + ob_push(c, ptrs_no_use, ob); + } else { + ob_push(c, &ptrs2, ob); + } + } + + *ptrs = ptrs2; +} + /* * Get us an open_bucket we can allocate from, return with it locked: */ int bch2_alloc_sectors_start_trans(struct btree_trans *trans, - unsigned target, - unsigned erasure_code, - struct write_point_specifier write_point, - struct bch_devs_list *devs_have, - unsigned nr_replicas, - unsigned nr_replicas_required, - enum alloc_reserve reserve, - unsigned flags, - struct closure *cl, - struct write_point **wp_ret) + unsigned target, + unsigned erasure_code, + struct write_point_specifier write_point, + struct bch_devs_list *devs_have, + unsigned nr_replicas, + unsigned nr_replicas_required, + enum bch_watermark watermark, + unsigned flags, + struct closure *cl, + struct write_point **wp_ret) { struct bch_fs *c = trans->c; struct write_point *wp; struct open_bucket *ob; struct open_buckets ptrs; unsigned nr_effective, write_points_nr; - unsigned ob_flags = 0; bool have_cache; int ret; int i; - if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) - ob_flags |= BUCKET_ALLOC_USE_DURABILITY; + if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING)) + erasure_code = false; + + BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS); BUG_ON(!nr_replicas || !nr_replicas_required); retry: @@ -1149,34 +1357,42 @@ retry: *wp_ret = wp = writepoint_find(trans, write_point.v); - if (wp->data_type == BCH_DATA_user) - ob_flags |= BUCKET_MAY_ALLOC_PARTIAL; - /* metadata may not allocate on cache devices: */ if (wp->data_type != BCH_DATA_user) have_cache = true; - if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) { - ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, - target, erasure_code, - nr_replicas, &nr_effective, - &have_cache, reserve, - ob_flags, cl); - } else { + if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) { ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, target, erasure_code, nr_replicas, &nr_effective, - &have_cache, reserve, - ob_flags, NULL); + &have_cache, watermark, + flags, NULL); if (!ret || bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto alloc_done; + /* Don't retry from all devices if we're out of open buckets: */ + if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) + goto allocate_blocking; + + /* + * Only try to allocate cache (durability = 0 devices) from the + * specified target: + */ + have_cache = true; + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, 0, erasure_code, nr_replicas, &nr_effective, - &have_cache, reserve, - ob_flags, cl); + &have_cache, watermark, + flags, cl); + } else { +allocate_blocking: + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, + target, erasure_code, + nr_replicas, &nr_effective, + &have_cache, watermark, + flags, cl); } alloc_done: BUG_ON(!ret && nr_effective < nr_replicas); @@ -1191,9 +1407,12 @@ alloc_done: if (ret) goto err; + if (nr_effective > nr_replicas) + deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas); + /* Free buckets we didn't use: */ open_bucket_for_each(c, &wp->ptrs, ob, i) - open_bucket_free_unused(c, wp, ob); + open_bucket_free_unused(c, ob); wp->ptrs = ptrs; @@ -1210,24 +1429,21 @@ err: if (ptrs.nr < ARRAY_SIZE(ptrs.v)) ob_push(c, &ptrs, ob); else - open_bucket_free_unused(c, wp, ob); + open_bucket_free_unused(c, ob); wp->ptrs = ptrs; mutex_unlock(&wp->lock); if (bch2_err_matches(ret, 
BCH_ERR_freelist_empty) && - try_decrease_writepoints(c, write_points_nr)) + try_decrease_writepoints(trans, write_points_nr)) goto retry; if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) || bch2_err_matches(ret, BCH_ERR_freelist_empty)) return cl - ? -EAGAIN + ? -BCH_ERR_bucket_alloc_blocked : -BCH_ERR_ENOSPC_bucket_alloc; - if (bch2_err_matches(ret, BCH_ERR_insufficient_devices)) - return -EROFS; - return ret; } @@ -1307,21 +1523,106 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) } } +static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob) +{ + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + unsigned data_type = ob->data_type; + barrier(); /* READ_ONCE() doesn't work on bitfields */ + + prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u", + ob - c->open_buckets, + atomic_read(&ob->pin), + data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type", + ob->dev, ob->bucket, ob->gen, + ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size); + if (ob->ec) + prt_printf(out, " ec idx %llu", ob->ec->idx); + if (ob->on_partial_list) + prt_str(out, " partial"); + prt_newline(out); +} + void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c) { struct open_bucket *ob; + out->atomic++; + for (ob = c->open_buckets; ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) { spin_lock(&ob->lock); - if (ob->valid && !ob->on_partial_list) { - prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n", - ob - c->open_buckets, - atomic_read(&ob->pin), - bch2_data_types[ob->data_type], - ob->dev, ob->bucket, ob->gen); - } + if (ob->valid && !ob->on_partial_list) + bch2_open_bucket_to_text(out, c, ob); spin_unlock(&ob->lock); } + + --out->atomic; +} + +void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c) +{ + unsigned i; + + out->atomic++; + spin_lock(&c->freelist_lock); + + for (i = 0; i < c->open_buckets_partial_nr; i++) + bch2_open_bucket_to_text(out, c, + c->open_buckets + c->open_buckets_partial[i]); + + spin_unlock(&c->freelist_lock); + --out->atomic; +} + +static const char * const bch2_write_point_states[] = { +#define x(n) #n, + WRITE_POINT_STATES() +#undef x + NULL +}; + +static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c, + struct write_point *wp) +{ + struct open_bucket *ob; + unsigned i; + + prt_printf(out, "%lu: ", wp->write_point); + prt_human_readable_u64(out, wp->sectors_allocated); + + prt_printf(out, " last wrote: "); + bch2_pr_time_units(out, sched_clock() - wp->last_used); + + for (i = 0; i < WRITE_POINT_STATE_NR; i++) { + prt_printf(out, " %s: ", bch2_write_point_states[i]); + bch2_pr_time_units(out, wp->time[i]); + } + + prt_newline(out); + + printbuf_indent_add(out, 2); + open_bucket_for_each(c, &wp->ptrs, ob, i) + bch2_open_bucket_to_text(out, c, ob); + printbuf_indent_sub(out, 2); +} + +void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct write_point *wp; + + prt_str(out, "Foreground write points\n"); + for (wp = c->write_points; + wp < c->write_points + ARRAY_SIZE(c->write_points); + wp++) + bch2_write_point_to_text(out, c, wp); + + prt_str(out, "Copygc write point\n"); + bch2_write_point_to_text(out, c, &c->copygc_write_point); + + prt_str(out, "Rebalance write point\n"); + bch2_write_point_to_text(out, c, &c->rebalance_write_point); + + prt_str(out, "Btree write point\n"); + bch2_write_point_to_text(out, c, &c->btree_write_point); }
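
Appendix (editor's illustration; not part of the patch above): two small standalone
sketches of allocator mechanics this diff touches. All names below (pick_dev,
next_alloc, bucket_is_free, struct dev, NR_DEVS, NBUCKETS) are hypothetical
stand-ins for illustration, not the bcachefs API.

First, bch2_dev_stripe_increment_inlined() above charges each device a cost of
2^48 / free_space per allocation, and device selection prefers the smallest
accumulated cost, so writes spread across devices roughly in proportion to
their free space. A minimal sketch of that heuristic, assuming a fixed
free-space snapshot and a subtract-the-minimum rescale (the real code decays
the counters differently):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_DEVS 3

	static uint64_t next_alloc[NR_DEVS];	/* accumulated cost per device */

	static unsigned pick_dev(const uint64_t free_space[NR_DEVS])
	{
		unsigned best = 0;
		uint64_t min;

		/* pick the device with the smallest accumulated cost: */
		for (unsigned i = 1; i < NR_DEVS; i++)
			if (next_alloc[i] < next_alloc[best])
				best = i;

		/* charge it by the inverse of its free space: */
		next_alloc[best] += free_space[best]
			? (1ULL << 48) / free_space[best]
			: 1ULL << 48;

		/* rescale so the counters stay bounded: */
		min = next_alloc[0];
		for (unsigned i = 1; i < NR_DEVS; i++)
			if (next_alloc[i] < min)
				min = next_alloc[i];
		for (unsigned i = 0; i < NR_DEVS; i++)
			next_alloc[i] -= min;

		return best;
	}

	int main(void)
	{
		const uint64_t free_space[NR_DEVS] = { 1000, 500, 250 };
		unsigned counts[NR_DEVS] = { 0 };

		for (int i = 0; i < 7000; i++)
			counts[pick_dev(free_space)]++;

		/* prints roughly 4000/2000/1000: picks track free space */
		for (unsigned i = 0; i < NR_DEVS; i++)
			printf("dev %u: %u\n", i, counts[i]);
		return 0;
	}

Second, after this patch both bch2_bucket_alloc_early() and
bch2_bucket_alloc_freelist() scan from a persistent per-device cursor
(ca->alloc_cursor) and, if the tail of the device yields nothing, retry exactly
once from the first bucket. A sketch of that pattern, with bucket_is_free()
standing in for the btree lookups:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NBUCKETS 16

	struct dev {
		uint64_t first_bucket;
		uint64_t alloc_cursor;	/* persists across calls */
	};

	/* stand-in for the freespace/alloc btree checks: */
	static bool bucket_is_free(const bool used[NBUCKETS], uint64_t b)
	{
		return !used[b];
	}

	static int64_t alloc_bucket(struct dev *ca, const bool used[NBUCKETS])
	{
		uint64_t alloc_start = ca->alloc_cursor > ca->first_bucket
			? ca->alloc_cursor : ca->first_bucket;
		uint64_t cursor = alloc_start;
	again:
		for (; cursor < NBUCKETS; cursor++)
			if (bucket_is_free(used, cursor)) {
				ca->alloc_cursor = cursor + 1;
				return (int64_t) cursor;
			}

		/* nothing at or past the cursor: wrap to the start, at most once */
		if (alloc_start > ca->first_bucket) {
			cursor = alloc_start = ca->first_bucket;
			goto again;
		}

		ca->alloc_cursor = cursor;
		return -1;
	}

	int main(void)
	{
		bool used[NBUCKETS];
		struct dev ca = { .first_bucket = 1, .alloc_cursor = 10 };
		int64_t b;

		/* everything used except buckets 5 and 15: */
		for (int i = 0; i < NBUCKETS; i++)
			used[i] = i != 5 && i != 15;

		b = alloc_bucket(&ca, used);	/* 15: first free at/after 10 */
		printf("%lld\n", (long long) b);
		used[b] = true;

		b = alloc_bucket(&ca, used);	/* 5: found by wrapping around */
		printf("%lld\n", (long long) b);
		return 0;
	}

The single bounded wrap keeps each call to one pass over the device while still
finding buckets freed behind the cursor, which is why the diff replaces the old
"restart from zero" cursor handling with the alloc_start/alloc_cursor pair.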