X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Falloc_foreground.c;h=affddf1f03fff0cb3121ab5f767df65beb1d19b9;hb=da6a35689518599b381c285cd9505ab8d58f7c73;hp=178d7c058597ada79a67a58359b7ca0554885e83;hpb=3765483ff0cf9abd0243fcafe11aebd0f9beb03d;p=bcachefs-tools-debian

diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index 178d7c0..affddf1 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -14,6 +14,7 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
 #include "btree_iter.h"
 #include "btree_update.h"
 #include "btree_gc.h"
@@ -26,12 +27,21 @@
 #include "error.h"
 #include "io.h"
 #include "journal.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
 
 #include <linux/math64.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+	BCH_ALLOC_RESERVES()
+#undef x
+	NULL
+};
+
 /*
  * Open buckets represent a bucket that's currently being allocated from. They
  * serve two purposes:
@@ -48,6 +58,17 @@
  * reference _after_ doing the index update that makes its allocation reachable.
  */
 
+void bch2_reset_alloc_cursors(struct bch_fs *c)
+{
+	struct bch_dev *ca;
+	unsigned i;
+
+	rcu_read_lock();
+	for_each_member_device_rcu(ca, c, i, NULL)
+		ca->alloc_cursor = 0;
+	rcu_read_unlock();
+}
+
 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
 {
 	open_bucket_idx_t idx = ob - c->open_buckets;
@@ -172,10 +193,10 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
 	switch (reserve) {
-	case RESERVE_BTREE:
-	case RESERVE_BTREE_MOVINGGC:
+	case RESERVE_btree:
+	case RESERVE_btree_movinggc:
 		return 0;
-	case RESERVE_MOVINGGC:
+	case RESERVE_movinggc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -183,22 +204,32 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 }
 
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+					      u64 bucket,
 					      enum alloc_reserve reserve,
-					      struct bkey_alloc_unpacked a,
-					      size_t *need_journal_commit,
+					      const struct bch_alloc_v4 *a,
+					      struct bucket_alloc_state *s,
 					      struct closure *cl)
 {
 	struct open_bucket *ob;
 
-	if (unlikely(ca->buckets_nouse && test_bit(a.bucket, ca->buckets_nouse)))
+	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
+		s->skipped_nouse++;
 		return NULL;
+	}
 
-	if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket))
+	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+		s->skipped_open++;
 		return NULL;
+	}
 
 	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
-			c->journal.flushed_seq_ondisk, ca->dev_idx, a.bucket)) {
-		(*need_journal_commit)++;
+			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
+		s->skipped_need_journal_commit++;
+		return NULL;
+	}
+
+	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+		s->skipped_nocow++;
 		return NULL;
 	}
 
@@ -212,14 +243,13 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 			c->blocked_allocate_open_bucket = local_clock();
 
 		spin_unlock(&c->freelist_lock);
-
-		trace_open_bucket_alloc_fail(ca, reserve);
-		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
+		return ERR_PTR(-BCH_ERR_open_buckets_empty);
 	}
 
 	/* Recheck under lock: */
-	if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
+	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
 		spin_unlock(&c->freelist_lock);
+		s->skipped_open++;
 		return NULL;
 	}
 
@@ -231,8 +261,8 @@ static struct 
open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * ob->sectors_free = ca->mi.bucket_size; ob->alloc_reserve = reserve; ob->dev = ca->dev_idx; - ob->gen = a.gen; - ob->bucket = a.bucket; + ob->gen = a->gen; + ob->bucket = bucket; spin_unlock(&ob->lock); ca->nr_open_buckets++; @@ -253,26 +283,36 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * } spin_unlock(&c->freelist_lock); - - trace_bucket_alloc(ca, reserve); return ob; } static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, enum alloc_reserve reserve, u64 free_entry, - size_t *need_journal_commit, + struct bucket_alloc_state *s, + struct bkey_s_c freespace_k, struct closure *cl) { struct bch_fs *c = trans->c; - struct btree_iter iter; + struct btree_iter iter = { NULL }; struct bkey_s_c k; struct open_bucket *ob; - struct bkey_alloc_unpacked a; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a; u64 b = free_entry & ~(~0ULL << 56); unsigned genbits = free_entry >> 56; struct printbuf buf = PRINTBUF; int ret; + if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) { + prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n" + " freespace key ", + ca->mi.first_bucket, ca->mi.nbuckets); + bch2_bkey_val_to_text(&buf, c, freespace_k); + bch2_trans_inconsistent(trans, "%s", buf.buf); + ob = ERR_PTR(-EIO); + goto err; + } + bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED); k = bch2_btree_iter_peek_slot(&iter); ret = bkey_err(k); @@ -281,37 +321,66 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc goto err; } - a = bch2_alloc_unpack(k); + a = bch2_alloc_to_v4(k, &a_convert); - if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c, - "non free bucket in freespace btree (state %s)\n" - " %s\n" - " at %llu (genbits %u)", - bch2_bucket_states[bucket_state(a)], - (bch2_bkey_val_to_text(&buf, c, k), buf.buf), - free_entry, genbits)) { + if (a->data_type != BCH_DATA_free) { + if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) { + ob = NULL; + goto err; + } + + prt_printf(&buf, "non free bucket in freespace btree\n" + " freespace key "); + bch2_bkey_val_to_text(&buf, c, freespace_k); + prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + bch2_trans_inconsistent(trans, "%s", buf.buf); ob = ERR_PTR(-EIO); goto err; } - if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c, - "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" - " %s", - genbits, alloc_freespace_genbits(a) >> 56, - (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + if (genbits != (alloc_freespace_genbits(*a) >> 56) && + test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) { + prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" + " freespace key ", + genbits, alloc_freespace_genbits(*a) >> 56); + bch2_bkey_val_to_text(&buf, c, freespace_k); + prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + bch2_trans_inconsistent(trans, "%s", buf.buf); ob = ERR_PTR(-EIO); goto err; + } - if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c, - "freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)", - b, ca->mi.first_bucket, ca->mi.nbuckets)) { - ob = ERR_PTR(-EIO); - goto err; + if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) { + struct bch_backpointer bp; + u64 bp_offset = 0; + + ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1, + 
+						&bp_offset, &bp,
+						BTREE_ITER_NOPRESERVE);
+		if (ret) {
+			ob = ERR_PTR(ret);
+			goto err;
+		}
+
+		if (bp_offset != U64_MAX) {
+			/*
+			 * Bucket may have data in it - we don't call
+			 * bch2_trans_inconsistent() because fsck hasn't
+			 * finished yet
+			 */
+			ob = NULL;
+			goto err;
+		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, reserve, a, need_journal_commit, cl);
+	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+	if (!ob)
+		iter.path->preserve = false;
 err:
+	set_btree_iter_dontneed(&iter);
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
 	return ob;
@@ -350,147 +419,222 @@ static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch
  * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
  */
 static noinline struct open_bucket *
-bch2_bucket_alloc_trans_early(struct btree_trans *trans,
-			      struct bch_dev *ca,
-			      enum alloc_reserve reserve,
-			      u64 *b,
-			      size_t *need_journal_commit,
-			      struct closure *cl)
+bch2_bucket_alloc_early(struct btree_trans *trans,
+			struct bch_dev *ca,
+			enum alloc_reserve reserve,
+			struct bucket_alloc_state *s,
+			struct closure *cl)
 {
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct open_bucket *ob = NULL;
+	u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+	u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
 	int ret;
-
-	*b = max_t(u64, *b, ca->mi.first_bucket);
-	*b = max_t(u64, *b, ca->new_fs_bucket_idx);
-
-	for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *b),
+again:
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
 			   BTREE_ITER_SLOTS, k, ret) {
-		struct bkey_alloc_unpacked a;
+		struct bch_alloc_v4 a_convert;
+		const struct bch_alloc_v4 *a;
 
-		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
 
 		if (ca->new_fs_bucket_idx &&
 		    is_superblock_bucket(ca, k.k->p.offset))
 			continue;
 
-		a = bch2_alloc_unpack(k);
+		a = bch2_alloc_to_v4(k, &a_convert);
 
-		if (bucket_state(a) != BUCKET_free)
+		if (a->data_type != BCH_DATA_free)
 			continue;
 
-		ob = __try_alloc_bucket(trans->c, ca, reserve, a,
-					need_journal_commit, cl);
+		s->buckets_seen++;
+
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
 		if (ob)
 			break;
 	}
 	bch2_trans_iter_exit(trans, &iter);
 
-	*b = iter.pos.offset;
+	ca->alloc_cursor = alloc_cursor;
+
+	if (!ob && ret)
+		ob = ERR_PTR(ret);
+
+	if (!ob && alloc_cursor > alloc_start) {
+		alloc_cursor = alloc_start;
+		goto again;
+	}
 
-	return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
+	return ob;
 }
 
-static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 						   struct bch_dev *ca,
 						   enum alloc_reserve reserve,
-						   u64 *b,
-						   size_t *need_journal_commit,
+						   struct bucket_alloc_state *s,
 						   struct closure *cl)
 {
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct open_bucket *ob = NULL;
+	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+	u64 alloc_cursor = alloc_start;
 	int ret;
 
-	if (unlikely(!ca->mi.freespace_initialized))
-		return bch2_bucket_alloc_trans_early(trans, ca, reserve, b,
-						     need_journal_commit, cl);
-
 	BUG_ON(ca->new_fs_bucket_idx);
-
-	for_each_btree_key(trans, iter, BTREE_ID_freespace,
-			   POS(ca->dev_idx, *b), 0, k, ret) {
+again:
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
+				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
 		if (k.k->p.inode != ca->dev_idx)
 			break;
 
-		for (*b = max(*b, bkey_start_offset(k.k));
-		     *b != 
k.k->p.offset && !ob; - (*b)++) { - if (btree_trans_too_many_iters(trans)) { - ob = ERR_PTR(-EINTR); + for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k)); + alloc_cursor < k.k->p.offset; + alloc_cursor++) { + ret = btree_trans_too_many_iters(trans); + if (ret) { + ob = ERR_PTR(ret); break; } - ob = try_alloc_bucket(trans, ca, reserve, *b, - need_journal_commit, cl); + s->buckets_seen++; + + ob = try_alloc_bucket(trans, ca, reserve, + alloc_cursor, s, k, cl); + if (ob) { + iter.path->preserve = false; + break; + } } - if (ob) + + if (ob || ret) break; } bch2_trans_iter_exit(trans, &iter); - return ob ?: ERR_PTR(ret); + ca->alloc_cursor = alloc_cursor; + + if (!ob && ret) + ob = ERR_PTR(ret); + + if (!ob && alloc_start > ca->mi.first_bucket) { + alloc_cursor = alloc_start = ca->mi.first_bucket; + goto again; + } + + return ob; } /** * bch_bucket_alloc - allocate a single bucket from a specific device * * Returns index of bucket on success, 0 on failure - * */ -struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, + */ +static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans, + struct bch_dev *ca, enum alloc_reserve reserve, bool may_alloc_partial, - struct closure *cl) + struct closure *cl, + struct bch_dev_usage *usage) { + struct bch_fs *c = trans->c; struct open_bucket *ob = NULL; - size_t need_journal_commit = 0; - u64 avail = dev_buckets_available(ca, reserve); - u64 b = 0; - int ret; - - if (may_alloc_partial) { - ob = try_alloc_partial_bucket(c, ca, reserve); - if (ob) - return ob; - } + bool freespace = READ_ONCE(ca->mi.freespace_initialized); + u64 avail; + struct bucket_alloc_state s = { 0 }; + bool waiting = false; again: + bch2_dev_usage_read_fast(ca, usage); + avail = dev_buckets_free(ca, *usage, reserve); + + if (usage->d[BCH_DATA_need_discard].buckets > avail) + bch2_do_discards(c); + + if (usage->d[BCH_DATA_need_gc_gens].buckets > avail) + bch2_do_gc_gens(c); + + if (should_invalidate_buckets(ca, *usage)) + bch2_do_invalidates(c); + if (!avail) { - if (cl) { + if (cl && !waiting) { closure_wait(&c->freelist_wait, cl); - /* recheck after putting ourself on waitlist */ - avail = dev_buckets_available(ca, reserve); - if (avail) { - closure_wake_up(&c->freelist_wait); - goto again; - } + waiting = true; + goto again; } if (!c->blocked_allocate) c->blocked_allocate = local_clock(); - ob = ERR_PTR(-FREELIST_EMPTY); + ob = ERR_PTR(-BCH_ERR_freelist_empty); goto err; } - ret = bch2_trans_do(c, NULL, NULL, 0, - PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, - ca, reserve, &b, - &need_journal_commit, cl))); + if (waiting) + closure_wake_up(&c->freelist_wait); + + if (may_alloc_partial) { + ob = try_alloc_partial_bucket(c, ca, reserve); + if (ob) + return ob; + } +alloc: + ob = likely(freespace) + ? 
bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl) + : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl); - if (need_journal_commit * 2 > avail) + if (s.skipped_need_journal_commit * 2 > avail) bch2_journal_flush_async(&c->journal, NULL); + + if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) { + freespace = false; + goto alloc; + } err: if (!ob) - ob = ERR_PTR(ret ?: -FREELIST_EMPTY); + ob = ERR_PTR(-BCH_ERR_no_buckets_found); + + if (!IS_ERR(ob)) + trace_and_count(c, bucket_alloc, ca, + bch2_alloc_reserves[reserve], + may_alloc_partial, + ob->bucket, + usage->d[BCH_DATA_free].buckets, + avail, + bch2_copygc_wait_amount(c), + c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now), + &s, + cl == NULL, + ""); + else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart)) + trace_and_count(c, bucket_alloc_fail, ca, + bch2_alloc_reserves[reserve], + may_alloc_partial, + 0, + usage->d[BCH_DATA_free].buckets, + avail, + bch2_copygc_wait_amount(c), + c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now), + &s, + cl == NULL, + bch2_err_str(PTR_ERR(ob))); - if (ob == ERR_PTR(-FREELIST_EMPTY)) { - trace_bucket_alloc_fail(ca, reserve, avail, need_journal_commit); - atomic_long_inc(&c->bucket_alloc_fail); - } + return ob; +} +struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, + enum alloc_reserve reserve, + bool may_alloc_partial, + struct closure *cl) +{ + struct bch_dev_usage usage; + struct open_bucket *ob; + + bch2_trans_do(c, NULL, NULL, 0, + PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve, + may_alloc_partial, cl, &usage))); return ob; } @@ -517,11 +661,12 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c, return ret; } -void bch2_dev_stripe_increment(struct bch_dev *ca, - struct dev_stripe_state *stripe) +static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca, + struct dev_stripe_state *stripe, + struct bch_dev_usage *usage) { u64 *v = stripe->next_alloc + ca->dev_idx; - u64 free_space = dev_buckets_available(ca, RESERVE_NONE); + u64 free_space = dev_buckets_available(ca, RESERVE_none); u64 free_space_inv = free_space ? div64_u64(1ULL << 48, free_space) : 1ULL << 48; @@ -537,6 +682,15 @@ void bch2_dev_stripe_increment(struct bch_dev *ca, *v = *v < scale ? 
0 : *v - scale; } +void bch2_dev_stripe_increment(struct bch_dev *ca, + struct dev_stripe_state *stripe) +{ + struct bch_dev_usage usage; + + bch2_dev_usage_read_fast(ca, &usage); + bch2_dev_stripe_increment_inlined(ca, stripe, &usage); +} + #define BUCKET_MAY_ALLOC_PARTIAL (1 << 0) #define BUCKET_ALLOC_USE_DURABILITY (1 << 1) @@ -559,7 +713,7 @@ static void add_new_bucket(struct bch_fs *c, ob_push(c, ptrs, ob); } -int bch2_bucket_alloc_set(struct bch_fs *c, +int bch2_bucket_alloc_set_trans(struct btree_trans *trans, struct open_buckets *ptrs, struct dev_stripe_state *stripe, struct bch_devs_mask *devs_may_alloc, @@ -570,16 +724,18 @@ int bch2_bucket_alloc_set(struct bch_fs *c, unsigned flags, struct closure *cl) { + struct bch_fs *c = trans->c; struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc); unsigned dev; struct bch_dev *ca; - int ret = -INSUFFICIENT_DEVICES; + int ret = -BCH_ERR_insufficient_devices; unsigned i; BUG_ON(*nr_effective >= nr_replicas); for (i = 0; i < devs_sorted.nr; i++) { + struct bch_dev_usage usage; struct open_bucket *ob; dev = devs_sorted.devs[i]; @@ -598,16 +754,15 @@ int bch2_bucket_alloc_set(struct bch_fs *c, continue; } - ob = bch2_bucket_alloc(c, ca, reserve, - flags & BUCKET_MAY_ALLOC_PARTIAL, cl); + ob = bch2_bucket_alloc_trans(trans, ca, reserve, + flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage); if (!IS_ERR(ob)) - bch2_dev_stripe_increment(ca, stripe); + bch2_dev_stripe_increment_inlined(ca, stripe, &usage); percpu_ref_put(&ca->ref); if (IS_ERR(ob)) { ret = PTR_ERR(ob); - - if (cl) + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl) break; continue; } @@ -632,7 +787,7 @@ int bch2_bucket_alloc_set(struct bch_fs *c, * it's to a device we don't want: */ -static int bucket_alloc_from_stripe(struct bch_fs *c, +static int bucket_alloc_from_stripe(struct btree_trans *trans, struct open_buckets *ptrs, struct write_point *wp, struct bch_devs_mask *devs_may_alloc, @@ -644,6 +799,7 @@ static int bucket_alloc_from_stripe(struct bch_fs *c, unsigned flags, struct closure *cl) { + struct bch_fs *c = trans->c; struct dev_alloc_list devs_sorted; struct ec_stripe_head *h; struct open_bucket *ob; @@ -659,11 +815,11 @@ static int bucket_alloc_from_stripe(struct bch_fs *c, if (ec_open_bucket(c, ptrs)) return 0; - h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1, + h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, wp == &c->copygc_write_point, cl); if (IS_ERR(h)) - return -PTR_ERR(h); + return PTR_ERR(h); if (!h) return 0; @@ -728,7 +884,7 @@ static void get_buckets_from_writepoint(struct bch_fs *c, wp->ptrs = ptrs_skip; } -static int open_bucket_add_buckets(struct bch_fs *c, +static int open_bucket_add_buckets(struct btree_trans *trans, struct open_buckets *ptrs, struct write_point *wp, struct bch_devs_list *devs_have, @@ -741,6 +897,7 @@ static int open_bucket_add_buckets(struct bch_fs *c, unsigned flags, struct closure *_cl) { + struct bch_fs *c = trans->c; struct bch_devs_mask devs; struct open_bucket *ob; struct closure *cl = NULL; @@ -768,12 +925,13 @@ static int open_bucket_add_buckets(struct bch_fs *c, } if (!ec_open_bucket(c, ptrs)) { - ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs, + ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs, target, erasure_code, nr_replicas, nr_effective, have_cache, flags, _cl); - if (ret == -FREELIST_EMPTY || - ret == -OPEN_BUCKETS_EMPTY) + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || + bch2_err_matches(ret, BCH_ERR_freelist_empty) || + 
bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) return ret; if (*nr_effective >= nr_replicas) return 0; @@ -791,10 +949,13 @@ retry_blocking: * Try nonblocking first, so that if one device is full we'll try from * other devices: */ - ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs, + ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, nr_replicas, nr_effective, have_cache, reserve, flags, cl); - if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) { + if (ret && + !bch2_err_matches(ret, BCH_ERR_transaction_restart) && + !bch2_err_matches(ret, BCH_ERR_insufficient_devices) && + !cl && _cl) { cl = _cl; goto retry_blocking; } @@ -912,15 +1073,25 @@ static bool try_decrease_writepoints(struct bch_fs *c, return true; } -static struct write_point *writepoint_find(struct bch_fs *c, +static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans, + struct mutex *lock) +{ + if (!mutex_trylock(lock)) { + bch2_trans_unlock(trans); + mutex_lock(lock); + } +} + +static struct write_point *writepoint_find(struct btree_trans *trans, unsigned long write_point) { + struct bch_fs *c = trans->c; struct write_point *wp, *oldest; struct hlist_head *head; if (!(write_point & 1UL)) { wp = (struct write_point *) write_point; - mutex_lock(&wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); return wp; } @@ -929,7 +1100,7 @@ restart_find: wp = __writepoint_find(head, write_point); if (wp) { lock_wp: - mutex_lock(&wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); if (wp->write_point == write_point) goto out; mutex_unlock(&wp->lock); @@ -942,8 +1113,8 @@ restart_find_oldest: if (!oldest || time_before64(wp->last_used, oldest->last_used)) oldest = wp; - mutex_lock(&oldest->lock); - mutex_lock(&c->write_points_hash_lock); + bch2_trans_mutex_lock_norelock(trans, &oldest->lock); + bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock); if (oldest >= c->write_points + c->write_points_nr || try_increase_writepoints(c)) { mutex_unlock(&c->write_points_hash_lock); @@ -964,24 +1135,26 @@ restart_find_oldest: hlist_add_head_rcu(&wp->node, head); mutex_unlock(&c->write_points_hash_lock); out: - wp->last_used = sched_clock(); + wp->last_used = local_clock(); return wp; } /* * Get us an open_bucket we can allocate from, return with it locked: */ -struct write_point *bch2_alloc_sectors_start(struct bch_fs *c, - unsigned target, - unsigned erasure_code, - struct write_point_specifier write_point, - struct bch_devs_list *devs_have, - unsigned nr_replicas, - unsigned nr_replicas_required, - enum alloc_reserve reserve, - unsigned flags, - struct closure *cl) +int bch2_alloc_sectors_start_trans(struct btree_trans *trans, + unsigned target, + unsigned erasure_code, + struct write_point_specifier write_point, + struct bch_devs_list *devs_have, + unsigned nr_replicas, + unsigned nr_replicas_required, + enum alloc_reserve reserve, + unsigned flags, + struct closure *cl, + struct write_point **wp_ret) { + struct bch_fs *c = trans->c; struct write_point *wp; struct open_bucket *ob; struct open_buckets ptrs; @@ -1001,7 +1174,7 @@ retry: write_points_nr = c->write_points_nr; have_cache = false; - wp = writepoint_find(c, write_point.v); + *wp_ret = wp = writepoint_find(trans, write_point.v); if (wp->data_type == BCH_DATA_user) ob_flags |= BUCKET_MAY_ALLOC_PARTIAL; @@ -1011,21 +1184,22 @@ retry: have_cache = true; if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) { - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, + ret = open_bucket_add_buckets(trans, &ptrs, wp, 
devs_have, target, erasure_code, nr_replicas, &nr_effective, &have_cache, reserve, ob_flags, cl); } else { - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, target, erasure_code, nr_replicas, &nr_effective, &have_cache, reserve, ob_flags, NULL); - if (!ret) + if (!ret || + bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto alloc_done; - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, 0, erasure_code, nr_replicas, &nr_effective, &have_cache, reserve, @@ -1037,7 +1211,7 @@ alloc_done: if (erasure_code && !ec_open_bucket(c, &ptrs)) pr_debug("failed to get ec bucket: ret %u", ret); - if (ret == -INSUFFICIENT_DEVICES && + if (ret == -BCH_ERR_insufficient_devices && nr_effective >= nr_replicas_required) ret = 0; @@ -1057,7 +1231,7 @@ alloc_done: BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX); - return wp; + return 0; err: open_bucket_for_each(c, &wp->ptrs, ob, i) if (ptrs.nr < ARRAY_SIZE(ptrs.v)) @@ -1068,19 +1242,17 @@ err: mutex_unlock(&wp->lock); - if (ret == -FREELIST_EMPTY && + if (bch2_err_matches(ret, BCH_ERR_freelist_empty) && try_decrease_writepoints(c, write_points_nr)) goto retry; - switch (ret) { - case -OPEN_BUCKETS_EMPTY: - case -FREELIST_EMPTY: - return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC); - case -INSUFFICIENT_DEVICES: - return ERR_PTR(-EROFS); - default: - return ERR_PTR(ret); - } + if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) || + bch2_err_matches(ret, BCH_ERR_freelist_empty)) + return cl + ? -BCH_ERR_bucket_alloc_blocked + : -BCH_ERR_ENOSPC_bucket_alloc; + + return ret; } struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob) @@ -1097,34 +1269,11 @@ struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob) }; } -/* - * Append pointers to the space we just allocated to @k, and mark @sectors space - * as allocated out of @ob - */ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, struct bkey_i *k, unsigned sectors, bool cached) - { - struct open_bucket *ob; - unsigned i; - - BUG_ON(sectors > wp->sectors_free); - wp->sectors_free -= sectors; - - open_bucket_for_each(c, &wp->ptrs, ob, i) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); - struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob); - - ptr.cached = cached || - (!ca->mi.durability && - wp->data_type == BCH_DATA_user); - - bch2_bkey_append_ptr(k, ptr); - - BUG_ON(sectors > ob->sectors_free); - ob->sectors_free -= sectors; - } + bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached); } /* @@ -1133,17 +1282,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, */ void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp) { - struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 }; - struct open_bucket *ob; - unsigned i; - - open_bucket_for_each(c, &wp->ptrs, ob, i) - ob_push(c, !ob->sectors_free ? 
&ptrs : &keep, ob); - wp->ptrs = keep; - - mutex_unlock(&wp->lock); - - bch2_open_buckets_put(c, &ptrs); + bch2_alloc_sectors_done_inlined(c, wp); } static inline void writepoint_init(struct write_point *wp, @@ -1151,6 +1290,10 @@ static inline void writepoint_init(struct write_point *wp, { mutex_init(&wp->lock); wp->data_type = type; + + INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates); + INIT_LIST_HEAD(&wp->writes); + spin_lock_init(&wp->writes_lock); } void bch2_fs_allocator_foreground_init(struct bch_fs *c) @@ -1181,7 +1324,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) wp < c->write_points + c->write_points_nr; wp++) { writepoint_init(wp, BCH_DATA_user); - wp->last_used = sched_clock(); + wp->last_used = local_clock(); wp->write_point = (unsigned long) wp; hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point)); @@ -1197,12 +1340,42 @@ void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c) ob++) { spin_lock(&ob->lock); if (ob->valid && !ob->on_partial_list) { - pr_buf(out, "%zu ref %u type %s\n", + prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n", ob - c->open_buckets, atomic_read(&ob->pin), - bch2_data_types[ob->data_type]); + bch2_data_types[ob->data_type], + ob->dev, ob->bucket, ob->gen); } spin_unlock(&ob->lock); } +} +static const char * const bch2_write_point_states[] = { +#define x(n) #n, + WRITE_POINT_STATES() +#undef x + NULL +}; + +void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct write_point *wp; + unsigned i; + + for (wp = c->write_points; + wp < c->write_points + ARRAY_SIZE(c->write_points); + wp++) { + prt_printf(out, "%lu: ", wp->write_point); + prt_human_readable_u64(out, wp->sectors_allocated); + + prt_printf(out, " last wrote: "); + bch2_pr_time_units(out, sched_clock() - wp->last_used); + + for (i = 0; i < WRITE_POINT_STATE_NR; i++) { + prt_printf(out, " %s: ", bch2_write_point_states[i]); + bch2_pr_time_units(out, wp->time[i]); + } + + prt_newline(out); + } }
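
The control-flow change running through the hunks above: the caller-threaded bucket index (u64 *b) is gone, replaced by a per-device cursor (ca->alloc_cursor) that persists across allocations. A search resumes where the previous one stopped and, on failure, wraps around once so the buckets below the starting point are also considered (the again: labels in bch2_bucket_alloc_early() and bch2_bucket_alloc_freelist()); bch2_reset_alloc_cursors() resets every device's cursor so the next search starts from the beginning. Below is a minimal standalone sketch of that same resume-and-wrap pattern; it is illustrative C, not bcachefs code, and struct example_dev and example_alloc() are invented names standing in for the real structures and the freespace btree.

/*
 * Illustrative sketch only (not bcachefs code): a persistent per-device
 * allocation cursor with a single wrap-around, mirroring the control flow
 * of bch2_bucket_alloc_early()/bch2_bucket_alloc_freelist() above.
 */
#include <stdbool.h>
#include <stdint.h>

struct example_dev {
	uint64_t first_bucket;	/* first allocatable bucket */
	uint64_t nbuckets;	/* one past the last valid bucket */
	uint64_t alloc_cursor;	/* where the previous search stopped */
	bool	*is_free;	/* stand-in for the freespace btree */
};

/* Returns a free bucket index, or -1 if the device is full. */
static int64_t example_alloc(struct example_dev *ca)
{
	uint64_t start = ca->alloc_cursor > ca->first_bucket
			 ? ca->alloc_cursor : ca->first_bucket;
	uint64_t cursor = start;
again:
	for (; cursor < ca->nbuckets; cursor++) {
		if (ca->is_free[cursor]) {
			ca->is_free[cursor] = false;
			/* Resume just past this bucket next time: */
			ca->alloc_cursor = cursor + 1;
			return (int64_t) cursor;
		}
	}

	/*
	 * Nothing free from the saved cursor to the end of the device:
	 * wrap around once so buckets below the cursor are considered too.
	 */
	if (start > ca->first_bucket) {
		cursor = start = ca->first_bucket;
		goto again;
	}

	return -1;
}

Keeping the cursor per device means successive allocations walk the device roughly sequentially instead of re-scanning the same low-numbered buckets on every call, at the cost of one full re-scan in the worst case before a device is reported full.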