X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Falloc_foreground.c;h=3c5100c269161bfcb3dfa3cf7209930be5591f17;hb=ba88873432ec9621b9913c00b31d7401357b5e55;hp=8f0b94f591bedd7f8645f32faf696ace430c092f;hpb=9fce394ca6d0082ced3612a627cd16e06d84244a;p=bcachefs-tools-debian diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c index 8f0b94f..3c5100c 100644 --- a/libbcachefs/alloc_foreground.c +++ b/libbcachefs/alloc_foreground.c @@ -1,74 +1,55 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Primary bucket allocation code - * * Copyright 2012 Google, Inc. * - * Allocation in bcache is done in terms of buckets: - * - * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in - * btree pointers - they must match for the pointer to be considered valid. - * - * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a - * bucket simply by incrementing its gen. - * - * The gens (along with the priorities; it's really the gens are important but - * the code is named as if it's the priorities) are written in an arbitrary list - * of buckets on disk, with a pointer to them in the journal header. - * - * When we invalidate a bucket, we have to write its new gen to disk and wait - * for that write to complete before we use it - otherwise after a crash we - * could have pointers that appeared to be good but pointed to data that had - * been overwritten. - * - * Since the gens and priorities are all stored contiguously on disk, we can - * batch this up: We fill up the free_inc list with freshly invalidated buckets, - * call prio_write(), and when prio_write() finishes we pull buckets off the - * free_inc list and optionally discard them. - * - * free_inc isn't the only freelist - if it was, we'd often have to sleep while - * priorities and gens were being written before we could allocate. c->free is a - * smaller freelist, and buckets on that list are always ready to be used. - * - * If we've got discards enabled, that happens when a bucket moves from the - * free_inc list to the free list. - * - * It's important to ensure that gens don't wrap around - with respect to - * either the oldest gen in the btree or the gen on disk. This is quite - * difficult to do in practice, but we explicitly guard against it anyways - if - * a bucket is in danger of wrapping around we simply skip invalidating it that - * time around, and we garbage collect or rewrite the priorities sooner than we - * would have otherwise. + * Foreground allocator code: allocate buckets from freelist, and allocate in + * sector granularity from writepoints. * * bch2_bucket_alloc() allocates a single bucket from a specific device. * * bch2_bucket_alloc_set() allocates one or more buckets from different devices * in a given filesystem. - * - * invalidate_buckets() drives all the processes described above. It's called - * from bch2_bucket_alloc() and a few other places that need to make sure free - * buckets are ready. - * - * invalidate_buckets_(lru|fifo)() find buckets that are available to be - * invalidated, and then invalidate them and stick them on the free_inc list - - * in either lru or fifo order. 
*/ #include "bcachefs.h" #include "alloc_background.h" #include "alloc_foreground.h" +#include "backpointers.h" +#include "btree_iter.h" +#include "btree_update.h" #include "btree_gc.h" #include "buckets.h" +#include "buckets_waiting_for_journal.h" #include "clock.h" #include "debug.h" #include "disk_groups.h" #include "ec.h" +#include "error.h" #include "io.h" +#include "journal.h" +#include "movinggc.h" +#include "nocow_locking.h" +#include "trace.h" #include #include #include -#include + +static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans, + struct mutex *lock) +{ + if (!mutex_trylock(lock)) { + bch2_trans_unlock(trans); + mutex_lock(lock); + } +} + +const char * const bch2_alloc_reserves[] = { +#define x(t) #t, + BCH_ALLOC_RESERVES() +#undef x + NULL +}; /* * Open buckets represent a bucket that's currently being allocated from. They @@ -86,30 +67,66 @@ * reference _after_ doing the index update that makes its allocation reachable. */ +void bch2_reset_alloc_cursors(struct bch_fs *c) +{ + struct bch_dev *ca; + unsigned i; + + rcu_read_lock(); + for_each_member_device_rcu(ca, c, i, NULL) + ca->alloc_cursor = 0; + rcu_read_unlock(); +} + +static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob) +{ + open_bucket_idx_t idx = ob - c->open_buckets; + open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket); + + ob->hash = *slot; + *slot = idx; +} + +static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob) +{ + open_bucket_idx_t idx = ob - c->open_buckets; + open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket); + + while (*slot != idx) { + BUG_ON(!*slot); + slot = &c->open_buckets[*slot].hash; + } + + *slot = ob->hash; + ob->hash = 0; +} + void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev); + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); if (ob->ec) { - bch2_ec_bucket_written(c, ob); + ec_stripe_new_put(c, ob->ec, STRIPE_REF_io); return; } percpu_down_read(&c->mark_lock); spin_lock(&ob->lock); - bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), - false, gc_pos_alloc(c, ob), 0); ob->valid = false; - ob->type = 0; + ob->data_type = 0; spin_unlock(&ob->lock); percpu_up_read(&c->mark_lock); spin_lock(&c->freelist_lock); + bch2_open_bucket_hash_remove(c, ob); + ob->freelist = c->open_buckets_freelist; c->open_buckets_freelist = ob - c->open_buckets; + c->open_buckets_nr_free++; + ca->nr_open_buckets--; spin_unlock(&c->freelist_lock); closure_wake_up(&c->open_buckets_wait); @@ -123,8 +140,7 @@ void bch2_open_bucket_write_error(struct bch_fs *c, unsigned i; open_bucket_for_each(c, obs, ob, i) - if (ob->ptr.dev == dev && - ob->ec) + if (ob->dev == dev && ob->ec) bch2_ec_bucket_cancel(c, ob); } @@ -137,117 +153,86 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c) ob = c->open_buckets + c->open_buckets_freelist; c->open_buckets_freelist = ob->freelist; atomic_set(&ob->pin, 1); - ob->type = 0; + ob->data_type = 0; c->open_buckets_nr_free--; return ob; } -static void open_bucket_free_unused(struct bch_fs *c, - struct write_point *wp, - struct open_bucket *ob) +static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev); - bool may_realloc = wp->type == BCH_DATA_user; - - BUG_ON(ca->open_buckets_partial_nr > - ARRAY_SIZE(ca->open_buckets_partial)); + BUG_ON(c->open_buckets_partial_nr >= + 
ARRAY_SIZE(c->open_buckets_partial)); - if (ca->open_buckets_partial_nr < - ARRAY_SIZE(ca->open_buckets_partial) && - may_realloc) { - spin_lock(&c->freelist_lock); - ob->on_partial_list = true; - ca->open_buckets_partial[ca->open_buckets_partial_nr++] = - ob - c->open_buckets; - spin_unlock(&c->freelist_lock); - - closure_wake_up(&c->open_buckets_wait); - closure_wake_up(&c->freelist_wait); - } else { - bch2_open_bucket_put(c, ob); - } -} - -static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs) -{ -#ifdef CONFIG_BCACHEFS_DEBUG - struct open_bucket *ob; - unsigned i; - - open_bucket_for_each(c, obs, ob, i) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev); + spin_lock(&c->freelist_lock); + ob->on_partial_list = true; + c->open_buckets_partial[c->open_buckets_partial_nr++] = + ob - c->open_buckets; + spin_unlock(&c->freelist_lock); - BUG_ON(ptr_stale(ca, &ob->ptr)); - } -#endif + closure_wake_up(&c->open_buckets_wait); + closure_wake_up(&c->freelist_wait); } /* _only_ for allocating the journal on a new device: */ long bch2_bucket_alloc_new_fs(struct bch_dev *ca) { - struct bucket_array *buckets; - ssize_t b; + while (ca->new_fs_bucket_idx < ca->mi.nbuckets) { + u64 b = ca->new_fs_bucket_idx++; - rcu_read_lock(); - buckets = bucket_array(ca); - - for (b = buckets->first_bucket; b < buckets->nbuckets; b++) - if (is_available_bucket(buckets->b[b].mark) && - !buckets->b[b].mark.owned_by_allocator) - goto success; - b = -1; -success: - rcu_read_unlock(); - return b; + if (!is_superblock_bucket(ca, b) && + (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse))) + return b; + } + + return -1; } static inline unsigned open_buckets_reserved(enum alloc_reserve reserve) { switch (reserve) { - case RESERVE_BTREE: - case RESERVE_BTREE_MOVINGGC: + case RESERVE_btree: + case RESERVE_btree_movinggc: return 0; - case RESERVE_MOVINGGC: + case RESERVE_movinggc: return OPEN_BUCKETS_COUNT / 4; default: return OPEN_BUCKETS_COUNT / 2; } } -/** - * bch_bucket_alloc - allocate a single bucket from a specific device - * - * Returns index of bucket on success, 0 on failure - * */ -struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, - enum alloc_reserve reserve, - bool may_alloc_partial, - struct closure *cl) +static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca, + u64 bucket, + enum alloc_reserve reserve, + const struct bch_alloc_v4 *a, + struct bucket_alloc_state *s, + struct closure *cl) { struct open_bucket *ob; - long b = 0; - spin_lock(&c->freelist_lock); + if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { + s->skipped_nouse++; + return NULL; + } - if (may_alloc_partial) { - int i; + if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { + s->skipped_open++; + return NULL; + } - for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) { - ob = c->open_buckets + ca->open_buckets_partial[i]; + if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, + c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) { + s->skipped_need_journal_commit++; + return NULL; + } - if (reserve <= ob->alloc_reserve) { - array_remove_item(ca->open_buckets_partial, - ca->open_buckets_partial_nr, - i); - ob->on_partial_list = false; - ob->alloc_reserve = reserve; - spin_unlock(&c->freelist_lock); - return ob; - } - } + if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) { + s->skipped_nocow++; + return NULL; } + spin_lock(&c->freelist_lock); + if (unlikely(c->open_buckets_nr_free <= 
open_buckets_reserved(reserve))) { if (cl) closure_wait(&c->open_buckets_wait, cl); @@ -256,52 +241,30 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, c->blocked_allocate_open_bucket = local_clock(); spin_unlock(&c->freelist_lock); - trace_open_bucket_alloc_fail(ca, reserve); - return ERR_PTR(-OPEN_BUCKETS_EMPTY); + return ERR_PTR(-BCH_ERR_open_buckets_empty); } - if (likely(fifo_pop(&ca->free[RESERVE_NONE], b))) - goto out; - - switch (reserve) { - case RESERVE_BTREE_MOVINGGC: - case RESERVE_MOVINGGC: - if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b)) - goto out; - break; - default: - break; + /* Recheck under lock: */ + if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { + spin_unlock(&c->freelist_lock); + s->skipped_open++; + return NULL; } - if (cl) - closure_wait(&c->freelist_wait, cl); - - if (!c->blocked_allocate) - c->blocked_allocate = local_clock(); - - spin_unlock(&c->freelist_lock); - - trace_bucket_alloc_fail(ca, reserve); - return ERR_PTR(-FREELIST_EMPTY); -out: - verify_not_on_freelist(c, ca, b); - ob = bch2_open_bucket_alloc(c); spin_lock(&ob->lock); ob->valid = true; ob->sectors_free = ca->mi.bucket_size; - ob->alloc_reserve = reserve; - ob->ptr = (struct bch_extent_ptr) { - .type = 1 << BCH_EXTENT_ENTRY_ptr, - .gen = bucket(ca, b)->mark.gen, - .offset = bucket_to_sector(ca, b), - .dev = ca->dev_idx, - }; - + ob->dev = ca->dev_idx; + ob->gen = a->gen; + ob->bucket = bucket; spin_unlock(&ob->lock); + ca->nr_open_buckets++; + bch2_open_bucket_hash_add(c, ob); + if (c->blocked_allocate_open_bucket) { bch2_time_stats_update( &c->times[BCH_TIME_blocked_allocate_open_bucket], @@ -317,10 +280,324 @@ out: } spin_unlock(&c->freelist_lock); + return ob; +} + +static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca, + enum alloc_reserve reserve, u64 free_entry, + struct bucket_alloc_state *s, + struct bkey_s_c freespace_k, + struct closure *cl) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter = { NULL }; + struct bkey_s_c k; + struct open_bucket *ob; + struct bch_alloc_v4 a_convert; + const struct bch_alloc_v4 *a; + u64 b = free_entry & ~(~0ULL << 56); + unsigned genbits = free_entry >> 56; + struct printbuf buf = PRINTBUF; + int ret; + + if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) { + prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n" + " freespace key ", + ca->mi.first_bucket, ca->mi.nbuckets); + bch2_bkey_val_to_text(&buf, c, freespace_k); + bch2_trans_inconsistent(trans, "%s", buf.buf); + ob = ERR_PTR(-EIO); + goto err; + } + + k = bch2_bkey_get_iter(trans, &iter, + BTREE_ID_alloc, POS(ca->dev_idx, b), + BTREE_ITER_CACHED); + ret = bkey_err(k); + if (ret) { + ob = ERR_PTR(ret); + goto err; + } + + a = bch2_alloc_to_v4(k, &a_convert); + + if (a->data_type != BCH_DATA_free) { + if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) { + ob = NULL; + goto err; + } + + prt_printf(&buf, "non free bucket in freespace btree\n" + " freespace key "); + bch2_bkey_val_to_text(&buf, c, freespace_k); + prt_printf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + bch2_trans_inconsistent(trans, "%s", buf.buf); + ob = ERR_PTR(-EIO); + goto err; + } - bch2_wake_allocator(ca); + if (genbits != (alloc_freespace_genbits(*a) >> 56) && + test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) { + prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n" + " freespace key ", + genbits, alloc_freespace_genbits(*a) >> 56); + bch2_bkey_val_to_text(&buf, c, freespace_k); + 
prt_printf(&buf, "\n ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ bch2_trans_inconsistent(trans, "%s", buf.buf);
+ ob = ERR_PTR(-EIO);
+ goto err;
- trace_bucket_alloc(ca, reserve);
+ }
+
+ if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+ struct bch_backpointer bp;
+ struct bpos bp_pos = POS_MIN;
+
+ ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+ &bp_pos, &bp,
+ BTREE_ITER_NOPRESERVE);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ goto err;
+ }
+
+ if (!bkey_eq(bp_pos, POS_MAX)) {
+ /*
+ * Bucket may have data in it - we don't call
+ * bch2_trans_inconsistent() because fsck hasn't
+ * finished yet
+ */
+ ob = NULL;
+ goto err;
+ }
+ }
+
+ ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+ if (!ob)
+ iter.path->preserve = false;
+err:
+ if (iter.trans && iter.path)
+ set_btree_iter_dontneed(&iter);
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ob;
+}
+
+/*
+ * This path is for before the freespace btree is initialized:
+ *
+ * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
+ * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ */
+static noinline struct open_bucket *
+bch2_bucket_alloc_early(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ struct bucket_alloc_state *s,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+ u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
+ int ret;
+again:
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
+ BTREE_ITER_SLOTS, k, ret) {
+ struct bch_alloc_v4 a_convert;
+ const struct bch_alloc_v4 *a;
+
+ if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
+ break;
+
+ if (ca->new_fs_bucket_idx &&
+ is_superblock_bucket(ca, k.k->p.offset))
+ continue;
+
+ a = bch2_alloc_to_v4(k, &a_convert);
+
+ if (a->data_type != BCH_DATA_free)
+ continue;
+
+ s->buckets_seen++;
+
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+ if (ob)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ ca->alloc_cursor = alloc_cursor;
+
+ if (!ob && ret)
+ ob = ERR_PTR(ret);
+
+ if (!ob && alloc_cursor > alloc_start) {
+ alloc_cursor = alloc_start;
+ goto again;
+ }
+
+ return ob;
+}
+
+static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ struct bucket_alloc_state *s,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+ u64 alloc_cursor = alloc_start;
+ int ret;
+
+ BUG_ON(ca->new_fs_bucket_idx);
+again:
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
+ POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
+ if (k.k->p.inode != ca->dev_idx)
+ break;
+
+ for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
+ alloc_cursor < k.k->p.offset;
+ alloc_cursor++) {
+ ret = btree_trans_too_many_iters(trans);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ break;
+ }
+
+ s->buckets_seen++;
+
+ ob = try_alloc_bucket(trans, ca, reserve,
+ alloc_cursor, s, k, cl);
+ if (ob) {
+ iter.path->preserve = false;
+ break;
+ }
+ }
+
+ if (ob || ret)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ ca->alloc_cursor = alloc_cursor;
+
+ if (!ob && ret)
+ ob = ERR_PTR(ret);
+
+ if (!ob && alloc_start > ca->mi.first_bucket) {
+ alloc_cursor = 
alloc_start = ca->mi.first_bucket;
+ goto again;
+ }
+
+ return ob;
+}
+
+/**
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ *
+ * Returns an open_bucket on success, or an ERR_PTR() on failure
+ */
+static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ struct closure *cl,
+ struct bch_dev_usage *usage)
+{
+ struct bch_fs *c = trans->c;
+ struct open_bucket *ob = NULL;
+ bool freespace = READ_ONCE(ca->mi.freespace_initialized);
+ u64 avail;
+ struct bucket_alloc_state s = { 0 };
+ bool waiting = false;
+again:
+ bch2_dev_usage_read_fast(ca, usage);
+ avail = dev_buckets_free(ca, *usage, reserve);
+
+ if (usage->d[BCH_DATA_need_discard].buckets > avail)
+ bch2_do_discards(c);
+
+ if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+ bch2_do_gc_gens(c);
+
+ if (should_invalidate_buckets(ca, *usage))
+ bch2_do_invalidates(c);
+
+ if (!avail) {
+ if (cl && !waiting) {
+ closure_wait(&c->freelist_wait, cl);
+ waiting = true;
+ goto again;
+ }
+
+ if (!c->blocked_allocate)
+ c->blocked_allocate = local_clock();
+
+ ob = ERR_PTR(-BCH_ERR_freelist_empty);
+ goto err;
+ }
+
+ if (waiting)
+ closure_wake_up(&c->freelist_wait);
+alloc:
+ ob = likely(freespace)
+ ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
+ : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+
+ if (s.skipped_need_journal_commit * 2 > avail)
+ bch2_journal_flush_async(&c->journal, NULL);
+
+ if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+ freespace = false;
+ goto alloc;
+ }
+err:
+ if (!ob)
+ ob = ERR_PTR(-BCH_ERR_no_buckets_found);
+
+ if (!IS_ERR(ob))
+ trace_and_count(c, bucket_alloc, ca,
+ bch2_alloc_reserves[reserve],
+ ob->bucket,
+ usage->d[BCH_DATA_free].buckets,
+ avail,
+ bch2_copygc_wait_amount(c),
+ c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+ &s,
+ cl == NULL,
+ "");
+ else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
+ trace_and_count(c, bucket_alloc_fail, ca,
+ bch2_alloc_reserves[reserve],
+ 0,
+ usage->d[BCH_DATA_free].buckets,
+ avail,
+ bch2_copygc_wait_amount(c),
+ c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+ &s,
+ cl == NULL,
+ bch2_err_str(PTR_ERR(ob)));
+
+ return ob;
+}
+
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ struct closure *cl)
+{
+ struct bch_dev_usage usage;
+ struct open_bucket *ob;
+
+ bch2_trans_do(c, NULL, NULL, 0,
+ PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+ cl, &usage)));
 return ob;
 }

@@ -347,11 +624,12 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
 return ret;
 }

-void bch2_dev_stripe_increment(struct bch_dev *ca,
- struct dev_stripe_state *stripe)
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+ struct dev_stripe_state *stripe,
+ struct bch_dev_usage *usage)
 {
 u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_free(ca);
+ u64 free_space = dev_buckets_available(ca, RESERVE_none);
 u64 free_space_inv = free_space
 ? div64_u64(1ULL << 48, free_space)
 : 1ULL << 48;
@@ -367,75 +645,106 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
 *v = *v < scale ? 
0 : *v - scale; } -#define BUCKET_MAY_ALLOC_PARTIAL (1 << 0) -#define BUCKET_ALLOC_USE_DURABILITY (1 << 1) +void bch2_dev_stripe_increment(struct bch_dev *ca, + struct dev_stripe_state *stripe) +{ + struct bch_dev_usage usage; + + bch2_dev_usage_read_fast(ca, &usage); + bch2_dev_stripe_increment_inlined(ca, stripe, &usage); +} -static void add_new_bucket(struct bch_fs *c, +static int add_new_bucket(struct bch_fs *c, struct open_buckets *ptrs, struct bch_devs_mask *devs_may_alloc, + unsigned nr_replicas, unsigned *nr_effective, bool *have_cache, unsigned flags, struct open_bucket *ob) { unsigned durability = - bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability; + bch_dev_bkey_exists(c, ob->dev)->mi.durability; - __clear_bit(ob->ptr.dev, devs_may_alloc->d); - *nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY) + BUG_ON(*nr_effective >= nr_replicas); + BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS); + + __clear_bit(ob->dev, devs_may_alloc->d); + *nr_effective += (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) ? durability : 1; *have_cache |= !durability; ob_push(c, ptrs, ob); + + if (*nr_effective >= nr_replicas) + return 1; + if (ob->ec) + return 1; + return 0; } -enum bucket_alloc_ret -bch2_bucket_alloc_set(struct bch_fs *c, +int bch2_bucket_alloc_set_trans(struct btree_trans *trans, struct open_buckets *ptrs, struct dev_stripe_state *stripe, struct bch_devs_mask *devs_may_alloc, unsigned nr_replicas, unsigned *nr_effective, bool *have_cache, - enum alloc_reserve reserve, unsigned flags, + enum bch_data_type data_type, + enum alloc_reserve reserve, struct closure *cl) { + struct bch_fs *c = trans->c; struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc); + unsigned dev; struct bch_dev *ca; - enum bucket_alloc_ret ret = INSUFFICIENT_DEVICES; + int ret = -BCH_ERR_insufficient_devices; unsigned i; BUG_ON(*nr_effective >= nr_replicas); for (i = 0; i < devs_sorted.nr; i++) { + struct bch_dev_usage usage; struct open_bucket *ob; - ca = rcu_dereference(c->devs[devs_sorted.devs[i]]); + dev = devs_sorted.devs[i]; + + rcu_read_lock(); + ca = rcu_dereference(c->devs[dev]); + if (ca) + percpu_ref_get(&ca->ref); + rcu_read_unlock(); + if (!ca) continue; - if (!ca->mi.durability && *have_cache) + if (!ca->mi.durability && *have_cache) { + percpu_ref_put(&ca->ref); continue; + } - ob = bch2_bucket_alloc(c, ca, reserve, - flags & BUCKET_MAY_ALLOC_PARTIAL, cl); - if (IS_ERR(ob)) { - ret = -PTR_ERR(ob); + ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage); + if (!IS_ERR(ob)) + bch2_dev_stripe_increment_inlined(ca, stripe, &usage); + percpu_ref_put(&ca->ref); - if (cl) - return ret; + if (IS_ERR(ob)) { + ret = PTR_ERR(ob); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl) + break; continue; } - add_new_bucket(c, ptrs, devs_may_alloc, - nr_effective, have_cache, flags, ob); + ob->data_type = data_type; - bch2_dev_stripe_increment(ca, stripe); - - if (*nr_effective >= nr_replicas) - return ALLOC_SUCCESS; + if (add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob)) { + ret = 0; + break; + } } return ret; @@ -449,27 +758,25 @@ bch2_bucket_alloc_set(struct bch_fs *c, * it's to a device we don't want: */ -static enum bucket_alloc_ret -bucket_alloc_from_stripe(struct bch_fs *c, +static int bucket_alloc_from_stripe(struct btree_trans *trans, struct open_buckets *ptrs, struct write_point *wp, struct bch_devs_mask *devs_may_alloc, u16 target, - unsigned erasure_code, unsigned nr_replicas, unsigned *nr_effective, bool *have_cache, + 
enum alloc_reserve reserve, unsigned flags, struct closure *cl) { + struct bch_fs *c = trans->c; struct dev_alloc_list devs_sorted; struct ec_stripe_head *h; struct open_bucket *ob; struct bch_dev *ca; unsigned i, ec_idx; - - if (!erasure_code) - return 0; + int ret = 0; if (nr_replicas < 2) return 0; @@ -477,11 +784,9 @@ bucket_alloc_from_stripe(struct bch_fs *c, if (ec_open_bucket(c, ptrs)) return 0; - h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1, - wp == &c->copygc_write_point, - cl); + h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl); if (IS_ERR(h)) - return -PTR_ERR(h); + return PTR_ERR(h); if (!h) return 0; @@ -493,66 +798,136 @@ bucket_alloc_from_stripe(struct bch_fs *c, continue; ob = c->open_buckets + h->s->blocks[ec_idx]; - if (ob->ptr.dev == devs_sorted.devs[i] && + if (ob->dev == devs_sorted.devs[i] && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) goto got_bucket; } goto out_put_head; got_bucket: - ca = bch_dev_bkey_exists(c, ob->ptr.dev); + ca = bch_dev_bkey_exists(c, ob->dev); ob->ec_idx = ec_idx; ob->ec = h->s; + ec_stripe_new_get(h->s, STRIPE_REF_io); - add_new_bucket(c, ptrs, devs_may_alloc, - nr_effective, have_cache, flags, ob); - atomic_inc(&h->s->pin); + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); out_put_head: bch2_ec_stripe_head_put(c, h); - return 0; + return ret; } /* Sector allocator */ -static void get_buckets_from_writepoint(struct bch_fs *c, - struct open_buckets *ptrs, - struct write_point *wp, - struct bch_devs_mask *devs_may_alloc, - unsigned nr_replicas, - unsigned *nr_effective, - bool *have_cache, - unsigned flags, - bool need_ec) +static bool want_bucket(struct bch_fs *c, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + bool *have_cache, bool ec, + struct open_bucket *ob) +{ + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + + if (!test_bit(ob->dev, devs_may_alloc->d)) + return false; + + if (ob->data_type != wp->data_type) + return false; + + if (!ca->mi.durability && + (wp->data_type == BCH_DATA_btree || ec || *have_cache)) + return false; + + if (ec != (ob->ec != NULL)) + return false; + + return true; +} + +static int bucket_alloc_set_writepoint(struct bch_fs *c, + struct open_buckets *ptrs, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + unsigned nr_replicas, + unsigned *nr_effective, + bool *have_cache, + bool ec, unsigned flags) { struct open_buckets ptrs_skip = { .nr = 0 }; struct open_bucket *ob; unsigned i; + int ret = 0; open_bucket_for_each(c, &wp->ptrs, ob, i) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev); - - if (*nr_effective < nr_replicas && - test_bit(ob->ptr.dev, devs_may_alloc->d) && - (ca->mi.durability || - (wp->type == BCH_DATA_user && !*have_cache)) && - (ob->ec || !need_ec)) { - add_new_bucket(c, ptrs, devs_may_alloc, - nr_effective, have_cache, - flags, ob); - } else { + if (!ret && want_bucket(c, wp, devs_may_alloc, + have_cache, ec, ob)) + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); + else ob_push(c, &ptrs_skip, ob); - } } wp->ptrs = ptrs_skip; + + return ret; } -static enum bucket_alloc_ret -open_bucket_add_buckets(struct bch_fs *c, +static int bucket_alloc_set_partial(struct bch_fs *c, + struct open_buckets *ptrs, + struct write_point *wp, + struct bch_devs_mask *devs_may_alloc, + unsigned nr_replicas, + unsigned *nr_effective, + bool *have_cache, bool ec, + enum alloc_reserve reserve, + unsigned flags) +{ + 
int i, ret = 0; + + if (!c->open_buckets_partial_nr) + return 0; + + spin_lock(&c->freelist_lock); + + if (!c->open_buckets_partial_nr) + goto unlock; + + for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) { + struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i]; + + if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) { + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + struct bch_dev_usage usage; + u64 avail; + + bch2_dev_usage_read_fast(ca, &usage); + avail = dev_buckets_free(ca, usage, reserve); + if (!avail) + continue; + + array_remove_item(c->open_buckets_partial, + c->open_buckets_partial_nr, + i); + ob->on_partial_list = false; + + ret = add_new_bucket(c, ptrs, devs_may_alloc, + nr_replicas, nr_effective, + have_cache, flags, ob); + if (ret) + break; + } + } +unlock: + spin_unlock(&c->freelist_lock); + return ret; +} + +static int __open_bucket_add_buckets(struct btree_trans *trans, struct open_buckets *ptrs, struct write_point *wp, struct bch_devs_list *devs_have, u16 target, - unsigned erasure_code, + bool erasure_code, unsigned nr_replicas, unsigned *nr_effective, bool *have_cache, @@ -560,112 +935,191 @@ open_bucket_add_buckets(struct bch_fs *c, unsigned flags, struct closure *_cl) { + struct bch_fs *c = trans->c; struct bch_devs_mask devs; struct open_bucket *ob; struct closure *cl = NULL; - enum bucket_alloc_ret ret; unsigned i; + int ret; - rcu_read_lock(); - devs = target_rw_devs(c, wp->type, target); - rcu_read_unlock(); + devs = target_rw_devs(c, wp->data_type, target); /* Don't allocate from devices we already have pointers to: */ for (i = 0; i < devs_have->nr; i++) __clear_bit(devs_have->devs[i], devs.d); open_bucket_for_each(c, ptrs, ob, i) - __clear_bit(ob->ptr.dev, devs.d); + __clear_bit(ob->dev, devs.d); + + if (erasure_code && ec_open_bucket(c, ptrs)) + return 0; + + ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs, + nr_replicas, nr_effective, + have_cache, erasure_code, flags); + if (ret) + return ret; + + ret = bucket_alloc_set_partial(c, ptrs, wp, &devs, + nr_replicas, nr_effective, + have_cache, erasure_code, reserve, flags); + if (ret) + return ret; if (erasure_code) { - if (!ec_open_bucket(c, ptrs)) { - get_buckets_from_writepoint(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, flags, true); - if (*nr_effective >= nr_replicas) - return 0; + ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs, + target, + nr_replicas, nr_effective, + have_cache, + reserve, flags, _cl); + } else { +retry_blocking: + /* + * Try nonblocking first, so that if one device is full we'll try from + * other devices: + */ + ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs, + nr_replicas, nr_effective, have_cache, + flags, wp->data_type, reserve, cl); + if (ret && + !bch2_err_matches(ret, BCH_ERR_transaction_restart) && + !bch2_err_matches(ret, BCH_ERR_insufficient_devices) && + !cl && _cl) { + cl = _cl; + goto retry_blocking; } - if (!ec_open_bucket(c, ptrs)) { - ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs, - target, erasure_code, - nr_replicas, nr_effective, - have_cache, flags, _cl); - if (ret == FREELIST_EMPTY || - ret == OPEN_BUCKETS_EMPTY) - return ret; - if (*nr_effective >= nr_replicas) - return 0; - } } - get_buckets_from_writepoint(c, ptrs, wp, &devs, - nr_replicas, nr_effective, - have_cache, flags, false); - if (*nr_effective >= nr_replicas) - return 0; + return ret; +} - percpu_down_read(&c->mark_lock); - rcu_read_lock(); +static int open_bucket_add_buckets(struct btree_trans *trans, + struct 
open_buckets *ptrs, + struct write_point *wp, + struct bch_devs_list *devs_have, + u16 target, + unsigned erasure_code, + unsigned nr_replicas, + unsigned *nr_effective, + bool *have_cache, + enum alloc_reserve reserve, + unsigned flags, + struct closure *cl) +{ + int ret; -retry_blocking: - /* - * Try nonblocking first, so that if one device is full we'll try from - * other devices: - */ - ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs, + if (erasure_code) { + ret = __open_bucket_add_buckets(trans, ptrs, wp, + devs_have, target, erasure_code, nr_replicas, nr_effective, have_cache, reserve, flags, cl); - if (ret && ret != INSUFFICIENT_DEVICES && !cl && _cl) { - cl = _cl; - goto retry_blocking; + if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || + bch2_err_matches(ret, BCH_ERR_operation_blocked) || + bch2_err_matches(ret, BCH_ERR_freelist_empty) || + bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) + return ret; + if (*nr_effective >= nr_replicas) + return 0; } - rcu_read_unlock(); - percpu_up_read(&c->mark_lock); - - return ret; + ret = __open_bucket_add_buckets(trans, ptrs, wp, + devs_have, target, false, + nr_replicas, nr_effective, have_cache, + reserve, flags, cl); + return ret < 0 ? ret : 0; } -void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca, - struct open_buckets *obs) +static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c, + struct bch_dev *ca, bool ec) { - struct open_buckets ptrs = { .nr = 0 }; - struct open_bucket *ob, *ob2; - unsigned i, j; - - open_bucket_for_each(c, obs, ob, i) { - bool drop = !ca || ob->ptr.dev == ca->dev_idx; + if (ec) { + return ob->ec != NULL; + } else if (ca) { + bool drop = ob->dev == ca->dev_idx; + struct open_bucket *ob2; + unsigned i; if (!drop && ob->ec) { mutex_lock(&ob->ec->lock); - for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) { - if (!ob->ec->blocks[j]) + for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) { + if (!ob->ec->blocks[i]) continue; - ob2 = c->open_buckets + ob->ec->blocks[j]; - drop |= ob2->ptr.dev == ca->dev_idx; + ob2 = c->open_buckets + ob->ec->blocks[i]; + drop |= ob2->dev == ca->dev_idx; } mutex_unlock(&ob->ec->lock); } - if (drop) - bch2_open_bucket_put(c, ob); - else - ob_push(c, &ptrs, ob); + return drop; + } else { + return true; } - - *obs = ptrs; } -void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca, - struct write_point *wp) +static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca, + bool ec, struct write_point *wp) { + struct open_buckets ptrs = { .nr = 0 }; + struct open_bucket *ob; + unsigned i; + mutex_lock(&wp->lock); - bch2_open_buckets_stop_dev(c, ca, &wp->ptrs); + open_bucket_for_each(c, &wp->ptrs, ob, i) + if (should_drop_bucket(ob, c, ca, ec)) + bch2_open_bucket_put(c, ob); + else + ob_push(c, &ptrs, ob); + wp->ptrs = ptrs; mutex_unlock(&wp->lock); } +void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca, + bool ec) +{ + unsigned i; + + /* Next, close write points that point to this device... 
*/ + for (i = 0; i < ARRAY_SIZE(c->write_points); i++) + bch2_writepoint_stop(c, ca, ec, &c->write_points[i]); + + bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point); + bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point); + bch2_writepoint_stop(c, ca, ec, &c->btree_write_point); + + mutex_lock(&c->btree_reserve_cache_lock); + while (c->btree_reserve_cache_nr) { + struct btree_alloc *a = + &c->btree_reserve_cache[--c->btree_reserve_cache_nr]; + + bch2_open_buckets_put(c, &a->ob); + } + mutex_unlock(&c->btree_reserve_cache_lock); + + spin_lock(&c->freelist_lock); + i = 0; + while (i < c->open_buckets_partial_nr) { + struct open_bucket *ob = + c->open_buckets + c->open_buckets_partial[i]; + + if (should_drop_bucket(ob, c, ca, ec)) { + --c->open_buckets_partial_nr; + swap(c->open_buckets_partial[i], + c->open_buckets_partial[c->open_buckets_partial_nr]); + ob->on_partial_list = false; + spin_unlock(&c->freelist_lock); + bch2_open_bucket_put(c, ob); + spin_lock(&c->freelist_lock); + } else { + i++; + } + } + spin_unlock(&c->freelist_lock); + + bch2_ec_stop_dev(c, ca); +} + static inline struct hlist_head *writepoint_hash(struct bch_fs *c, unsigned long write_point) { @@ -680,11 +1134,14 @@ static struct write_point *__writepoint_find(struct hlist_head *head, { struct write_point *wp; + rcu_read_lock(); hlist_for_each_entry_rcu(wp, head, node) if (wp->write_point == write_point) - return wp; - - return NULL; + goto out; + wp = NULL; +out: + rcu_read_unlock(); + return wp; } static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor) @@ -708,10 +1165,12 @@ static bool try_increase_writepoints(struct bch_fs *c) return true; } -static bool try_decrease_writepoints(struct bch_fs *c, - unsigned old_nr) +static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr) { + struct bch_fs *c = trans->c; struct write_point *wp; + struct open_bucket *ob; + unsigned i; mutex_lock(&c->write_points_hash_lock); if (c->write_points_nr < old_nr) { @@ -730,19 +1189,23 @@ static bool try_decrease_writepoints(struct bch_fs *c, hlist_del_rcu(&wp->node); mutex_unlock(&c->write_points_hash_lock); - bch2_writepoint_stop(c, NULL, wp); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); + open_bucket_for_each(c, &wp->ptrs, ob, i) + open_bucket_free_unused(c, ob); + mutex_unlock(&wp->lock); return true; } -static struct write_point *writepoint_find(struct bch_fs *c, +static struct write_point *writepoint_find(struct btree_trans *trans, unsigned long write_point) { + struct bch_fs *c = trans->c; struct write_point *wp, *oldest; struct hlist_head *head; if (!(write_point & 1UL)) { wp = (struct write_point *) write_point; - mutex_lock(&wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); return wp; } @@ -751,7 +1214,7 @@ restart_find: wp = __writepoint_find(head, write_point); if (wp) { lock_wp: - mutex_lock(&wp->lock); + bch2_trans_mutex_lock_norelock(trans, &wp->lock); if (wp->write_point == write_point) goto out; mutex_unlock(&wp->lock); @@ -764,8 +1227,8 @@ restart_find_oldest: if (!oldest || time_before64(wp->last_used, oldest->last_used)) oldest = wp; - mutex_lock(&oldest->lock); - mutex_lock(&c->write_points_hash_lock); + bch2_trans_mutex_lock_norelock(trans, &oldest->lock); + bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock); if (oldest >= c->write_points + c->write_points_nr || try_increase_writepoints(c)) { mutex_unlock(&c->write_points_hash_lock); @@ -786,35 +1249,35 @@ restart_find_oldest: hlist_add_head_rcu(&wp->node, head); 
mutex_unlock(&c->write_points_hash_lock); out: - wp->last_used = sched_clock(); + wp->last_used = local_clock(); return wp; } /* * Get us an open_bucket we can allocate from, return with it locked: */ -struct write_point *bch2_alloc_sectors_start(struct bch_fs *c, - unsigned target, - unsigned erasure_code, - struct write_point_specifier write_point, - struct bch_devs_list *devs_have, - unsigned nr_replicas, - unsigned nr_replicas_required, - enum alloc_reserve reserve, - unsigned flags, - struct closure *cl) +int bch2_alloc_sectors_start_trans(struct btree_trans *trans, + unsigned target, + unsigned erasure_code, + struct write_point_specifier write_point, + struct bch_devs_list *devs_have, + unsigned nr_replicas, + unsigned nr_replicas_required, + enum alloc_reserve reserve, + unsigned flags, + struct closure *cl, + struct write_point **wp_ret) { + struct bch_fs *c = trans->c; struct write_point *wp; struct open_bucket *ob; struct open_buckets ptrs; unsigned nr_effective, write_points_nr; - unsigned ob_flags = 0; bool have_cache; - enum bucket_alloc_ret ret; + int ret; int i; - if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) - ob_flags |= BUCKET_ALLOC_USE_DURABILITY; + BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS); BUG_ON(!nr_replicas || !nr_replicas_required); retry: @@ -823,35 +1286,44 @@ retry: write_points_nr = c->write_points_nr; have_cache = false; - wp = writepoint_find(c, write_point.v); - - if (wp->type == BCH_DATA_user) - ob_flags |= BUCKET_MAY_ALLOC_PARTIAL; + *wp_ret = wp = writepoint_find(trans, write_point.v); /* metadata may not allocate on cache devices: */ - if (wp->type != BCH_DATA_user) + if (wp->data_type != BCH_DATA_user) have_cache = true; - if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) { - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, - target, erasure_code, - nr_replicas, &nr_effective, - &have_cache, reserve, - ob_flags, cl); - } else { - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, + if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) { + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, target, erasure_code, nr_replicas, &nr_effective, &have_cache, reserve, - ob_flags, NULL); - if (!ret) + flags, NULL); + if (!ret || + bch2_err_matches(ret, BCH_ERR_transaction_restart)) goto alloc_done; - ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, + /* Don't retry from all devices if we're out of open buckets: */ + if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) + goto allocate_blocking; + + /* + * Only try to allocate cache (durability = 0 devices) from the + * specified target: + */ + have_cache = true; + + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, 0, erasure_code, nr_replicas, &nr_effective, &have_cache, reserve, - ob_flags, cl); + flags, cl); + } else { +allocate_blocking: + ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have, + target, erasure_code, + nr_replicas, &nr_effective, + &have_cache, reserve, + flags, cl); } alloc_done: BUG_ON(!ret && nr_effective < nr_replicas); @@ -859,7 +1331,7 @@ alloc_done: if (erasure_code && !ec_open_bucket(c, &ptrs)) pr_debug("failed to get ec bucket: ret %u", ret); - if (ret == INSUFFICIENT_DEVICES && + if (ret == -BCH_ERR_insufficient_devices && nr_effective >= nr_replicas_required) ret = 0; @@ -868,7 +1340,7 @@ alloc_done: /* Free buckets we didn't use: */ open_bucket_for_each(c, &wp->ptrs, ob, i) - open_bucket_free_unused(c, wp, ob); + open_bucket_free_unused(c, ob); wp->ptrs = ptrs; @@ -879,61 +1351,49 @@ alloc_done: BUG_ON(!wp->sectors_free || 
wp->sectors_free == UINT_MAX); - verify_not_stale(c, &wp->ptrs); - - return wp; + return 0; err: open_bucket_for_each(c, &wp->ptrs, ob, i) if (ptrs.nr < ARRAY_SIZE(ptrs.v)) ob_push(c, &ptrs, ob); else - open_bucket_free_unused(c, wp, ob); + open_bucket_free_unused(c, ob); wp->ptrs = ptrs; mutex_unlock(&wp->lock); - if (ret == FREELIST_EMPTY && - try_decrease_writepoints(c, write_points_nr)) + if (bch2_err_matches(ret, BCH_ERR_freelist_empty) && + try_decrease_writepoints(trans, write_points_nr)) goto retry; - switch (ret) { - case OPEN_BUCKETS_EMPTY: - case FREELIST_EMPTY: - return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC); - case INSUFFICIENT_DEVICES: - return ERR_PTR(-EROFS); - default: - BUG(); - } -} + if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) || + bch2_err_matches(ret, BCH_ERR_freelist_empty)) + return cl + ? -BCH_ERR_bucket_alloc_blocked + : -BCH_ERR_ENOSPC_bucket_alloc; -/* - * Append pointers to the space we just allocated to @k, and mark @sectors space - * as allocated out of @ob - */ -void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, - struct bkey_i *k, unsigned sectors) + return ret; +} +struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob) { - struct open_bucket *ob; - unsigned i; - - BUG_ON(sectors > wp->sectors_free); - wp->sectors_free -= sectors; + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); - open_bucket_for_each(c, &wp->ptrs, ob, i) { - struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev); - struct bch_extent_ptr tmp = ob->ptr; - - tmp.cached = !ca->mi.durability && - wp->type == BCH_DATA_user; - - tmp.offset += ca->mi.bucket_size - ob->sectors_free; - bch2_bkey_append_ptr(k, tmp); + return (struct bch_extent_ptr) { + .type = 1 << BCH_EXTENT_ENTRY_ptr, + .gen = ob->gen, + .dev = ob->dev, + .offset = bucket_to_sector(ca, ob->bucket) + + ca->mi.bucket_size - + ob->sectors_free, + }; +} - BUG_ON(sectors > ob->sectors_free); - ob->sectors_free -= sectors; - } +void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, + struct bkey_i *k, unsigned sectors, + bool cached) +{ + bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached); } /* @@ -942,24 +1402,18 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp, */ void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp) { - struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 }; - struct open_bucket *ob; - unsigned i; - - open_bucket_for_each(c, &wp->ptrs, ob, i) - ob_push(c, !ob->sectors_free ? 
&ptrs : &keep, ob); - wp->ptrs = keep; - - mutex_unlock(&wp->lock); - - bch2_open_buckets_put(c, &ptrs); + bch2_alloc_sectors_done_inlined(c, wp); } static inline void writepoint_init(struct write_point *wp, enum bch_data_type type) { mutex_init(&wp->lock); - wp->type = type; + wp->data_type = type; + + INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates); + INIT_LIST_HEAD(&wp->writes); + spin_lock_init(&wp->writes_lock); } void bch2_fs_allocator_foreground_init(struct bch_fs *c) @@ -990,9 +1444,91 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c) wp < c->write_points + c->write_points_nr; wp++) { writepoint_init(wp, BCH_DATA_user); - wp->last_used = sched_clock(); + wp->last_used = local_clock(); wp->write_point = (unsigned long) wp; hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point)); } } + +static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob) +{ + struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev); + unsigned data_type = ob->data_type; + barrier(); /* READ_ONCE() doesn't work on bitfields */ + + prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u", + ob - c->open_buckets, + atomic_read(&ob->pin), + data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type", + ob->dev, ob->bucket, ob->gen, + ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size); + if (ob->ec) + prt_printf(out, " ec idx %llu", ob->ec->idx); + if (ob->on_partial_list) + prt_str(out, " partial"); + prt_newline(out); +} + +void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct open_bucket *ob; + + out->atomic++; + + for (ob = c->open_buckets; + ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); + ob++) { + spin_lock(&ob->lock); + if (ob->valid && !ob->on_partial_list) + bch2_open_bucket_to_text(out, c, ob); + spin_unlock(&ob->lock); + } + + --out->atomic; +} + +void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c) +{ + unsigned i; + + out->atomic++; + spin_lock(&c->freelist_lock); + + for (i = 0; i < c->open_buckets_partial_nr; i++) + bch2_open_bucket_to_text(out, c, + c->open_buckets + c->open_buckets_partial[i]); + + spin_unlock(&c->freelist_lock); + --out->atomic; +} + +static const char * const bch2_write_point_states[] = { +#define x(n) #n, + WRITE_POINT_STATES() +#undef x + NULL +}; + +void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c) +{ + struct write_point *wp; + unsigned i; + + for (wp = c->write_points; + wp < c->write_points + ARRAY_SIZE(c->write_points); + wp++) { + prt_printf(out, "%lu: ", wp->write_point); + prt_human_readable_u64(out, wp->sectors_allocated); + + prt_printf(out, " last wrote: "); + bch2_pr_time_units(out, sched_clock() - wp->last_used); + + for (i = 0; i < WRITE_POINT_STATE_NR; i++) { + prt_printf(out, " %s: ", bch2_write_point_states[i]); + bch2_pr_time_units(out, wp->time[i]); + } + + prt_newline(out); + } +}
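
A few notes on the new allocator paths above, for readers of this diff.

First, the bit arithmetic at the top of try_alloc_bucket() fixes the freespace-btree key layout: the low 56 bits of a key's offset are the bucket number, and the high 8 bits are generation bits that are cross-checked against alloc_freespace_genbits(). The helper below is a hypothetical encoder written only to document that split; it is not part of the patch (the real encoding lives in the freespace-btree code, which this diff does not touch):

/* Hypothetical inverse of the unpacking in try_alloc_bucket(): */
static inline u64 example_freespace_entry(u64 bucket, unsigned genbits)
{
	/* high 8 bits: genbits; low 56 bits: bucket number */
	return ((u64) genbits << 56) | (bucket & ~(~0ULL << 56));
}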
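Second, bch2_dev_stripe_increment_inlined() implements the device round-robin weighting: each allocation charges the chosen device (1 << 48) / free_space in stripe->next_alloc, and bch2_dev_alloc_list() tries devices in increasing next_alloc order, so devices win allocations roughly in proportion to their free space. (The periodic subtract-the-minimum rescaling in the real code does not change the ordering.) The following is a standalone userspace model of that behavior with made-up bucket counts; plain division stands in for the kernel's div64_u64():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t next_alloc[2] = { 0, 0 };
	uint64_t free_buckets[2] = { 100, 400 };	/* assumed numbers */
	unsigned wins[2] = { 0, 0 }, i, dev;

	for (i = 0; i < 1000; i++) {
		/* lowest next_alloc goes first, like bch2_dev_alloc_list(): */
		dev = next_alloc[0] <= next_alloc[1] ? 0 : 1;
		wins[dev]++;
		next_alloc[dev] += (1ULL << 48) / free_buckets[dev];
	}

	/* expect roughly 200/800, matching the 1:4 free-space ratio */
	printf("dev0 %u dev1 %u\n", wins[0], wins[1]);
	return 0;
}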
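Finally, a minimal sketch of how a write path consumes the post-patch sector-allocation API. This is reconstructed only from the signatures visible in this diff (bch2_alloc_sectors_start_trans(), bch2_alloc_sectors_append_ptrs(), bch2_alloc_sectors_done()); the helper name example_write_alloc(), the use of writepoint_hashed(), and the error-handling policy are illustrative assumptions, not code from the patch:

/* Illustrative sketch only -- not from this patch: */
static int example_write_alloc(struct btree_trans *trans, struct bkey_i *k,
			       unsigned sectors, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_list devs_have = { .nr = 0 };
	struct write_point *wp;
	int ret;

	/* Reserve open buckets on one device, at the default reserve: */
	ret = bch2_alloc_sectors_start_trans(trans, 0, 0,
					     writepoint_hashed(0),
					     &devs_have, 1, 1,
					     RESERVE_none, 0, cl, &wp);
	if (ret)
		return ret;

	/*
	 * A real caller clamps its I/O to wp->sectors_free before this;
	 * append_ptrs() asserts that sectors fits the reservation:
	 */
	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
	bch2_alloc_sectors_done(c, wp);
	return 0;
}

On success, bch2_alloc_sectors_start_trans() returns with the write point locked; the append/done pair stamps the allocated space into the key's pointers and unlocks it, putting any now-full open buckets.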