+// SPDX-License-Identifier: GPL-2.0
/*
- * Primary bucket allocation code
- *
* Copyright 2012 Google, Inc.
*
- * Allocation in bcache is done in terms of buckets:
- *
- * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
- * btree pointers - they must match for the pointer to be considered valid.
- *
- * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
- * bucket simply by incrementing its gen.
- *
- * The gens (along with the priorities; it's really the gens are important but
- * the code is named as if it's the priorities) are written in an arbitrary list
- * of buckets on disk, with a pointer to them in the journal header.
- *
- * When we invalidate a bucket, we have to write its new gen to disk and wait
- * for that write to complete before we use it - otherwise after a crash we
- * could have pointers that appeared to be good but pointed to data that had
- * been overwritten.
- *
- * Since the gens and priorities are all stored contiguously on disk, we can
- * batch this up: We fill up the free_inc list with freshly invalidated buckets,
- * call prio_write(), and when prio_write() finishes we pull buckets off the
- * free_inc list and optionally discard them.
- *
- * free_inc isn't the only freelist - if it was, we'd often have to sleep while
- * priorities and gens were being written before we could allocate. c->free is a
- * smaller freelist, and buckets on that list are always ready to be used.
- *
- * If we've got discards enabled, that happens when a bucket moves from the
- * free_inc list to the free list.
- *
- * It's important to ensure that gens don't wrap around - with respect to
- * either the oldest gen in the btree or the gen on disk. This is quite
- * difficult to do in practice, but we explicitly guard against it anyways - if
- * a bucket is in danger of wrapping around we simply skip invalidating it that
- * time around, and we garbage collect or rewrite the priorities sooner than we
- * would have otherwise.
+ * Foreground allocator code: allocate buckets from freelist, and allocate at
+ * sector granularity from writepoints.
*
* bch2_bucket_alloc() allocates a single bucket from a specific device.
*
* bch2_bucket_alloc_set() allocates one or more buckets from different devices
* in a given filesystem.
- *
- * invalidate_buckets() drives all the processes described above. It's called
- * from bch2_bucket_alloc() and a few other places that need to make sure free
- * buckets are ready.
- *
- * invalidate_buckets_(lru|fifo)() find buckets that are available to be
- * invalidated, and then invalidate them and stick them on the free_inc list -
- * in either lru or fifo order.
*/
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
+#include "btree_iter.h"
+#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
+#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
+#include "error.h"
#include "io.h"
+#include "journal.h"
#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
-enum bucket_alloc_ret {
- ALLOC_SUCCESS,
- OPEN_BUCKETS_EMPTY,
- FREELIST_EMPTY, /* Allocator thread not keeping up */
+const char * const bch2_alloc_reserves[] = {
+#define x(t) #t,
+ BCH_ALLOC_RESERVES()
+#undef x
+ NULL
};
/*
* reference _after_ doing the index update that makes its allocation reachable.
*/
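+/*
+ * Open buckets are kept in a hash table, keyed by (device, bucket), so that
+ * the allocator can cheaply check whether a given bucket is already open:
+ */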
+static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ ob->hash = *slot;
+ *slot = idx;
+}
+
+static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
+{
+ open_bucket_idx_t idx = ob - c->open_buckets;
+ open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+ while (*slot != idx) {
+ BUG_ON(!*slot);
+ slot = &c->open_buckets[*slot].hash;
+ }
+
+ *slot = ob->hash;
+ ob->hash = 0;
+}
+
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (ob->ec) {
bch2_ec_bucket_written(c, ob);
return;
}
- percpu_down_read_preempt_disable(&c->usage_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&ob->lock);
- bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
- false, gc_pos_alloc(c, ob), 0);
ob->valid = false;
+ ob->data_type = 0;
spin_unlock(&ob->lock);
- percpu_up_read_preempt_enable(&c->usage_lock);
+ percpu_up_read(&c->mark_lock);
spin_lock(&c->freelist_lock);
+ bch2_open_bucket_hash_remove(c, ob);
+
ob->freelist = c->open_buckets_freelist;
c->open_buckets_freelist = ob - c->open_buckets;
+
c->open_buckets_nr_free++;
+ ca->nr_open_buckets--;
spin_unlock(&c->freelist_lock);
closure_wake_up(&c->open_buckets_wait);
unsigned i;
open_bucket_for_each(c, obs, ob, i)
- if (ob->ptr.dev == dev &&
- ob->ec)
+ if (ob->dev == dev && ob->ec)
bch2_ec_bucket_cancel(c, ob);
}
ob = c->open_buckets + c->open_buckets_freelist;
c->open_buckets_freelist = ob->freelist;
atomic_set(&ob->pin, 1);
+ ob->data_type = 0;
c->open_buckets_nr_free--;
return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
- struct open_bucket *ob,
- bool may_realloc)
+ struct write_point *wp,
+ struct open_bucket *ob)
{
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ bool may_realloc = wp->data_type == BCH_DATA_user;
- BUG_ON(ca->open_buckets_partial_nr >=
+ BUG_ON(ca->open_buckets_partial_nr >
ARRAY_SIZE(ca->open_buckets_partial));
if (ca->open_buckets_partial_nr <
}
}
-static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
- struct open_bucket *ob;
- unsigned i;
-
- open_bucket_for_each(c, obs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
- BUG_ON(ptr_stale(ca, &ob->ptr));
- }
-#endif
-}
-
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
- struct bucket_array *buckets;
- ssize_t b;
+ while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
+ u64 b = ca->new_fs_bucket_idx++;
- rcu_read_lock();
- buckets = bucket_array(ca);
+ if (!is_superblock_bucket(ca, b) &&
+ (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
+ return b;
+ }
- for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
- if (is_available_bucket(buckets->b[b].mark))
- goto success;
- b = -1;
-success:
- rcu_read_unlock();
- return b;
+ return -1;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
switch (reserve) {
- case RESERVE_ALLOC:
+ case RESERVE_btree:
+ case RESERVE_btree_movinggc:
return 0;
- case RESERVE_BTREE:
- return BTREE_NODE_RESERVE / 2;
+ case RESERVE_movinggc:
+ return OPEN_BUCKETS_COUNT / 4;
default:
- return BTREE_NODE_RESERVE;
+ return OPEN_BUCKETS_COUNT / 2;
}
}
-/**
- * bch_bucket_alloc - allocate a single bucket from a specific device
- *
- * Returns index of bucket on success, 0 on failure
- * */
-struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
- enum alloc_reserve reserve,
- bool may_alloc_partial,
- struct closure *cl)
+static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+ u64 bucket,
+ enum alloc_reserve reserve,
+ struct bch_alloc_v4 *a,
+ u64 *skipped_open,
+ u64 *skipped_need_journal_commit,
+ u64 *skipped_nouse,
+ struct closure *cl)
{
- struct bucket_array *buckets;
struct open_bucket *ob;
- long bucket = 0;
- spin_lock(&c->freelist_lock);
+ if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
+ (*skipped_nouse)++;
+ return NULL;
+ }
- if (may_alloc_partial &&
- ca->open_buckets_partial_nr) {
- ob = c->open_buckets +
- ca->open_buckets_partial[--ca->open_buckets_partial_nr];
- ob->on_partial_list = false;
- spin_unlock(&c->freelist_lock);
- return ob;
+ if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+ (*skipped_open)++;
+ return NULL;
}
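+	/*
+	 * Don't reuse a bucket until the update that freed it has been flushed
+	 * to the journal on disk - otherwise, after a crash, we could have
+	 * pointers that appear valid but point at data that has since been
+	 * overwritten:
+	 */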
+ if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+ c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
+ (*skipped_need_journal_commit)++;
+ return NULL;
+ }
+
+ spin_lock(&c->freelist_lock);
+
if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
if (cl)
closure_wait(&c->open_buckets_wait, cl);
+
+ if (!c->blocked_allocate_open_bucket)
+ c->blocked_allocate_open_bucket = local_clock();
+
spin_unlock(&c->freelist_lock);
- trace_open_bucket_alloc_fail(ca, reserve);
+
+ trace_open_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve]);
return ERR_PTR(-OPEN_BUCKETS_EMPTY);
}
- if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
- goto out;
+ /* Recheck under lock: */
+ if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+ spin_unlock(&c->freelist_lock);
+ (*skipped_open)++;
+ return NULL;
+ }
- switch (reserve) {
- case RESERVE_ALLOC:
- if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
- goto out;
- break;
- case RESERVE_BTREE:
- if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
- ca->free[RESERVE_BTREE].size &&
- fifo_pop(&ca->free[RESERVE_BTREE], bucket))
- goto out;
- break;
- case RESERVE_MOVINGGC:
- if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
- goto out;
- break;
- default:
- break;
+ ob = bch2_open_bucket_alloc(c);
+
+ spin_lock(&ob->lock);
+
+ ob->valid = true;
+ ob->sectors_free = ca->mi.bucket_size;
+ ob->alloc_reserve = reserve;
+ ob->dev = ca->dev_idx;
+ ob->gen = a->gen;
+ ob->bucket = bucket;
+ spin_unlock(&ob->lock);
+
+ ca->nr_open_buckets++;
+ bch2_open_bucket_hash_add(c, ob);
+
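+	/*
+	 * Allocation succeeded: account how long we were stalled waiting for
+	 * buckets, and clear the blocked-allocation start times:
+	 */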
+ if (c->blocked_allocate_open_bucket) {
+ bch2_time_stats_update(
+ &c->times[BCH_TIME_blocked_allocate_open_bucket],
+ c->blocked_allocate_open_bucket);
+ c->blocked_allocate_open_bucket = 0;
}
- if (cl)
- closure_wait(&c->freelist_wait, cl);
+ if (c->blocked_allocate) {
+ bch2_time_stats_update(
+ &c->times[BCH_TIME_blocked_allocate],
+ c->blocked_allocate);
+ c->blocked_allocate = 0;
+ }
spin_unlock(&c->freelist_lock);
- trace_bucket_alloc_fail(ca, reserve);
- return ERR_PTR(-FREELIST_EMPTY);
-out:
- verify_not_on_freelist(c, ca, bucket);
+ trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
+ return ob;
+}
- ob = bch2_open_bucket_alloc(c);
+static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
+ enum alloc_reserve reserve, u64 free_entry,
+ u64 *skipped_open,
+ u64 *skipped_need_journal_commit,
+ u64 *skipped_nouse,
+ struct closure *cl)
+{
+ struct bch_fs *c = trans->c;
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob;
+ struct bch_alloc_v4 a;
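+	/*
+	 * A freespace btree entry packs the bucket number into the low 56 bits
+	 * and generation bits into the high 8 bits; the bucket number is range
+	 * checked and the genbits are verified against the bucket's alloc key
+	 * below:
+	 */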
+ u64 b = free_entry & ~(~0ULL << 56);
+ unsigned genbits = free_entry >> 56;
+ struct printbuf buf = PRINTBUF;
+ int ret;
- spin_lock(&ob->lock);
- buckets = bucket_array(ca);
+ bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
+ k = bch2_btree_iter_peek_slot(&iter);
+ ret = bkey_err(k);
+ if (ret) {
+ ob = ERR_PTR(ret);
+ goto err;
+ }
- ob->valid = true;
- ob->sectors_free = ca->mi.bucket_size;
- ob->ptr = (struct bch_extent_ptr) {
- .gen = buckets->b[bucket].mark.gen,
- .offset = bucket_to_sector(ca, bucket),
- .dev = ca->dev_idx,
- };
+ bch2_alloc_to_v4(k, &a);
- bucket_io_clock_reset(c, ca, bucket, READ);
- bucket_io_clock_reset(c, ca, bucket, WRITE);
- spin_unlock(&ob->lock);
+ if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c,
+ "non free bucket in freespace btree (state %s)\n"
+ " %s\n"
+ " at %llu (genbits %u)",
+ bch2_bucket_states[bucket_state(a)],
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
+ free_entry, genbits)) {
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c,
+ "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
+ " %s",
+ genbits, alloc_freespace_genbits(a) >> 56,
+ (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c,
+ "freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)",
+ b, ca->mi.first_bucket, ca->mi.nbuckets)) {
+ ob = ERR_PTR(-EIO);
+ goto err;
+ }
+
+ ob = __try_alloc_bucket(c, ca, b, reserve, &a,
+ skipped_open,
+ skipped_need_journal_commit,
+ skipped_nouse,
+ cl);
+err:
+ bch2_trans_iter_exit(trans, &iter);
+ printbuf_exit(&buf);
+ return ob;
+}
+
+static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
+ enum alloc_reserve reserve)
+{
+ struct open_bucket *ob;
+ int i;
+
+ spin_lock(&c->freelist_lock);
+
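+	/*
+	 * Walk the partially allocated open buckets backwards and reuse the
+	 * first one whose reserve is at least the reserve being requested:
+	 */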
+ for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
+ ob = c->open_buckets + ca->open_buckets_partial[i];
+
+ if (reserve <= ob->alloc_reserve) {
+ array_remove_item(ca->open_buckets_partial,
+ ca->open_buckets_partial_nr,
+ i);
+ ob->on_partial_list = false;
+ ob->alloc_reserve = reserve;
+ spin_unlock(&c->freelist_lock);
+ return ob;
+ }
+ }
spin_unlock(&c->freelist_lock);
+ return NULL;
+}
+
+/*
+ * This path is only taken before the freespace btree has been initialized:
+ *
+ * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked the superblock &
+ * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ */
+static noinline struct open_bucket *
+bch2_bucket_alloc_trans_early(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ u64 *cur_bucket,
+ u64 *buckets_seen,
+ u64 *skipped_open,
+ u64 *skipped_need_journal_commit,
+ u64 *skipped_nouse,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ int ret;
+
+ *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
+ *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
+
+ for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
+ BTREE_ITER_SLOTS, k, ret) {
+ struct bch_alloc_v4 a;
+
+ if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+ break;
+
+ if (ca->new_fs_bucket_idx &&
+ is_superblock_bucket(ca, k.k->p.offset))
+ continue;
+
+ bch2_alloc_to_v4(k, &a);
+
+ if (bucket_state(a) != BUCKET_free)
+ continue;
+
+ (*buckets_seen)++;
+
+ ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
+ skipped_open,
+ skipped_need_journal_commit,
+ skipped_nouse,
+ cl);
+ if (ob)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
- bch2_wake_allocator(ca);
+ *cur_bucket = iter.pos.offset;
+
+ return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
+}
+
+static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+ struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ u64 *cur_bucket,
+ u64 *buckets_seen,
+ u64 *skipped_open,
+ u64 *skipped_need_journal_commit,
+ u64 *skipped_nouse,
+ struct closure *cl)
+{
+ struct btree_iter iter;
+ struct bkey_s_c k;
+ struct open_bucket *ob = NULL;
+ int ret;
+
+ if (unlikely(!ca->mi.freespace_initialized))
+ return bch2_bucket_alloc_trans_early(trans, ca, reserve,
+ cur_bucket,
+ buckets_seen,
+ skipped_open,
+ skipped_need_journal_commit,
+ skipped_nouse,
+ cl);
+
+ BUG_ON(ca->new_fs_bucket_idx);
+
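+	/*
+	 * Each freespace btree key covers a range of free buckets; walk each
+	 * key bucket by bucket, picking up from where the last search left
+	 * off:
+	 */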
+ for_each_btree_key(trans, iter, BTREE_ID_freespace,
+ POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
+ if (k.k->p.inode != ca->dev_idx)
+ break;
+
+ for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
+ *cur_bucket != k.k->p.offset && !ob;
+ (*cur_bucket)++) {
+ if (btree_trans_too_many_iters(trans)) {
+ ob = ERR_PTR(-EINTR);
+ break;
+ }
+
+ (*buckets_seen)++;
+
+ ob = try_alloc_bucket(trans, ca, reserve,
+ *cur_bucket,
+ skipped_open,
+ skipped_need_journal_commit,
+ skipped_nouse,
+ cl);
+ }
+ if (ob)
+ break;
+ }
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ob ?: ERR_PTR(ret);
+}
+
+/**
+ * bch2_bucket_alloc - allocate a single bucket from a specific device
+ *
+ * Returns a pointer to the newly allocated open_bucket on success, or an
+ * ERR_PTR() on failure.
+ */
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
+ enum alloc_reserve reserve,
+ bool may_alloc_partial,
+ struct closure *cl)
+{
+ struct open_bucket *ob = NULL;
+ u64 avail = dev_buckets_available(ca, reserve);
+ u64 cur_bucket = 0;
+ u64 buckets_seen = 0;
+ u64 skipped_open = 0;
+ u64 skipped_need_journal_commit = 0;
+ u64 skipped_nouse = 0;
+ int ret;
+
+ if (may_alloc_partial) {
+ ob = try_alloc_partial_bucket(c, ca, reserve);
+ if (ob)
+ return ob;
+ }
+again:
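+	/*
+	 * No buckets available at this reserve: put the caller on the freelist
+	 * waitlist (if a closure was passed), recheck, and bail out with
+	 * FREELIST_EMPTY rather than blocking here:
+	 */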
+ if (!avail) {
+ if (cl) {
+ closure_wait(&c->freelist_wait, cl);
+ /* recheck after putting ourself on waitlist */
+ avail = dev_buckets_available(ca, reserve);
+ if (avail) {
+ closure_wake_up(&c->freelist_wait);
+ goto again;
+ }
+ }
+
+ if (!c->blocked_allocate)
+ c->blocked_allocate = local_clock();
+
+ ob = ERR_PTR(-FREELIST_EMPTY);
+ goto err;
+ }
+
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+ &cur_bucket,
+ &buckets_seen,
+ &skipped_open,
+ &skipped_need_journal_commit,
+ &skipped_nouse,
+ cl)));
+
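+	/*
+	 * If the buckets skipped for needing a journal commit amount to more
+	 * than half the available buckets, start a journal flush so they can
+	 * be reused:
+	 */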
+ if (skipped_need_journal_commit * 2 > avail)
+ bch2_journal_flush_async(&c->journal, NULL);
+err:
+ if (!ob)
+ ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
+
+ if (IS_ERR(ob)) {
+ trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
+ buckets_seen,
+ skipped_open,
+ skipped_need_journal_commit,
+ skipped_nouse,
+ cl == NULL, PTR_ERR(ob));
+ atomic_long_inc(&c->bucket_alloc_fail);
+ }
- trace_bucket_alloc(ca, reserve);
return ob;
}
struct bch_devs_mask *devs)
{
struct dev_alloc_list ret = { .nr = 0 };
- struct bch_dev *ca;
unsigned i;
- for_each_member_device_rcu(ca, c, i, devs)
+ for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
ret.devs[ret.nr++] = i;
bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
return ret;
}
-void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
+void bch2_dev_stripe_increment(struct bch_dev *ca,
struct dev_stripe_state *stripe)
{
u64 *v = stripe->next_alloc + ca->dev_idx;
- u64 free_space = dev_buckets_free(c, ca);
+ u64 free_space = dev_buckets_available(ca, RESERVE_none);
u64 free_space_inv = free_space
? div64_u64(1ULL << 48, free_space)
: 1ULL << 48;
#define BUCKET_MAY_ALLOC_PARTIAL (1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY (1 << 1)
-static int bch2_bucket_alloc_set(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct dev_stripe_state *stripe,
- struct bch_devs_mask *devs_may_alloc,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum alloc_reserve reserve,
- unsigned flags,
- struct closure *cl)
+static void add_new_bucket(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned *nr_effective,
+ bool *have_cache,
+ unsigned flags,
+ struct open_bucket *ob)
+{
+ unsigned durability =
+ bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+ __clear_bit(ob->dev, devs_may_alloc->d);
+ *nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
+ ? durability : 1;
+ *have_cache |= !durability;
+
+ ob_push(c, ptrs, ob);
+}
+
+int bch2_bucket_alloc_set(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct dev_stripe_state *stripe,
+ struct bch_devs_mask *devs_may_alloc,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum alloc_reserve reserve,
+ unsigned flags,
+ struct closure *cl)
{
struct dev_alloc_list devs_sorted =
bch2_dev_alloc_list(c, stripe, devs_may_alloc);
+ unsigned dev;
struct bch_dev *ca;
- bool alloc_failure = false;
- unsigned i, durability;
+ int ret = -INSUFFICIENT_DEVICES;
+ unsigned i;
BUG_ON(*nr_effective >= nr_replicas);
for (i = 0; i < devs_sorted.nr; i++) {
struct open_bucket *ob;
- ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
+ dev = devs_sorted.devs[i];
+
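+		/*
+		 * Take a ref on the device so it can't go away while we're
+		 * allocating from it:
+		 */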
+ rcu_read_lock();
+ ca = rcu_dereference(c->devs[dev]);
+ if (ca)
+ percpu_ref_get(&ca->ref);
+ rcu_read_unlock();
+
if (!ca)
continue;
- if (!ca->mi.durability && *have_cache)
+ if (!ca->mi.durability && *have_cache) {
+ percpu_ref_put(&ca->ref);
continue;
+ }
ob = bch2_bucket_alloc(c, ca, reserve,
flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
- if (IS_ERR(ob)) {
- enum bucket_alloc_ret ret = -PTR_ERR(ob);
+ if (!IS_ERR(ob))
+ bch2_dev_stripe_increment(ca, stripe);
+ percpu_ref_put(&ca->ref);
- WARN_ON(reserve == RESERVE_MOVINGGC &&
- ret != OPEN_BUCKETS_EMPTY);
+ if (IS_ERR(ob)) {
+ ret = PTR_ERR(ob);
if (cl)
- return -EAGAIN;
- if (ret == OPEN_BUCKETS_EMPTY)
- return -ENOSPC;
- alloc_failure = true;
+ break;
continue;
}
- durability = (flags & BUCKET_ALLOC_USE_DURABILITY)
- ? ca->mi.durability : 1;
-
- __clear_bit(ca->dev_idx, devs_may_alloc->d);
- *nr_effective += durability;
- *have_cache |= !durability;
-
- ob_push(c, ptrs, ob);
+ add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_effective, have_cache, flags, ob);
- bch2_dev_stripe_increment(c, ca, stripe);
-
- if (*nr_effective >= nr_replicas)
- return 0;
+ if (*nr_effective >= nr_replicas) {
+ ret = 0;
+ break;
+ }
}
- return alloc_failure ? -ENOSPC : -EROFS;
+ return ret;
}
/* Allocate from stripes: */
-/*
- * XXX: use a higher watermark for allocating open buckets here:
- */
-static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
-{
- struct bch_devs_mask devs;
- struct open_bucket *ob;
- unsigned i, nr_have = 0, nr_data =
- min_t(unsigned, h->nr_active_devs,
- EC_STRIPE_MAX) - h->redundancy;
- bool have_cache = true;
- int ret = 0;
-
- BUG_ON(h->blocks.nr > nr_data);
- BUG_ON(h->parity.nr > h->redundancy);
-
- devs = h->devs;
-
- open_bucket_for_each(c, &h->parity, ob, i)
- __clear_bit(ob->ptr.dev, devs.d);
- open_bucket_for_each(c, &h->blocks, ob, i)
- __clear_bit(ob->ptr.dev, devs.d);
-
- percpu_down_read_preempt_disable(&c->usage_lock);
- rcu_read_lock();
-
- if (h->parity.nr < h->redundancy) {
- nr_have = h->parity.nr;
-
- ret = bch2_bucket_alloc_set(c, &h->parity,
- &h->parity_stripe,
- &devs,
- h->redundancy,
- &nr_have,
- &have_cache,
- RESERVE_NONE,
- 0,
- NULL);
- if (ret)
- goto err;
- }
-
- if (h->blocks.nr < nr_data) {
- nr_have = h->blocks.nr;
-
- ret = bch2_bucket_alloc_set(c, &h->blocks,
- &h->block_stripe,
- &devs,
- nr_data,
- &nr_have,
- &have_cache,
- RESERVE_NONE,
- 0,
- NULL);
- if (ret)
- goto err;
- }
-
- rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->usage_lock);
-
- return bch2_ec_stripe_new_alloc(c, h);
-err:
- rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->usage_lock);
- return -1;
-}
-
/*
* if we can't allocate a new stripe because there are already too many
* partially filled stripes, force allocating from an existing stripe even when
* it's to a device we don't want:
*/
-static void bucket_alloc_from_stripe(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_mask *devs_may_alloc,
- u16 target,
- unsigned erasure_code,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache)
+static int bucket_alloc_from_stripe(struct bch_fs *c,
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_mask *devs_may_alloc,
+ u16 target,
+ unsigned erasure_code,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ unsigned flags,
+ struct closure *cl)
{
struct dev_alloc_list devs_sorted;
struct ec_stripe_head *h;
unsigned i, ec_idx;
if (!erasure_code)
- return;
+ return 0;
if (nr_replicas < 2)
- return;
+ return 0;
if (ec_open_bucket(c, ptrs))
- return;
+ return 0;
- h = bch2_ec_stripe_head_get(c, target, erasure_code, nr_replicas - 1);
+ h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
+ wp == &c->copygc_write_point,
+ cl);
+ if (IS_ERR(h))
+ return -PTR_ERR(h);
if (!h)
- return;
-
- if (!h->s && ec_stripe_alloc(c, h))
- goto out_put_head;
+ return 0;
- rcu_read_lock();
devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
- rcu_read_unlock();
for (i = 0; i < devs_sorted.nr; i++)
- open_bucket_for_each(c, &h->s->blocks, ob, ec_idx)
- if (ob->ptr.dev == devs_sorted.devs[i] &&
+ for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
+ if (!h->s->blocks[ec_idx])
+ continue;
+
+ ob = c->open_buckets + h->s->blocks[ec_idx];
+ if (ob->dev == devs_sorted.devs[i] &&
!test_and_set_bit(ec_idx, h->s->blocks_allocated))
goto got_bucket;
+ }
goto out_put_head;
got_bucket:
- ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ ca = bch_dev_bkey_exists(c, ob->dev);
ob->ec_idx = ec_idx;
ob->ec = h->s;
- __clear_bit(ob->ptr.dev, devs_may_alloc->d);
- *nr_effective += ca->mi.durability;
- *have_cache |= !ca->mi.durability;
-
- ob_push(c, ptrs, ob);
+ add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_effective, have_cache, flags, ob);
atomic_inc(&h->s->pin);
out_put_head:
- bch2_ec_stripe_head_put(h);
+ bch2_ec_stripe_head_put(c, h);
+ return 0;
}
/* Sector allocator */
unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
+ unsigned flags,
bool need_ec)
{
struct open_buckets ptrs_skip = { .nr = 0 };
unsigned i;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
if (*nr_effective < nr_replicas &&
- test_bit(ob->ptr.dev, devs_may_alloc->d) &&
+ test_bit(ob->dev, devs_may_alloc->d) &&
(ca->mi.durability ||
- (wp->type == BCH_DATA_USER && !*have_cache)) &&
+ (wp->data_type == BCH_DATA_user && !*have_cache)) &&
(ob->ec || !need_ec)) {
- __clear_bit(ob->ptr.dev, devs_may_alloc->d);
- *nr_effective += ca->mi.durability;
- *have_cache |= !ca->mi.durability;
-
- ob_push(c, ptrs, ob);
+ add_new_bucket(c, ptrs, devs_may_alloc,
+ nr_effective, have_cache,
+ flags, ob);
} else {
ob_push(c, &ptrs_skip, ob);
}
}
static int open_bucket_add_buckets(struct bch_fs *c,
- struct open_buckets *ptrs,
- struct write_point *wp,
- struct bch_devs_list *devs_have,
- u16 target,
- unsigned erasure_code,
- unsigned nr_replicas,
- unsigned *nr_effective,
- bool *have_cache,
- enum alloc_reserve reserve,
- struct closure *_cl)
+ struct open_buckets *ptrs,
+ struct write_point *wp,
+ struct bch_devs_list *devs_have,
+ u16 target,
+ unsigned erasure_code,
+ unsigned nr_replicas,
+ unsigned *nr_effective,
+ bool *have_cache,
+ enum alloc_reserve reserve,
+ unsigned flags,
+ struct closure *_cl)
{
struct bch_devs_mask devs;
struct open_bucket *ob;
struct closure *cl = NULL;
- unsigned i, flags = BUCKET_ALLOC_USE_DURABILITY;
int ret;
-
- if (wp->type == BCH_DATA_USER)
- flags |= BUCKET_MAY_ALLOC_PARTIAL;
+ unsigned i;
rcu_read_lock();
- devs = target_rw_devs(c, wp->type, target);
+ devs = target_rw_devs(c, wp->data_type, target);
rcu_read_unlock();
/* Don't allocate from devices we already have pointers to: */
__clear_bit(devs_have->devs[i], devs.d);
open_bucket_for_each(c, ptrs, ob, i)
- __clear_bit(ob->ptr.dev, devs.d);
+ __clear_bit(ob->dev, devs.d);
if (erasure_code) {
- get_buckets_from_writepoint(c, ptrs, wp, &devs,
- nr_replicas, nr_effective,
- have_cache, true);
- if (*nr_effective >= nr_replicas)
- return 0;
+ if (!ec_open_bucket(c, ptrs)) {
+ get_buckets_from_writepoint(c, ptrs, wp, &devs,
+ nr_replicas, nr_effective,
+ have_cache, flags, true);
+ if (*nr_effective >= nr_replicas)
+ return 0;
+ }
- bucket_alloc_from_stripe(c, ptrs, wp, &devs,
- target, erasure_code,
- nr_replicas, nr_effective,
- have_cache);
- if (*nr_effective >= nr_replicas)
- return 0;
+ if (!ec_open_bucket(c, ptrs)) {
+ ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
+ target, erasure_code,
+ nr_replicas, nr_effective,
+ have_cache, flags, _cl);
+ if (ret == -FREELIST_EMPTY ||
+ ret == -OPEN_BUCKETS_EMPTY)
+ return ret;
+ if (*nr_effective >= nr_replicas)
+ return 0;
+ }
}
get_buckets_from_writepoint(c, ptrs, wp, &devs,
nr_replicas, nr_effective,
- have_cache, false);
+ have_cache, flags, false);
if (*nr_effective >= nr_replicas)
return 0;
- percpu_down_read_preempt_disable(&c->usage_lock);
- rcu_read_lock();
-
retry_blocking:
/*
* Try nonblocking first, so that if one device is full we'll try from
ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
nr_replicas, nr_effective, have_cache,
reserve, flags, cl);
- if (ret && ret != -EROFS && !cl && _cl) {
+ if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
cl = _cl;
goto retry_blocking;
}
- rcu_read_unlock();
- percpu_up_read_preempt_enable(&c->usage_lock);
-
return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
- struct open_buckets *obs,
- enum bch_data_type data_type)
+ struct open_buckets *obs)
{
struct open_buckets ptrs = { .nr = 0 };
struct open_bucket *ob, *ob2;
unsigned i, j;
open_bucket_for_each(c, obs, ob, i) {
- bool drop = !ca || ob->ptr.dev == ca->dev_idx;
+ bool drop = !ca || ob->dev == ca->dev_idx;
if (!drop && ob->ec) {
mutex_lock(&ob->ec->lock);
- open_bucket_for_each(c, &ob->ec->blocks, ob2, j)
- drop |= ob2->ptr.dev == ca->dev_idx;
- open_bucket_for_each(c, &ob->ec->parity, ob2, j)
- drop |= ob2->ptr.dev == ca->dev_idx;
+ for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
+ if (!ob->ec->blocks[j])
+ continue;
+
+ ob2 = c->open_buckets + ob->ec->blocks[j];
+ drop |= ob2->dev == ca->dev_idx;
+ }
mutex_unlock(&ob->ec->lock);
}
struct write_point *wp)
{
mutex_lock(&wp->lock);
- bch2_open_buckets_stop_dev(c, ca, &wp->ptrs, wp->type);
+ bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
mutex_unlock(&wp->lock);
}
{
struct write_point *wp;
+ rcu_read_lock();
hlist_for_each_entry_rcu(wp, head, node)
if (wp->write_point == write_point)
- return wp;
-
- return NULL;
+ goto out;
+ wp = NULL;
+out:
+ rcu_read_unlock();
+ return wp;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
u64 stranded = c->write_points_nr * c->bucket_size_max;
- u64 free = bch2_fs_sectors_free(c, bch2_fs_usage_read(c));
+ u64 free = bch2_fs_usage_read_short(c).free;
return stranded * factor > free;
}
{
struct write_point *wp;
struct open_bucket *ob;
- unsigned nr_effective = 0;
- struct open_buckets ptrs = { .nr = 0 };
- bool have_cache = false;
- unsigned write_points_nr;
- int ret = 0, i;
+ struct open_buckets ptrs;
+ unsigned nr_effective, write_points_nr;
+ unsigned ob_flags = 0;
+ bool have_cache;
+ int ret;
+ int i;
+
+ if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
+ ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
+ ptrs.nr = 0;
+ nr_effective = 0;
write_points_nr = c->write_points_nr;
+ have_cache = false;
wp = writepoint_find(c, write_point.v);
+ if (wp->data_type == BCH_DATA_user)
+ ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
+
/* metadata may not allocate on cache devices: */
- if (wp->type != BCH_DATA_USER)
+ if (wp->data_type != BCH_DATA_user)
have_cache = true;
if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve, cl);
+ &have_cache, reserve,
+ ob_flags, cl);
} else {
ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve, NULL);
+ &have_cache, reserve,
+ ob_flags, NULL);
if (!ret)
goto alloc_done;
ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
0, erasure_code,
nr_replicas, &nr_effective,
- &have_cache, reserve, cl);
+ &have_cache, reserve,
+ ob_flags, cl);
}
alloc_done:
BUG_ON(!ret && nr_effective < nr_replicas);
if (erasure_code && !ec_open_bucket(c, &ptrs))
pr_debug("failed to get ec bucket: ret %u", ret);
- if (ret == -EROFS &&
+ if (ret == -INSUFFICIENT_DEVICES &&
nr_effective >= nr_replicas_required)
ret = 0;
/* Free buckets we didn't use: */
open_bucket_for_each(c, &wp->ptrs, ob, i)
- open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+ open_bucket_free_unused(c, wp, ob);
wp->ptrs = ptrs;
BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
- verify_not_stale(c, &wp->ptrs);
-
return wp;
err:
open_bucket_for_each(c, &wp->ptrs, ob, i)
if (ptrs.nr < ARRAY_SIZE(ptrs.v))
ob_push(c, &ptrs, ob);
else
- open_bucket_free_unused(c, ob,
- wp->type == BCH_DATA_USER);
+ open_bucket_free_unused(c, wp, ob);
wp->ptrs = ptrs;
mutex_unlock(&wp->lock);
- if (ret == -ENOSPC &&
+ if (ret == -FREELIST_EMPTY &&
try_decrease_writepoints(c, write_points_nr))
goto retry;
- return ERR_PTR(ret);
+ switch (ret) {
+ case -OPEN_BUCKETS_EMPTY:
+ case -FREELIST_EMPTY:
+ return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
+ case -INSUFFICIENT_DEVICES:
+ return ERR_PTR(-EROFS);
+ default:
+ return ERR_PTR(ret);
+ }
+}
+
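+/*
+ * Construct an extent pointer for the next allocation out of @ob: the offset
+ * points at the first unallocated sector in the bucket (bucket start plus the
+ * sectors already consumed):
+ */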
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
+{
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+ return (struct bch_extent_ptr) {
+ .type = 1 << BCH_EXTENT_ENTRY_ptr,
+ .gen = ob->gen,
+ .dev = ob->dev,
+ .offset = bucket_to_sector(ca, ob->bucket) +
+ ca->mi.bucket_size -
+ ob->sectors_free,
+ };
}
/*
* as allocated out of @ob
*/
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
- struct bkey_i *k, unsigned sectors)
+ struct bkey_i *k, unsigned sectors,
+ bool cached)
{
struct open_bucket *ob;
wp->sectors_free -= sectors;
open_bucket_for_each(c, &wp->ptrs, ob, i) {
- struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
- struct bch_extent_ptr tmp = ob->ptr;
+ struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+ struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
- tmp.cached = !ca->mi.durability &&
- wp->type == BCH_DATA_USER;
+ ptr.cached = cached ||
+ (!ca->mi.durability &&
+ wp->data_type == BCH_DATA_user);
- tmp.offset += ca->mi.bucket_size - ob->sectors_free;
- bch2_bkey_append_ptr(k, tmp);
+ bch2_bkey_append_ptr(k, ptr);
BUG_ON(sectors > ob->sectors_free);
ob->sectors_free -= sectors;
bch2_open_buckets_put(c, &ptrs);
}
+static inline void writepoint_init(struct write_point *wp,
+ enum bch_data_type type)
+{
+ mutex_init(&wp->lock);
+ wp->data_type = type;
+}
+
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
struct open_bucket *ob;
c->open_buckets_freelist = ob - c->open_buckets;
}
- writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
- writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+ writepoint_init(&c->btree_write_point, BCH_DATA_btree);
+ writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
+ writepoint_init(&c->copygc_write_point, BCH_DATA_user);
for (wp = c->write_points;
wp < c->write_points + c->write_points_nr; wp++) {
- writepoint_init(wp, BCH_DATA_USER);
+ writepoint_init(wp, BCH_DATA_user);
wp->last_used = sched_clock();
wp->write_point = (unsigned long) wp;
writepoint_hash(c, wp->write_point));
}
}
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list) {
+ pr_buf(out, "%zu ref %u type %s\n",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ bch2_data_types[ob->data_type]);
+ }
+ spin_unlock(&ob->lock);
+ }
+}