#include "bcachefs.h"
#include "alloc.h"
+#include "btree_cache.h"
+#include "btree_io.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
u64 size = bucket_to_sector(ca, ca->mi.nbuckets -
ca->mi.first_bucket) << 9;
u64 dirty = bucket_to_sector(ca,
- stats.buckets[S_DIRTY]) << 9;
+ stats.buckets[BCH_DATA_USER]) << 9;
u64 free = bucket_to_sector(ca,
__dev_buckets_free(ca, stats)) << 9;
/*
* reclaimed by copy GC
*/
s64 fragmented = (bucket_to_sector(ca,
- stats.buckets[S_DIRTY] +
- stats.buckets_cached) -
- (stats.sectors[S_DIRTY] +
- stats.sectors_cached)) << 9;
+ stats.buckets[BCH_DATA_USER] +
+ stats.buckets[BCH_DATA_CACHED]) -
+ (stats.sectors[BCH_DATA_USER] +
+ stats.sectors[BCH_DATA_CACHED])) << 9;
fragmented = max(0LL, fragmented);
c->pd_controllers_update_seconds * HZ);
}
+/* Persistent alloc info: */
+
static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
unsigned bytes = offsetof(struct bch_alloc, data);
if (a.k->p.offset >= ca->mi.nbuckets)
return;
- g = ca->buckets + a.k->p.offset;
+ lg_local_lock(&c->usage_lock);
+
+ g = bucket(ca, a.k->p.offset);
bucket_cmpxchg(g, new, ({
new.gen = a.v->gen;
new.gen_valid = 1;
g->prio[READ] = get_alloc_field(&d, 2);
if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
g->prio[WRITE] = get_alloc_field(&d, 2);
+
+ lg_local_unlock(&c->usage_lock);
}
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
unsigned i;
int ret;
- if (!c->btree_roots[BTREE_ID_ALLOC].b)
- return 0;
-
for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
bch2_alloc_read_key(c, k);
bch2_btree_iter_cond_resched(&iter);
bch2_alloc_read_key(c, bkey_i_to_s_c(k));
}
- mutex_lock(&c->bucket_lock);
+ mutex_lock(&c->prio_clock[READ].lock);
for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
bch2_recalc_min_prio(c, ca, READ);
+ up_read(&ca->bucket_lock);
+ }
+ mutex_unlock(&c->prio_clock[READ].lock);
+
+ mutex_lock(&c->prio_clock[WRITE].lock);
+ for_each_member_device(ca, c, i) {
+ down_read(&ca->bucket_lock);
bch2_recalc_min_prio(c, ca, WRITE);
+ up_read(&ca->bucket_lock);
}
- mutex_unlock(&c->bucket_lock);
+ mutex_unlock(&c->prio_clock[WRITE].lock);
return 0;
}
static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, struct btree_iter *iter,
+ size_t b, struct btree_iter *iter,
u64 *journal_seq)
{
struct bucket_mark m;
__BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
+ struct bucket *g;
struct bkey_i_alloc *a;
u8 *d;
int ret;
- bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, g - ca->buckets));
+ bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
do {
- ret = bch2_btree_iter_traverse(iter);
+ ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
if (ret)
break;
+ lg_local_lock(&c->usage_lock);
+ g = bucket(ca, b);
+
/* read mark under btree node lock: */
m = READ_ONCE(g->mark);
a = bkey_alloc_init(&alloc_key.k);
put_alloc_field(&d, 2, g->prio[READ]);
if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
put_alloc_field(&d, 2, g->prio[WRITE]);
+ lg_local_unlock(&c->usage_lock);
- bch2_btree_iter_set_pos(iter, a->k.p);
ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
{
struct bch_dev *ca;
- struct bucket *g;
struct btree_iter iter;
int ret;
if (pos.offset >= ca->mi.nbuckets)
return 0;
- g = ca->buckets + pos.offset;
-
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- ret = __bch2_alloc_write_key(c, ca, g, &iter, NULL);
+ ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
bch2_btree_iter_unlock(&iter);
return ret;
}
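+/*
+ * Write out an alloc key for every bucket marked dirty in ca->buckets_dirty,
+ * for each rw member device:
+ */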
-static int bch2_alloc_write(struct bch_fs *c, struct bch_dev *ca, u64 *journal_seq)
+int bch2_alloc_write(struct bch_fs *c)
{
- struct btree_iter iter;
- unsigned long bucket;
+ struct bch_dev *ca;
+ unsigned i;
int ret = 0;
- bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
- BTREE_ITER_INTENT);
+ for_each_rw_member(ca, c, i) {
+ struct btree_iter iter;
+ unsigned long bucket;
- for_each_set_bit(bucket, ca->bucket_dirty, ca->mi.nbuckets) {
- ret = __bch2_alloc_write_key(c, ca, ca->buckets + bucket,
- &iter, journal_seq);
- if (ret)
- break;
-
- clear_bit(bucket, ca->bucket_dirty);
- }
+ bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- bch2_btree_iter_unlock(&iter);
- return ret;
-}
-
-#define BUCKET_GC_GEN_MAX 96U
-
-/**
- * wait_buckets_available - wait on reclaimable buckets
- *
- * If there aren't enough available buckets to fill up free_inc, wait until
- * there are.
- */
-static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
-{
- unsigned long gc_count = c->gc_count;
- int ret = 0;
+ down_read(&ca->bucket_lock);
+ for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
+ ret = __bch2_alloc_write_key(c, ca, bucket, &iter, NULL);
+ if (ret)
+ break;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- ret = -1;
- break;
+ clear_bit(bucket, ca->buckets_dirty);
}
+ up_read(&ca->bucket_lock);
+ bch2_btree_iter_unlock(&iter);
- if (gc_count != c->gc_count)
- ca->inc_gen_really_needs_gc = 0;
-
- if ((ssize_t) (dev_buckets_available(c, ca) -
- ca->inc_gen_really_needs_gc) >=
- (ssize_t) fifo_free(&ca->free_inc))
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
break;
-
- up_read(&c->gc_lock);
- schedule();
- try_to_freeze();
- down_read(&c->gc_lock);
+ }
}
- __set_current_state(TASK_RUNNING);
return ret;
}
-static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
- size_t bucket)
-{
- if (expensive_debug_checks(c)) {
- size_t iter;
- long i;
- unsigned j;
-
- for (j = 0; j < RESERVE_NR; j++)
- fifo_for_each_entry(i, &ca->free[j], iter)
- BUG_ON(i == bucket);
- fifo_for_each_entry(i, &ca->free_inc, iter)
- BUG_ON(i == bucket);
- }
-}
-
-/* Bucket heap / gen */
+/* Bucket IO clocks: */
static void bch2_recalc_min_prio(struct bch_fs *c, struct bch_dev *ca, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
+ struct bucket_array *buckets = bucket_array(ca);
struct bucket *g;
u16 max_delta = 1;
unsigned i;
- lockdep_assert_held(&c->bucket_lock);
+ lockdep_assert_held(&c->prio_clock[rw].lock);
/* Determine min prio for this particular device */
- for_each_bucket(g, ca)
+ for_each_bucket(g, buckets)
max_delta = max(max_delta, (u16) (clock->hand - g->prio[rw]));
ca->min_prio[rw] = clock->hand - max_delta;
static void bch2_rescale_prios(struct bch_fs *c, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
+ struct bucket_array *buckets;
struct bch_dev *ca;
struct bucket *g;
unsigned i;
trace_rescale_prios(c);
for_each_member_device(ca, c, i) {
- for_each_bucket(g, ca)
+ down_read(&ca->bucket_lock);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
g->prio[rw] = clock->hand -
- (clock->hand - g->prio[rw]) / 2;
+ (clock->hand - g->prio[rw]) / 2;
bch2_recalc_min_prio(c, ca, rw);
+
+ up_read(&ca->bucket_lock);
}
}
static void bch2_inc_clock_hand(struct io_timer *timer)
{
struct prio_clock *clock = container_of(timer,
- struct prio_clock, rescale);
+ struct prio_clock, rescale);
struct bch_fs *c = container_of(clock,
- struct bch_fs, prio_clock[clock->rw]);
+ struct bch_fs, prio_clock[clock->rw]);
u64 capacity;
- mutex_lock(&c->bucket_lock);
+ mutex_lock(&clock->lock);
clock->hand++;
if (clock->hand == (u16) (clock->min_prio - 1))
bch2_rescale_prios(c, clock->rw);
- mutex_unlock(&c->bucket_lock);
+ mutex_unlock(&clock->lock);
capacity = READ_ONCE(c->capacity);
static void bch2_prio_timer_init(struct bch_fs *c, int rw)
{
struct prio_clock *clock = &c->prio_clock[rw];
- struct io_timer *timer = &clock->rescale;
- clock->rw = rw;
- timer->fn = bch2_inc_clock_hand;
- timer->expire = c->capacity >> 10;
+ clock->hand = 1;
+ clock->rw = rw;
+ clock->rescale.fn = bch2_inc_clock_hand;
+ clock->rescale.expire = c->capacity >> 10;
+ mutex_init(&clock->lock);
}
+/* Background allocator thread: */
+
/*
- * Background allocation thread: scans for buckets to be invalidated,
- * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
- * then optionally issues discard commands to the newly free buckets, then puts
- * them on the various freelists.
+ * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
+ * (marking them as invalidated on disk), then optionally issues discard
+ * commands to the newly free buckets, then puts them on the various freelists.
*/
-static inline bool can_inc_bucket_gen(struct bch_dev *ca, struct bucket *g)
+static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
+ size_t bucket)
+{
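+	/*
+	 * The usual freelist invariants don't all hold while the allocator is
+	 * being brought up (see __bch2_fs_allocator_start()), so only check
+	 * once it's running:
+	 */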
+ if (expensive_debug_checks(c) &&
+ test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
+ size_t iter;
+ long i;
+ unsigned j;
+
+ for (j = 0; j < RESERVE_NR; j++)
+ fifo_for_each_entry(i, &ca->free[j], iter)
+ BUG_ON(i == bucket);
+ fifo_for_each_entry(i, &ca->free_inc, iter)
+ BUG_ON(i == bucket);
+ }
+}
+
+#define BUCKET_GC_GEN_MAX 96U
+
+/**
+ * wait_buckets_available - wait on reclaimable buckets
+ *
+ * If there aren't enough available buckets to fill up free_inc, wait until
+ * there are.
+ */
+static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
- return bucket_gc_gen(ca, g) < BUCKET_GC_GEN_MAX;
+ unsigned long gc_count = c->gc_count;
+ int ret = 0;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop()) {
+ ret = -1;
+ break;
+ }
+
+ if (gc_count != c->gc_count)
+ ca->inc_gen_really_needs_gc = 0;
+
+ if ((ssize_t) (dev_buckets_available(c, ca) -
+ ca->inc_gen_really_needs_gc) >=
+ (ssize_t) fifo_free(&ca->free_inc))
+ break;
+
+ up_read(&c->gc_lock);
+ schedule();
+ try_to_freeze();
+ down_read(&c->gc_lock);
+ }
+
+ __set_current_state(TASK_RUNNING);
+ return ret;
}
-static bool bch2_can_invalidate_bucket(struct bch_dev *ca, struct bucket *g,
+static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
+ size_t bucket,
struct bucket_mark mark)
{
+ u8 gc_gen;
+
if (!is_available_bucket(mark))
return false;
- if (bucket_gc_gen(ca, g) >= BUCKET_GC_GEN_MAX / 2)
+ gc_gen = bucket_gc_gen(ca, bucket);
+
+ if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
ca->inc_gen_needs_gc++;
- if (bucket_gc_gen(ca, g) >= BUCKET_GC_GEN_MAX)
+ if (gc_gen >= BUCKET_GC_GEN_MAX)
ca->inc_gen_really_needs_gc++;
- return can_inc_bucket_gen(ca, g);
+ return gc_gen < BUCKET_GC_GEN_MAX;
}
static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g)
+ size_t bucket)
{
struct bucket_mark m;
spin_lock(&c->freelist_lock);
- if (!bch2_invalidate_bucket(c, ca, g, &m)) {
+ if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
spin_unlock(&c->freelist_lock);
return;
}
- verify_not_on_freelist(c, ca, g - ca->buckets);
- BUG_ON(!fifo_push(&ca->free_inc, g - ca->buckets));
+ verify_not_on_freelist(c, ca, bucket);
+ BUG_ON(!fifo_push(&ca->free_inc, bucket));
spin_unlock(&c->freelist_lock);
- g->prio[READ] = c->prio_clock[READ].hand;
- g->prio[WRITE] = c->prio_clock[WRITE].hand;
+ /* gc lock held: */
+ bucket_io_clock_reset(c, ca, bucket, READ);
+ bucket_io_clock_reset(c, ca, bucket, WRITE);
if (m.cached_sectors) {
ca->allocator_invalidating_data = true;
*/
static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
- struct bucket *g, struct bucket_mark m)
+ size_t b, struct bucket_mark m)
{
/*
* Time since last read, scaled to [0, 8) where larger value indicates
* more recently read data:
*/
unsigned long hotness =
- (g->prio[READ] - ca->min_prio[READ]) * 7 /
+ (bucket(ca, b)->prio[READ] - ca->min_prio[READ]) * 7 /
(c->prio_clock[READ].hand - ca->min_prio[READ]);
/* How much we want to keep the data in this bucket: */
(hotness + 1) * bucket_sectors_used(m);
unsigned long needs_journal_commit =
- bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
+ bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
return (data_wantness << 9) |
(needs_journal_commit << 8) |
- bucket_gc_gen(ca, g);
+ bucket_gc_gen(ca, b);
}
static inline int bucket_alloc_cmp(alloc_heap *h,
return (l.key > r.key) - (l.key < r.key);
}
-static void invalidate_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
+static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
+ struct bucket_array *buckets;
struct alloc_heap_entry e;
- struct bucket *g;
+ size_t b;
ca->alloc_heap.used = 0;
- mutex_lock(&c->bucket_lock);
+ mutex_lock(&c->prio_clock[READ].lock);
+ down_read(&ca->bucket_lock);
+
+ buckets = bucket_array(ca);
+
bch2_recalc_min_prio(c, ca, READ);
- bch2_recalc_min_prio(c, ca, WRITE);
/*
* Find buckets with lowest read priority, by building a maxheap sorted
* by read priority and repeatedly replacing the maximum element until
* all buckets have been visited.
*/
- for_each_bucket(g, ca) {
- struct bucket_mark m = READ_ONCE(g->mark);
+ for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
+ struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
- if (!bch2_can_invalidate_bucket(ca, g, m))
+ if (!bch2_can_invalidate_bucket(ca, b, m))
continue;
e = (struct alloc_heap_entry) {
- .bucket = g - ca->buckets,
- .key = bucket_sort_key(c, ca, g, m)
+ .bucket = b,
+ .key = bucket_sort_key(c, ca, b, m)
};
heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
}
+ up_read(&ca->bucket_lock);
+ mutex_unlock(&c->prio_clock[READ].lock);
+
heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
/*
*/
while (!fifo_full(&ca->free_inc) &&
heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp))
- bch2_invalidate_one_bucket(c, ca, &ca->buckets[e.bucket]);
-
- mutex_unlock(&c->bucket_lock);
+ bch2_invalidate_one_bucket(c, ca, e.bucket);
}
-static void invalidate_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
+static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
+ struct bucket_array *buckets = bucket_array(ca);
struct bucket_mark m;
- struct bucket *g;
- size_t checked = 0;
+ size_t b, checked;
- while (!fifo_full(&ca->free_inc)) {
+ for (checked = 0;
+ checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
+ checked++) {
if (ca->fifo_last_bucket < ca->mi.first_bucket ||
ca->fifo_last_bucket >= ca->mi.nbuckets)
ca->fifo_last_bucket = ca->mi.first_bucket;
- g = ca->buckets + ca->fifo_last_bucket++;
- m = READ_ONCE(g->mark);
+ b = ca->fifo_last_bucket++;
- if (bch2_can_invalidate_bucket(ca, g, m))
- bch2_invalidate_one_bucket(c, ca, g);
+ m = READ_ONCE(buckets->b[b].mark);
- if (++checked >= ca->mi.nbuckets)
- return;
+ if (bch2_can_invalidate_bucket(ca, b, m))
+ bch2_invalidate_one_bucket(c, ca, b);
}
}
-static void invalidate_buckets_random(struct bch_fs *c, struct bch_dev *ca)
+static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
+ struct bucket_array *buckets = bucket_array(ca);
struct bucket_mark m;
- struct bucket *g;
- size_t checked = 0;
+ size_t checked;
- while (!fifo_full(&ca->free_inc)) {
- size_t n = bch2_rand_range(ca->mi.nbuckets -
- ca->mi.first_bucket) +
+ for (checked = 0;
+ checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
+ checked++) {
+ size_t b = bch2_rand_range(ca->mi.nbuckets -
+ ca->mi.first_bucket) +
ca->mi.first_bucket;
- g = ca->buckets + n;
- m = READ_ONCE(g->mark);
+ m = READ_ONCE(buckets->b[b].mark);
- if (bch2_can_invalidate_bucket(ca, g, m))
- bch2_invalidate_one_bucket(c, ca, g);
-
- if (++checked >= ca->mi.nbuckets / 2)
- return;
+ if (bch2_can_invalidate_bucket(ca, b, m))
+ bch2_invalidate_one_bucket(c, ca, b);
}
}
-static void invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
+static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
ca->inc_gen_needs_gc = 0;
ca->inc_gen_really_needs_gc = 0;
switch (ca->mi.replacement) {
case CACHE_REPLACEMENT_LRU:
- invalidate_buckets_lru(c, ca);
+ find_reclaimable_buckets_lru(c, ca);
break;
case CACHE_REPLACEMENT_FIFO:
- invalidate_buckets_fifo(c, ca);
+ find_reclaimable_buckets_fifo(c, ca);
break;
case CACHE_REPLACEMENT_RANDOM:
- invalidate_buckets_random(c, ca);
+ find_reclaimable_buckets_random(c, ca);
break;
}
}
return (*l > *r) - (*l < *r);
}
+static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
+{
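+	/*
+	 * Sort free_inc so that the btree updates and discards that follow are
+	 * done in bucket order; sorting the fifo contents directly requires
+	 * front == 0:
+	 */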
+ BUG_ON(ca->free_inc.front);
+
+ spin_lock(&c->freelist_lock);
+ sort(ca->free_inc.data,
+ ca->free_inc.back,
+ sizeof(ca->free_inc.data[0]),
+ size_t_cmp, NULL);
+ spin_unlock(&c->freelist_lock);
+}
+
static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
- u64 *journal_seq)
+ u64 *journal_seq, size_t nr)
{
struct btree_iter iter;
- unsigned nr_invalidated = 0;
- size_t b, i;
int ret = 0;
bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
- BTREE_ITER_INTENT);
+ BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
- fifo_for_each_entry(b, &ca->free_inc, i) {
- ret = __bch2_alloc_write_key(c, ca, ca->buckets + b,
- &iter, journal_seq);
+ /*
+ * XXX: if ca->nr_invalidated != 0, just return if we'd block doing the
+ * btree update or journal_res_get
+ */
+ while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
+ size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);
+
+ ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq);
if (ret)
break;
- nr_invalidated++;
+ ca->nr_invalidated++;
}
bch2_btree_iter_unlock(&iter);
- return nr_invalidated ?: ret;
+ return ret;
}
-/*
- * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
- * then add it to the freelist, waiting until there's room if necessary:
- */
-static void discard_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca,
- long bucket)
+static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca,
+				      size_t bucket)
{
- if (ca->mi.discard &&
- blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca, bucket),
- ca->mi.bucket_size, GFP_NOIO, 0);
+ unsigned i;
- while (1) {
- bool pushed = false;
- unsigned i;
+ /*
+ * Don't remove from free_inc until after it's added to
+ * freelist, so gc can find it:
+ */
+ spin_lock(&c->freelist_lock);
+ for (i = 0; i < RESERVE_NR; i++)
+ if (fifo_push(&ca->free[i], bucket)) {
+ fifo_pop(&ca->free_inc, bucket);
+ --ca->nr_invalidated;
+ closure_wake_up(&c->freelist_wait);
+ spin_unlock(&c->freelist_lock);
+ return true;
+ }
+ spin_unlock(&c->freelist_lock);
- set_current_state(TASK_INTERRUPTIBLE);
+ return false;
+}
- /*
- * Don't remove from free_inc until after it's added to
- * freelist, so gc can find it:
- */
- spin_lock(&c->freelist_lock);
- for (i = 0; i < RESERVE_NR; i++)
- if (fifo_push(&ca->free[i], bucket)) {
- fifo_pop(&ca->free_inc, bucket);
- closure_wake_up(&c->freelist_wait);
- pushed = true;
- break;
- }
- spin_unlock(&c->freelist_lock);
+static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca,
+				   size_t bucket)
+{
+ int ret = 0;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
- if (pushed)
+ if (__push_invalidated_bucket(c, ca, bucket))
break;
- if (kthread_should_stop())
+ if ((current->flags & PF_KTHREAD) &&
+ kthread_should_stop()) {
+ ret = -1;
break;
+ }
schedule();
try_to_freeze();
}
__set_current_state(TASK_RUNNING);
+ return ret;
+}
+
+/*
+ * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
+ * then add it to the freelist, waiting until there's room if necessary:
+ */
+static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
+{
+ while (ca->nr_invalidated) {
+ size_t bucket = fifo_peek(&ca->free_inc);
+
+ BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);
+
+ if (ca->mi.discard &&
+ blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ bucket_to_sector(ca, bucket),
+ ca->mi.bucket_size, GFP_NOIO, 0);
+
+ if (push_invalidated_bucket(c, ca, bucket))
+ return -1;
+ }
+
+ return 0;
}
/**
* bch_allocator_thread - move buckets from free_inc to reserves
*
- * The free_inc FIFO is populated by invalidate_buckets(), and
+ * The free_inc FIFO is populated by find_reclaimable_buckets(), and
* the reserves are depleted by bucket allocation. When we run out
* of free_inc, try to invalidate some buckets and write out
* prios and gens.
struct bch_dev *ca = arg;
struct bch_fs *c = ca->fs;
u64 journal_seq;
- size_t bucket;
int ret;
set_freezable();
while (1) {
while (1) {
- while (ca->nr_invalidated) {
- BUG_ON(fifo_empty(&ca->free_inc));
-
- bucket = fifo_peek(&ca->free_inc);
- discard_invalidated_bucket(c, ca, bucket);
- if (kthread_should_stop())
- return 0;
- --ca->nr_invalidated;
- }
+ ret = discard_invalidated_buckets(c, ca);
+ if (ret)
+ return 0;
if (fifo_empty(&ca->free_inc))
break;
journal_seq = 0;
- ret = bch2_invalidate_free_inc(c, ca, &journal_seq);
- if (ret < 0)
+ ret = bch2_invalidate_free_inc(c, ca, &journal_seq, SIZE_MAX);
+ if (ret)
return 0;
- ca->nr_invalidated = ret;
-
- if (ca->nr_invalidated == fifo_used(&ca->free_inc)) {
- ca->alloc_thread_started = true;
- bch2_alloc_write(c, ca, &journal_seq);
- }
-
if (ca->allocator_invalidating_data)
- bch2_journal_flush_seq(&c->journal, journal_seq);
+ ret = bch2_journal_flush_seq(&c->journal, journal_seq);
else if (ca->allocator_journal_seq_flush)
- bch2_journal_flush_seq(&c->journal,
+ ret = bch2_journal_flush_seq(&c->journal,
ca->allocator_journal_seq_flush);
+
+ /*
+ * journal error - buckets haven't actually been
+ * invalidated, can't discard them:
+ */
+ if (ret)
+ return 0;
}
/* Reset front/back so we can easily sort fifo entries later: */
* another cache tier
*/
- invalidate_buckets(c, ca);
+ find_reclaimable_buckets(c, ca);
trace_alloc_batch(ca, fifo_used(&ca->free_inc),
ca->free_inc.size);
}
up_read(&c->gc_lock);
- BUG_ON(ca->free_inc.front);
-
- spin_lock(&c->freelist_lock);
- sort(ca->free_inc.data,
- ca->free_inc.back,
- sizeof(ca->free_inc.data[0]),
- size_t_cmp, NULL);
- spin_unlock(&c->freelist_lock);
+ sort_free_inc(c, ca);
/*
* free_inc is now full of newly-invalidated buckets: next,
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
spin_lock(&ob->lock);
- bch2_mark_alloc_bucket(c, ca, PTR_BUCKET(ca, &ob->ptr), false,
- gc_pos_alloc(c, ob), 0);
+ bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
+ false, gc_pos_alloc(c, ob), 0);
ob->valid = false;
spin_unlock(&ob->lock);
return ob;
}
-/*
- * XXX: allocation on startup is still sketchy. There is insufficient
- * synchronization for bch2_bucket_alloc_startup() to work correctly after
- * bch2_alloc_write() has been called, and we aren't currently doing anything
- * to guarantee that this won't happen.
- *
- * Even aside from that, it's really difficult to avoid situations where on
- * startup we write out a pointer to a freshly allocated bucket before the
- * corresponding gen - when we're still digging ourself out of the "i need to
- * allocate to write bucket gens, but i need to write bucket gens to allocate"
- * hole.
- *
- * Fortunately, bch2_btree_mark_key_initial() will detect and repair this
- * easily enough...
- */
-static long bch2_bucket_alloc_startup(struct bch_fs *c, struct bch_dev *ca)
+/* _only_ for allocating the journal and btree roots on a brand new fs: */
+int bch2_bucket_alloc_startup(struct bch_fs *c, struct bch_dev *ca)
{
- struct bucket *g;
- long r = -1;
-
- if (!down_read_trylock(&c->gc_lock))
- return r;
+ struct bucket_array *buckets;
+ ssize_t b;
- if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
- goto out;
-
- for_each_bucket(g, ca)
- if (!g->mark.touched_this_mount &&
- is_available_bucket(g->mark) &&
- bch2_mark_alloc_bucket_startup(c, ca, g)) {
- r = g - ca->buckets;
- set_bit(r, ca->bucket_dirty);
- break;
+ rcu_read_lock();
+ buckets = bucket_array(ca);
+
+ for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
+ if (is_available_bucket(buckets->b[b].mark)) {
+ bch2_mark_alloc_bucket(c, ca, b, true,
+ gc_pos_alloc(c, NULL),
+ BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+ BCH_BUCKET_MARK_GC_LOCK_HELD);
+ set_bit(b, ca->buckets_dirty);
+ goto success;
}
-out:
- up_read(&c->gc_lock);
- return r;
+ b = -1;
+success:
+ rcu_read_unlock();
+ return b;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
bool may_alloc_partial,
struct closure *cl)
{
+ struct bucket_array *buckets;
struct open_bucket *ob;
long bucket;
break;
}
- if (unlikely(!ca->alloc_thread_started) &&
- (reserve == RESERVE_ALLOC) &&
+ if (unlikely(test_bit(BCH_FS_BRAND_NEW_FS, &c->flags)) &&
(bucket = bch2_bucket_alloc_startup(c, ca)) >= 0)
goto out;
ob = bch2_open_bucket_alloc(c);
spin_lock(&ob->lock);
+ lg_local_lock(&c->usage_lock);
+ buckets = bucket_array(ca);
+
ob->valid = true;
ob->sectors_free = ca->mi.bucket_size;
ob->ptr = (struct bch_extent_ptr) {
- .gen = ca->buckets[bucket].mark.gen,
+ .gen = buckets->b[bucket].mark.gen,
.offset = bucket_to_sector(ca, bucket),
.dev = ca->dev_idx,
};
+
+ bucket_io_clock_reset(c, ca, bucket, READ);
+ bucket_io_clock_reset(c, ca, bucket, WRITE);
+
+ lg_local_unlock(&c->usage_lock);
spin_unlock(&ob->lock);
spin_unlock(&c->freelist_lock);
bch2_wake_allocator(ca);
- ca->buckets[bucket].prio[READ] = c->prio_clock[READ].hand;
- ca->buckets[bucket].prio[WRITE] = c->prio_clock[WRITE].hand;
-
trace_bucket_alloc(ca, reserve);
return ob - c->open_buckets;
}
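+/*
+ * Device ordering for allocation: lower tiers sort first; within a tier,
+ * devices sort by wp->next_alloc, a counter that bch2_wp_rescale() grows in
+ * inverse proportion to free space - so writes get spread across devices
+ * roughly according to how much free space each one has:
+ */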
-struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
- struct write_point *wp,
- struct bch_devs_mask *devs)
+static int __dev_alloc_cmp(struct bch_fs *c,
+ struct write_point *wp,
+ unsigned l, unsigned r)
{
- struct dev_alloc_list ret = { .nr = 0 };
- struct bch_dev *ca, *ca2;
- unsigned i, j;
+ struct bch_dev *ca_l = rcu_dereference(c->devs[l]);
+ struct bch_dev *ca_r = rcu_dereference(c->devs[r]);
- for_each_member_device_rcu(ca, c, i, devs) {
- for (j = 0; j < ret.nr; j++) {
- unsigned idx = ret.devs[j];
+ if (ca_l && ca_r && ca_l->mi.tier != ca_r->mi.tier)
+ return ((ca_l->mi.tier > ca_r->mi.tier) -
+ (ca_l->mi.tier < ca_r->mi.tier));
- ca2 = rcu_dereference(c->devs[idx]);
- if (!ca2)
- break;
+ return ((wp->next_alloc[l] > wp->next_alloc[r]) -
+ (wp->next_alloc[l] < wp->next_alloc[r]));
+}
- if (ca->mi.tier < ca2->mi.tier)
- break;
+#define dev_alloc_cmp(l, r) __dev_alloc_cmp(c, wp, l, r)
- if (ca->mi.tier == ca2->mi.tier &&
- wp->next_alloc[i] < wp->next_alloc[idx])
- break;
- }
+struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
+ struct write_point *wp,
+ struct bch_devs_mask *devs)
+{
+ struct dev_alloc_list ret = { .nr = 0 };
+ struct bch_dev *ca;
+ unsigned i;
- array_insert_item(ret.devs, ret.nr, j, i);
- }
+ for_each_member_device_rcu(ca, c, i, devs)
+ ret.devs[ret.nr++] = i;
+ bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
return ret;
}
void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
struct write_point *wp)
{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(wp->next_alloc); i++)
- wp->next_alloc[i] >>= 1;
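+	/*
+	 * Charge this device in inverse proportion to its free space, then
+	 * subtract a fraction of its previous value from every device so the
+	 * counters stay bounded: devices with more free space keep smaller
+	 * counters and sort earlier in bch2_wp_alloc_list().
+	 */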
+ u64 *v = wp->next_alloc + ca->dev_idx;
+ u64 free_space = dev_buckets_free(c, ca);
+ u64 free_space_inv = free_space
+ ? div64_u64(1ULL << 48, free_space)
+ : 1ULL << 48;
+ u64 scale = *v / 4;
+
+ if (*v + free_space_inv >= *v)
+ *v += free_space_inv;
+ else
+ *v = U64_MAX;
+
+ for (v = wp->next_alloc;
+ v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
+ *v = *v < scale ? 0 : *v - scale;
}
static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c,
{
enum bucket_alloc_ret ret = NO_DEVICES;
struct dev_alloc_list devs_sorted;
- u64 buckets_free;
unsigned i;
BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));
BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));
wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
- buckets_free = U64_MAX, dev_buckets_free(c, ca);
- if (buckets_free)
- wp->next_alloc[ca->dev_idx] +=
- div64_u64(U64_MAX, buckets_free *
- ca->mi.bucket_size);
- else
- wp->next_alloc[ca->dev_idx] = U64_MAX;
bch2_wp_rescale(c, ca, wp);
__clear_bit(ca->dev_idx, devs->d);
return 0;
}
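+/*
+ * Issue discards for the buckets on free_inc that have already been
+ * invalidated on disk:
+ */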
+static void allocator_start_issue_discards(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned dev_iter;
+ size_t i, bu;
+
+ for_each_rw_member(ca, c, dev_iter) {
+ unsigned done = 0;
+
+ fifo_for_each_entry(bu, &ca->free_inc, i) {
+ if (done == ca->nr_invalidated)
+ break;
+
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ bucket_to_sector(ca, bu),
+ ca->mi.bucket_size, GFP_NOIO, 0);
+ done++;
+ }
+ }
+}
+
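+/*
+ * Starting the allocator is a chicken-and-egg problem: writing out the new
+ * alloc keys requires allocating btree nodes, which requires free buckets.
+ * First use buckets that are already invalidated (per the alloc btree); if
+ * that doesn't yield enough, invalidate buckets in memory, seed RESERVE_BTREE
+ * with them, and only then persist the new gens and issue discards:
+ */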
+static int __bch2_fs_allocator_start(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ size_t bu, i, devs_have_enough = 0;
+ unsigned dev_iter;
+ u64 journal_seq = 0;
+ bool invalidating_data = false;
+ int ret = 0;
+
+ if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
+ return -1;
+
+ /* Scan for buckets that are already invalidated: */
+ for_each_rw_member(ca, c, dev_iter) {
+ struct btree_iter iter;
+ struct bucket_mark m;
+ struct bkey_s_c k;
+
+ for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
+ if (k.k->type != BCH_ALLOC)
+ continue;
+
+ bu = k.k->p.offset;
+ m = READ_ONCE(bucket(ca, bu)->mark);
+
+ if (!is_available_bucket(m) || m.cached_sectors)
+ continue;
+
+ bch2_mark_alloc_bucket(c, ca, bu, true,
+ gc_pos_alloc(c, NULL),
+ BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
+ BCH_BUCKET_MARK_GC_LOCK_HELD);
+
+ fifo_push(&ca->free_inc, bu);
+ ca->nr_invalidated++;
+
+ if (fifo_full(&ca->free_inc))
+ break;
+ }
+ bch2_btree_iter_unlock(&iter);
+ }
+
+ /* did we find enough buckets? */
+ for_each_rw_member(ca, c, dev_iter)
+ devs_have_enough += (fifo_used(&ca->free_inc) >=
+ ca->free[RESERVE_BTREE].size);
+
+ if (devs_have_enough >= c->opts.metadata_replicas)
+ return 0;
+
+ /* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
+ for_each_rw_member(ca, c, dev_iter)
+ discard_invalidated_buckets(c, ca);
+
+ for_each_rw_member(ca, c, dev_iter) {
+ BUG_ON(!fifo_empty(&ca->free_inc));
+ ca->free_inc.front = ca->free_inc.back = 0;
+
+ find_reclaimable_buckets(c, ca);
+ sort_free_inc(c, ca);
+
+ invalidating_data |= ca->allocator_invalidating_data;
+
+ fifo_for_each_entry(bu, &ca->free_inc, i)
+ if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
+ break;
+ }
+
+ /*
+ * We're moving buckets to freelists _before_ they've been marked as
+ * invalidated on disk - we have to so that we can allocate new btree
+ * nodes to mark them as invalidated on disk.
+ *
+ * However, we can't _write_ to any of these buckets yet - they might
+ * have cached data in them, which is live until they're marked as
+ * invalidated on disk:
+ */
+ if (invalidating_data)
+ set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
+ else
+ allocator_start_issue_discards(c);
+
+ /*
+ * XXX: it's possible for this to deadlock waiting on journal reclaim,
+ * since we're holding btree writes. What then?
+ */
+
+ for_each_rw_member(ca, c, dev_iter) {
+ ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
+ ca->free[RESERVE_BTREE].size);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ return ret;
+ }
+ }
+
+ if (invalidating_data) {
+ ret = bch2_journal_flush_seq(&c->journal, journal_seq);
+ if (ret)
+ return ret;
+ }
+
+ if (invalidating_data)
+ allocator_start_issue_discards(c);
+
+ for_each_rw_member(ca, c, dev_iter)
+ while (ca->nr_invalidated) {
+ BUG_ON(!fifo_pop(&ca->free_inc, bu));
+ ca->nr_invalidated--;
+ }
+
+ set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
+
+ /* now flush dirty btree nodes: */
+ if (invalidating_data) {
+ struct bucket_table *tbl;
+ struct rhash_head *pos;
+ struct btree *b;
+
+ clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
+again:
+ rcu_read_lock();
+ for_each_cached_btree(b, c, tbl, i, pos)
+ if (btree_node_dirty(b) && (!b->written || b->level)) {
+ rcu_read_unlock();
+ six_lock_read(&b->lock);
+ bch2_btree_node_write(c, b, SIX_LOCK_read);
+ six_unlock_read(&b->lock);
+ goto again;
+ }
+ rcu_read_unlock();
+ }
+
+ return 0;
+}
+
+int bch2_fs_allocator_start(struct bch_fs *c)
+{
+ struct bch_dev *ca;
+ unsigned i;
+ int ret;
+
+ down_read(&c->gc_lock);
+ ret = __bch2_fs_allocator_start(c);
+ up_read(&c->gc_lock);
+
+ if (ret)
+ return ret;
+
+ for_each_rw_member(ca, c, i) {
+ ret = bch2_dev_allocator_start(ca);
+ if (ret) {
+ percpu_ref_put(&ca->io_ref);
+ return ret;
+ }
+ }
+
+ return bch2_alloc_write(c);
+}
+
void bch2_fs_allocator_init(struct bch_fs *c)
{
struct open_bucket *ob;