X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Falloc_background.c;h=a44d93b1f263b47662446930fb90382c11af8ef5;hb=7923c2ee0616ef35d984e292dd9e799babe96662;hp=2a36af5e022047945876f82c32bf77b2d3d70038;hpb=38f8daa2b17627488c51f2e0c150213bd1636e00;p=bcachefs-tools-debian diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c index 2a36af5..a44d93b 100644 --- a/libbcachefs/alloc_background.c +++ b/libbcachefs/alloc_background.c @@ -9,10 +9,12 @@ #include "btree_update_interior.h" #include "btree_gc.h" #include "buckets.h" +#include "buckets_waiting_for_journal.h" #include "clock.h" #include "debug.h" #include "ec.h" #include "error.h" +#include "lru.h" #include "recovery.h" #include "varint.h" @@ -25,12 +27,7 @@ #include #include -const char * const bch2_allocator_states[] = { -#define x(n) #n, - ALLOC_THREAD_STATES() -#undef x - NULL -}; +/* Persistent alloc info: */ static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = { #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8, @@ -38,16 +35,19 @@ static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = { #undef x }; -struct bkey_alloc_buf { - struct bkey_i k; - struct bch_alloc_v3 v; - -#define x(_name, _bits) + _bits / 8 - u8 _pad[0 + BCH_ALLOC_FIELDS_V2()]; +struct bkey_alloc_unpacked { + u64 journal_seq; + u64 bucket; + u8 dev; + u8 gen; + u8 oldest_gen; + u8 data_type; + bool need_discard:1; + bool need_inc_gen:1; +#define x(_name, _bits) u##_bits _name; + BCH_ALLOC_FIELDS_V2() #undef x -} __attribute__((packed, aligned(8))); - -/* Persistent alloc info: */ +}; static inline u64 alloc_field_v1_get(const struct bch_alloc *a, const void **p, unsigned field) @@ -169,6 +169,8 @@ static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out, out->gen = a.v->gen; out->oldest_gen = a.v->oldest_gen; out->data_type = a.v->data_type; + out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v); + out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v); out->journal_seq = le64_to_cpu(a.v->journal_seq); #define x(_name, _bits) \ @@ -190,47 +192,7 @@ static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out, return 0; } -static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst, - const struct bkey_alloc_unpacked src) -{ - struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k); - unsigned nr_fields = 0, last_nonzero_fieldnr = 0; - u8 *out = a->v.data; - u8 *end = (void *) &dst[1]; - u8 *last_nonzero_field = out; - unsigned bytes; - - a->k.p = POS(src.dev, src.bucket); - a->v.gen = src.gen; - a->v.oldest_gen = src.oldest_gen; - a->v.data_type = src.data_type; - a->v.journal_seq = cpu_to_le64(src.journal_seq); - -#define x(_name, _bits) \ - nr_fields++; \ - \ - if (src._name) { \ - out += bch2_varint_encode_fast(out, src._name); \ - \ - last_nonzero_field = out; \ - last_nonzero_fieldnr = nr_fields; \ - } else { \ - *out++ = 0; \ - } - - BCH_ALLOC_FIELDS_V2() -#undef x - BUG_ON(out > end); - - out = last_nonzero_field; - a->v.nr_fields = last_nonzero_fieldnr; - - bytes = (u8 *) out - (u8 *) &a->v; - set_bkey_val_bytes(&a->k, bytes); - memset_u64s_tail(&a->v, 0, bytes); -} - -struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k) +static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k) { struct bkey_alloc_unpacked ret = { .dev = k.k->p.inode, @@ -253,24 +215,71 @@ struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k) return ret; } -static void bch2_alloc_pack(struct bch_fs *c, - struct bkey_alloc_buf *dst, - const struct bkey_alloc_unpacked src) +void bch2_alloc_to_v4(struct bkey_s_c k, 
struct bch_alloc_v4 *out) { - bch2_alloc_pack_v3(dst, src); + if (k.k->type == KEY_TYPE_alloc_v4) { + *out = *bkey_s_c_to_alloc_v4(k).v; + } else { + struct bkey_alloc_unpacked u = bch2_alloc_unpack(k); + + *out = (struct bch_alloc_v4) { + .journal_seq = u.journal_seq, + .flags = u.need_discard, + .gen = u.gen, + .oldest_gen = u.oldest_gen, + .data_type = u.data_type, + .stripe_redundancy = u.stripe_redundancy, + .dirty_sectors = u.dirty_sectors, + .cached_sectors = u.cached_sectors, + .io_time[READ] = u.read_time, + .io_time[WRITE] = u.write_time, + .stripe = u.stripe, + }; + } } -int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter, - struct bkey_alloc_unpacked *u, unsigned trigger_flags) +struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k) { - struct bkey_alloc_buf *a; + struct bkey_i_alloc_v4 *ret; - a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf)); - if (IS_ERR(a)) - return PTR_ERR(a); + if (k.k->type == KEY_TYPE_alloc_v4) { + ret = bch2_trans_kmalloc(trans, bkey_bytes(k.k)); + if (!IS_ERR(ret)) + bkey_reassemble(&ret->k_i, k); + } else { + ret = bch2_trans_kmalloc(trans, sizeof(*ret)); + if (!IS_ERR(ret)) { + bkey_alloc_v4_init(&ret->k_i); + ret->k.p = k.k->p; + bch2_alloc_to_v4(k, &ret->v); + } + } + return ret; +} + +struct bkey_i_alloc_v4 * +bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter, + struct bpos pos) +{ + struct bkey_s_c k; + struct bkey_i_alloc_v4 *a; + int ret; - bch2_alloc_pack(trans->c, a, *u); - return bch2_trans_update(trans, iter, &a->k, trigger_flags); + bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos, + BTREE_ITER_WITH_UPDATES| + BTREE_ITER_CACHED| + BTREE_ITER_INTENT); + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); + if (ret) { + bch2_trans_iter_exit(trans, iter); + return ERR_PTR(ret); + } + + a = bch2_alloc_to_v4_mut(trans, k); + if (IS_ERR(a)) + bch2_trans_iter_exit(trans, iter); + return a; } static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a) @@ -284,661 +293,987 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a) return DIV_ROUND_UP(bytes, sizeof(u64)); } -const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k) +int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k, + int rw, struct printbuf *err) { struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k); - if (k.k->p.inode >= c->sb.nr_devices || - !c->devs[k.k->p.inode]) - return "invalid device"; - /* allow for unknown fields */ - if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) - return "incorrect value size"; + if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) { + pr_buf(err, "incorrect value size (%zu < %u)", + bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v)); + return -EINVAL; + } - return NULL; + return 0; } -const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k) +int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k, + int rw, struct printbuf *err) { struct bkey_alloc_unpacked u; - if (k.k->p.inode >= c->sb.nr_devices || - !c->devs[k.k->p.inode]) - return "invalid device"; - - if (bch2_alloc_unpack_v2(&u, k)) - return "unpack error"; + if (bch2_alloc_unpack_v2(&u, k)) { + pr_buf(err, "unpack error"); + return -EINVAL; + } - return NULL; + return 0; } -const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k) +int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k, + int rw, struct printbuf *err) { struct bkey_alloc_unpacked u; - if (k.k->p.inode >= 
c->sb.nr_devices || - !c->devs[k.k->p.inode]) - return "invalid device"; - - if (bch2_alloc_unpack_v3(&u, k)) - return "unpack error"; + if (bch2_alloc_unpack_v3(&u, k)) { + pr_buf(err, "unpack error"); + return -EINVAL; + } - return NULL; + return 0; } -void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, - struct bkey_s_c k) +int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k, + int rw, struct printbuf *err) { - struct bkey_alloc_unpacked u = bch2_alloc_unpack(k); + struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k); - pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu", - u.gen, u.oldest_gen, bch2_data_types[u.data_type], - u.journal_seq); -#define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name); - BCH_ALLOC_FIELDS_V2() -#undef x -} + if (bkey_val_bytes(k.k) != sizeof(struct bch_alloc_v4)) { + pr_buf(err, "bad val size (%zu != %zu)", + bkey_val_bytes(k.k), sizeof(struct bch_alloc_v4)); + return -EINVAL; + } -static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k) -{ - struct bch_fs *c = trans->c; - struct bch_dev *ca; - struct bucket *g; - struct bkey_alloc_unpacked u; + if (rw == WRITE) { + if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) { + pr_buf(err, "invalid data type (got %u should be %u)", + a.v->data_type, alloc_data_type(*a.v, a.v->data_type)); + return -EINVAL; + } - if (!bkey_is_alloc(k.k)) - return 0; + switch (a.v->data_type) { + case BCH_DATA_free: + case BCH_DATA_need_gc_gens: + case BCH_DATA_need_discard: + if (a.v->dirty_sectors || + a.v->cached_sectors || + a.v->stripe) { + pr_buf(err, "empty data type free but have data"); + return -EINVAL; + } + break; + case BCH_DATA_sb: + case BCH_DATA_journal: + case BCH_DATA_btree: + case BCH_DATA_user: + case BCH_DATA_parity: + if (!a.v->dirty_sectors) { + pr_buf(err, "data_type %s but dirty_sectors==0", + bch2_data_types[a.v->data_type]); + return -EINVAL; + } + break; + case BCH_DATA_cached: + if (!a.v->cached_sectors || + a.v->dirty_sectors || + a.v->stripe) { + pr_buf(err, "data type inconsistency"); + return -EINVAL; + } - ca = bch_dev_bkey_exists(c, k.k->p.inode); - g = bucket(ca, k.k->p.offset); - u = bch2_alloc_unpack(k); - - *bucket_gen(ca, k.k->p.offset) = u.gen; - g->_mark.gen = u.gen; - g->_mark.data_type = u.data_type; - g->_mark.dirty_sectors = u.dirty_sectors; - g->_mark.cached_sectors = u.cached_sectors; - g->_mark.stripe = u.stripe != 0; - g->stripe = u.stripe; - g->stripe_redundancy = u.stripe_redundancy; - g->io_time[READ] = u.read_time; - g->io_time[WRITE] = u.write_time; - g->oldest_gen = u.oldest_gen; - g->gen_valid = 1; + if (!a.v->io_time[READ]) { + pr_buf(err, "cached bucket with read_time == 0"); + return -EINVAL; + } + break; + case BCH_DATA_stripe: + if (!a.v->stripe) { + pr_buf(err, "data_type %s but stripe==0", + bch2_data_types[a.v->data_type]); + return -EINVAL; + } + break; + } + } return 0; } +void bch2_alloc_v4_swab(struct bkey_s k) +{ + struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v; + + a->journal_seq = swab64(a->journal_seq); + a->flags = swab32(a->flags); + a->dirty_sectors = swab32(a->dirty_sectors); + a->cached_sectors = swab32(a->cached_sectors); + a->io_time[0] = swab64(a->io_time[0]); + a->io_time[1] = swab64(a->io_time[1]); + a->stripe = swab32(a->stripe); + a->nr_external_backpointers = swab32(a->nr_external_backpointers); +} + +void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) +{ + struct bch_alloc_v4 a; + + bch2_alloc_to_v4(k, &a); + + pr_buf(out, "gen %u 
oldest_gen %u data_type %s journal_seq %llu need_discard %llu need_inc_gen %llu", + a.gen, a.oldest_gen, bch2_data_types[a.data_type], + a.journal_seq, + BCH_ALLOC_V4_NEED_DISCARD(&a), + BCH_ALLOC_V4_NEED_INC_GEN(&a)); + pr_buf(out, " dirty_sectors %u", a.dirty_sectors); + pr_buf(out, " cached_sectors %u", a.cached_sectors); + pr_buf(out, " stripe %u", a.stripe); + pr_buf(out, " stripe_redundancy %u", a.stripe_redundancy); + pr_buf(out, " read_time %llu", a.io_time[READ]); + pr_buf(out, " write_time %llu", a.io_time[WRITE]); +} + int bch2_alloc_read(struct bch_fs *c) { struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + struct bch_alloc_v4 a; + struct bch_dev *ca; int ret; bch2_trans_init(&trans, c, 0, 0); - down_read(&c->gc_lock); - ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn); - up_read(&c->gc_lock); + + for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, + BTREE_ITER_PREFETCH, k, ret) { + /* + * Not a fsck error because this is checked/repaired by + * bch2_check_alloc_key() which runs later: + */ + if (!bch2_dev_bucket_exists(c, k.k->p)) + continue; + + ca = bch_dev_bkey_exists(c, k.k->p.inode); + bch2_alloc_to_v4(k, &a); + + *bucket_gen(ca, k.k->p.offset) = a.gen; + } + bch2_trans_iter_exit(&trans, &iter); + bch2_trans_exit(&trans); - if (ret) { + + if (ret) bch_err(c, "error reading alloc info: %i", ret); - return ret; - } - return 0; + return ret; } -static int bch2_alloc_write_key(struct btree_trans *trans, - struct btree_iter *iter, - unsigned flags) +/* Free space/discard btree: */ + +static int bch2_bucket_do_index(struct btree_trans *trans, + struct bkey_s_c alloc_k, + const struct bch_alloc_v4 *a, + bool set) { struct bch_fs *c = trans->c; - struct bkey_s_c k; - struct bkey_alloc_unpacked old_u, new_u; + struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode); + struct btree_iter iter; + struct bkey_s_c old; + struct bkey_i *k; + enum btree_id btree; + enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted; + enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted; + struct printbuf buf = PRINTBUF; int ret; -retry: - bch2_trans_begin(trans); - ret = bch2_btree_key_cache_flush(trans, - BTREE_ID_alloc, iter->pos); - if (ret) - goto err; + if (a->data_type != BCH_DATA_free && + a->data_type != BCH_DATA_need_discard) + return 0; - k = bch2_btree_iter_peek_slot(iter); - ret = bkey_err(k); - if (ret) - goto err; + k = bch2_trans_kmalloc(trans, sizeof(*k)); + if (IS_ERR(k)) + return PTR_ERR(k); - old_u = bch2_alloc_unpack(k); - new_u = alloc_mem_to_key(c, iter); + bkey_init(&k->k); + k->k.type = new_type; - if (!bkey_alloc_unpacked_cmp(old_u, new_u)) + switch (a->data_type) { + case BCH_DATA_free: + btree = BTREE_ID_freespace; + k->k.p = alloc_freespace_pos(alloc_k.k->p, *a); + bch2_key_resize(&k->k, 1); + break; + case BCH_DATA_need_discard: + btree = BTREE_ID_need_discard; + k->k.p = alloc_k.k->p; + break; + default: return 0; + } + + bch2_trans_iter_init(trans, &iter, btree, + bkey_start_pos(&k->k), + BTREE_ITER_INTENT); + old = bch2_btree_iter_peek_slot(&iter); + ret = bkey_err(old); + if (ret) + goto err; + + if (ca->mi.freespace_initialized && + bch2_trans_inconsistent_on(old.k->type != old_type, trans, + "incorrect key when %s %s btree (got %s should be %s)\n" + " for %s", + set ? 
"setting" : "clearing", + bch2_btree_ids[btree], + bch2_bkey_types[old.k->type], + bch2_bkey_types[old_type], + (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { + ret = -EIO; + goto err; + } - ret = bch2_alloc_write(trans, iter, &new_u, - BTREE_TRIGGER_NORUN) ?: - bch2_trans_commit(trans, NULL, NULL, - BTREE_INSERT_NOFAIL|flags); + ret = bch2_trans_update(trans, &iter, k, 0); err: - if (ret == -EINTR) - goto retry; + bch2_trans_iter_exit(trans, &iter); + printbuf_exit(&buf); return ret; } -int bch2_alloc_write_all(struct bch_fs *c, unsigned flags) +int bch2_trans_mark_alloc(struct btree_trans *trans, + struct bkey_s_c old, struct bkey_i *new, + unsigned flags) { - struct btree_trans trans; - struct btree_iter iter; - struct bch_dev *ca; - unsigned i; + struct bch_fs *c = trans->c; + struct bch_alloc_v4 old_a, *new_a; int ret = 0; - bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0); - bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, - BTREE_ITER_SLOTS|BTREE_ITER_INTENT); + /* + * Deletion only happens in the device removal path, with + * BTREE_TRIGGER_NORUN: + */ + BUG_ON(new->k.type != KEY_TYPE_alloc_v4); - for_each_member_device(ca, c, i) { - bch2_btree_iter_set_pos(&iter, - POS(ca->dev_idx, ca->mi.first_bucket)); + bch2_alloc_to_v4(old, &old_a); + new_a = &bkey_i_to_alloc_v4(new)->v; - while (iter.pos.offset < ca->mi.nbuckets) { - ret = bch2_alloc_write_key(&trans, &iter, flags); - if (ret) { - percpu_ref_put(&ca->ref); - goto err; - } - bch2_btree_iter_advance(&iter); - } + new_a->data_type = alloc_data_type(*new_a, new_a->data_type); + + if (new_a->dirty_sectors > old_a.dirty_sectors || + new_a->cached_sectors > old_a.cached_sectors) { + new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now)); + new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now)); + SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true); + SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true); } -err: - bch2_trans_iter_exit(&trans, &iter); - bch2_trans_exit(&trans); - return ret; -} -/* Bucket IO clocks: */ + if (data_type_is_empty(new_a->data_type) && + BCH_ALLOC_V4_NEED_INC_GEN(new_a) && + !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) { + new_a->gen++; + SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false); + } -int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev, - size_t bucket_nr, int rw) -{ - struct bch_fs *c = trans->c; - struct btree_iter iter; - struct bkey_alloc_unpacked u; - u64 *time, now; - int ret = 0; + if (old_a.data_type != new_a->data_type || + (new_a->data_type == BCH_DATA_free && + alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) { + ret = bch2_bucket_do_index(trans, old, &old_a, false) ?: + bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true); + if (ret) + return ret; + } - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr), - BTREE_ITER_CACHED| - BTREE_ITER_CACHED_NOFILL| - BTREE_ITER_INTENT); - ret = bch2_btree_iter_traverse(&iter); - if (ret) - goto out; + if (new_a->data_type == BCH_DATA_cached && + !new_a->io_time[READ]) + new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now)); - u = alloc_mem_to_key(c, &iter); + if ((old_a.data_type == BCH_DATA_cached) != + (new_a->data_type == BCH_DATA_cached)) { + u64 old_lru = alloc_lru_idx(old_a); + u64 new_lru = alloc_lru_idx(*new_a); - time = rw == READ ? 
&u.read_time : &u.write_time; - now = atomic64_read(&c->io_clock[rw].now); - if (*time == now) - goto out; + ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset, + old_lru, &new_lru); + if (ret) + return ret; - *time = now; + if (new_a->data_type == BCH_DATA_cached) + new_a->io_time[READ] = new_lru; + } - ret = bch2_alloc_write(trans, &iter, &u, 0) ?: - bch2_trans_commit(trans, NULL, NULL, 0); -out: - bch2_trans_iter_exit(trans, &iter); - return ret; + return 0; } -/* Background allocator thread: */ - -/* - * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens - * (marking them as invalidated on disk), then optionally issues discard - * commands to the newly free buckets, then puts them on the various freelists. - */ - -static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b, - struct bucket_mark m) +static int bch2_check_alloc_key(struct btree_trans *trans, + struct btree_iter *alloc_iter) { - u8 gc_gen; + struct bch_fs *c = trans->c; + struct bch_dev *ca; + struct btree_iter discard_iter, freespace_iter; + struct bch_alloc_v4 a; + unsigned discard_key_type, freespace_key_type; + struct bkey_s_c alloc_k, k; + struct printbuf buf = PRINTBUF; + struct printbuf buf2 = PRINTBUF; + int ret; - if (!is_available_bucket(m)) - return false; + alloc_k = bch2_btree_iter_peek(alloc_iter); + if (!alloc_k.k) + return 0; - if (m.owned_by_allocator) - return false; + ret = bkey_err(alloc_k); + if (ret) + return ret; - if (ca->buckets_nouse && - test_bit(b, ca->buckets_nouse)) - return false; + if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c, + "alloc key for invalid device:bucket %llu:%llu", + alloc_k.k->p.inode, alloc_k.k->p.offset)) + return bch2_btree_delete_at(trans, alloc_iter, 0); - if (ca->new_fs_bucket_idx) { - /* - * Device or filesystem is still being initialized, and we - * haven't fully marked superblocks & journal: - */ - if (is_superblock_bucket(ca, b)) - return false; + ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode); + if (!ca->mi.freespace_initialized) + return 0; - if (b < ca->new_fs_bucket_idx) - return false; - } + bch2_alloc_to_v4(alloc_k, &a); - gc_gen = bucket_gc_gen(bucket(ca, b)); + discard_key_type = a.data_type == BCH_DATA_need_discard + ? KEY_TYPE_set : 0; + freespace_key_type = a.data_type == BCH_DATA_free + ? KEY_TYPE_set : 0; - ca->inc_gen_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX / 2; - ca->inc_gen_really_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX; + bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, + alloc_k.k->p, 0); + bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, + alloc_freespace_pos(alloc_k.k->p, a), 0); - return gc_gen < BUCKET_GC_GEN_MAX; -} + k = bch2_btree_iter_peek_slot(&discard_iter); + ret = bkey_err(k); + if (ret) + goto err; -/* - * Determines what order we're going to reuse buckets, smallest bucket_key() - * first. 
- */ + if (fsck_err_on(k.k->type != discard_key_type, c, + "incorrect key in need_discard btree (got %s should be %s)\n" + " %s", + bch2_bkey_types[k.k->type], + bch2_bkey_types[discard_key_type], + (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { + struct bkey_i *update = + bch2_trans_kmalloc(trans, sizeof(*update)); -static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m, - u64 now, u64 last_seq_ondisk) -{ - unsigned used = bucket_sectors_used(m); + ret = PTR_ERR_OR_ZERO(update); + if (ret) + goto err; - if (used) { - /* - * Prefer to keep buckets that have been read more recently, and - * buckets that have more data in them: - */ - u64 last_read = max_t(s64, 0, now - g->io_time[READ]); - u32 last_read_scaled = max_t(u64, U32_MAX, div_u64(last_read, used)); + bkey_init(&update->k); + update->k.type = discard_key_type; + update->k.p = discard_iter.pos; - return -last_read_scaled; - } else { - /* - * Prefer to use buckets with smaller gc_gen so that we don't - * have to walk the btree and recalculate oldest_gen - but shift - * off the low bits so that buckets will still have equal sort - * keys when there's only a small difference, so that we can - * keep sequential buckets together: - */ - return (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)| - (bucket_gc_gen(g) >> 4); + ret = bch2_trans_update(trans, &discard_iter, update, 0); + if (ret) + goto err; } -} -static inline int bucket_alloc_cmp(alloc_heap *h, - struct alloc_heap_entry l, - struct alloc_heap_entry r) -{ - return cmp_int(l.key, r.key) ?: - cmp_int(r.nr, l.nr) ?: - cmp_int(l.bucket, r.bucket); -} + k = bch2_btree_iter_peek_slot(&freespace_iter); + ret = bkey_err(k); + if (ret) + goto err; -static inline int bucket_idx_cmp(const void *_l, const void *_r) -{ - const struct alloc_heap_entry *l = _l, *r = _r; + if (fsck_err_on(k.k->type != freespace_key_type, c, + "incorrect key in freespace btree (got %s should be %s)\n" + " %s", + bch2_bkey_types[k.k->type], + bch2_bkey_types[freespace_key_type], + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) { + struct bkey_i *update = + bch2_trans_kmalloc(trans, sizeof(*update)); + + ret = PTR_ERR_OR_ZERO(update); + if (ret) + goto err; + + bkey_init(&update->k); + update->k.type = freespace_key_type; + update->k.p = freespace_iter.pos; + bch2_key_resize(&update->k, 1); - return cmp_int(l->bucket, r->bucket); + ret = bch2_trans_update(trans, &freespace_iter, update, 0); + if (ret) + goto err; + } +err: +fsck_err: + bch2_trans_iter_exit(trans, &freespace_iter); + bch2_trans_iter_exit(trans, &discard_iter); + printbuf_exit(&buf2); + printbuf_exit(&buf); + return ret; } -static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca) +static int bch2_check_discard_freespace_key(struct btree_trans *trans, + struct btree_iter *iter) { - struct bucket_array *buckets; - struct alloc_heap_entry e = { 0 }; - u64 now, last_seq_ondisk; - size_t b, i, nr = 0; + struct bch_fs *c = trans->c; + struct btree_iter alloc_iter; + struct bkey_s_c k, freespace_k; + struct bch_alloc_v4 a; + u64 genbits; + struct bpos pos; + enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard + ? 
BCH_DATA_need_discard + : BCH_DATA_free; + struct printbuf buf = PRINTBUF; + int ret; - down_read(&ca->bucket_lock); + freespace_k = bch2_btree_iter_peek(iter); + if (!freespace_k.k) + return 1; - buckets = bucket_array(ca); - ca->alloc_heap.used = 0; - now = atomic64_read(&c->io_clock[READ].now); - last_seq_ondisk = c->journal.flushed_seq_ondisk; + ret = bkey_err(freespace_k); + if (ret) + return ret; - /* - * Find buckets with lowest read priority, by building a maxheap sorted - * by read priority and repeatedly replacing the maximum element until - * all buckets have been visited. - */ - for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) { - struct bucket *g = &buckets->b[b]; - struct bucket_mark m = READ_ONCE(g->mark); - unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk); + pos = iter->pos; + pos.offset &= ~(~0ULL << 56); + genbits = iter->pos.offset & (~0ULL << 56); - cond_resched(); + bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0); - if (!bch2_can_invalidate_bucket(ca, b, m)) - continue; + if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c, + "entry in %s btree for nonexistant dev:bucket %llu:%llu", + bch2_btree_ids[iter->btree_id], pos.inode, pos.offset)) + goto delete; - if (e.nr && e.bucket + e.nr == b && e.key == key) { - e.nr++; - } else { - if (e.nr) - heap_add_or_replace(&ca->alloc_heap, e, - -bucket_alloc_cmp, NULL); - - e = (struct alloc_heap_entry) { - .bucket = b, - .nr = 1, - .key = key, - }; - } - } + k = bch2_btree_iter_peek_slot(&alloc_iter); + ret = bkey_err(k); + if (ret) + goto err; + + bch2_alloc_to_v4(k, &a); + + if (fsck_err_on(a.data_type != state || + (state == BCH_DATA_free && + genbits != alloc_freespace_genbits(a)), c, + "%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)", + (bch2_bkey_val_to_text(&buf, c, k), buf.buf), + bch2_btree_ids[iter->btree_id], + a.data_type == state, + genbits >> 56, alloc_freespace_genbits(a) >> 56)) + goto delete; +out: +err: +fsck_err: + bch2_trans_iter_exit(trans, &alloc_iter); + printbuf_exit(&buf); + return ret; +delete: + ret = bch2_btree_delete_extent_at(trans, iter, + iter->btree_id == BTREE_ID_freespace ? 
1 : 0, 0); + goto out; +} - if (e.nr) - heap_add_or_replace(&ca->alloc_heap, e, - -bucket_alloc_cmp, NULL); +int bch2_check_alloc_info(struct bch_fs *c) +{ + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + int ret = 0; - for (i = 0; i < ca->alloc_heap.used; i++) - nr += ca->alloc_heap.data[i].nr; + bch2_trans_init(&trans, c, 0, 0); - while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) { - nr -= ca->alloc_heap.data[0].nr; - heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL); + for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, + BTREE_ITER_PREFETCH, k, ret) { + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_check_alloc_key(&trans, &iter)); + if (ret) + break; } + bch2_trans_iter_exit(&trans, &iter); - up_read(&ca->bucket_lock); -} - -static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca) -{ - size_t i, nr = 0; + if (ret) + goto err; - ca->inc_gen_needs_gc = 0; - ca->inc_gen_really_needs_gc = 0; + bch2_trans_iter_init(&trans, &iter, BTREE_ID_need_discard, POS_MIN, + BTREE_ITER_PREFETCH); + while (1) { + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_check_discard_freespace_key(&trans, &iter)); + if (ret) + break; - find_reclaimable_buckets_lru(c, ca); + bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos)); + } + bch2_trans_iter_exit(&trans, &iter); - heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL); + if (ret) + goto err; - for (i = 0; i < ca->alloc_heap.used; i++) - nr += ca->alloc_heap.data[i].nr; + bch2_trans_iter_init(&trans, &iter, BTREE_ID_freespace, POS_MIN, + BTREE_ITER_PREFETCH); + while (1) { + ret = __bch2_trans_do(&trans, NULL, NULL, 0, + bch2_check_discard_freespace_key(&trans, &iter)); + if (ret) + break; - return nr; + bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos)); + } + bch2_trans_iter_exit(&trans, &iter); +err: + bch2_trans_exit(&trans); + return ret < 0 ? 
ret : 0; } -static int bucket_invalidate_btree(struct btree_trans *trans, - struct bch_dev *ca, u64 b, - struct bkey_alloc_unpacked *u) +static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans, + struct btree_iter *alloc_iter) { struct bch_fs *c = trans->c; - struct btree_iter iter; + struct btree_iter lru_iter; + struct bch_alloc_v4 a; + struct bkey_s_c alloc_k, k; + struct printbuf buf = PRINTBUF; + struct printbuf buf2 = PRINTBUF; int ret; - bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, - POS(ca->dev_idx, b), - BTREE_ITER_CACHED| - BTREE_ITER_CACHED_NOFILL| - BTREE_ITER_INTENT); + alloc_k = bch2_btree_iter_peek(alloc_iter); + if (!alloc_k.k) + return 0; - ret = bch2_btree_iter_traverse(&iter); + ret = bkey_err(alloc_k); + if (ret) + return ret; + + bch2_alloc_to_v4(alloc_k, &a); + + if (a.data_type != BCH_DATA_cached) + return 0; + + bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru, + POS(alloc_k.k->p.inode, a.io_time[READ]), 0); + + k = bch2_btree_iter_peek_slot(&lru_iter); + ret = bkey_err(k); if (ret) goto err; - *u = alloc_mem_to_key(c, &iter); + if (fsck_err_on(!a.io_time[READ], c, + "cached bucket with read_time 0\n" + " %s", + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) || + fsck_err_on(k.k->type != KEY_TYPE_lru || + le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c, + "incorrect/missing lru entry\n" + " %s\n" + " %s", + (printbuf_reset(&buf), + bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf), + (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) { + u64 read_time = a.io_time[READ]; + + if (!a.io_time[READ]) + a.io_time[READ] = atomic64_read(&c->io_clock[READ].now); + + ret = bch2_lru_set(trans, + alloc_k.k->p.inode, + alloc_k.k->p.offset, + &a.io_time[READ]); + if (ret) + goto err; - u->gen++; - u->data_type = 0; - u->dirty_sectors = 0; - u->cached_sectors = 0; - u->read_time = atomic64_read(&c->io_clock[READ].now); - u->write_time = atomic64_read(&c->io_clock[WRITE].now); + if (a.io_time[READ] != read_time) { + struct bkey_i_alloc_v4 *a_mut = + bch2_alloc_to_v4_mut(trans, alloc_k); + ret = PTR_ERR_OR_ZERO(a_mut); + if (ret) + goto err; - ret = bch2_alloc_write(trans, &iter, u, - BTREE_TRIGGER_BUCKET_INVALIDATE); + a_mut->v.io_time[READ] = a.io_time[READ]; + ret = bch2_trans_update(trans, alloc_iter, + &a_mut->k_i, BTREE_TRIGGER_NORUN); + if (ret) + goto err; + } + } err: - bch2_trans_iter_exit(trans, &iter); +fsck_err: + bch2_trans_iter_exit(trans, &lru_iter); + printbuf_exit(&buf2); + printbuf_exit(&buf); return ret; } -static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca, - u64 *journal_seq, unsigned flags) +int bch2_check_alloc_to_lru_refs(struct bch_fs *c) { - struct bkey_alloc_unpacked u; - size_t b; + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; int ret = 0; - /* - * If the read-only path is trying to shut down, we can't be generating - * new btree updates: - */ - if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) - return 1; - - BUG_ON(!ca->alloc_heap.used || - !ca->alloc_heap.data[0].nr); - b = ca->alloc_heap.data[0].bucket; + bch2_trans_init(&trans, c, 0, 0); - /* first, put on free_inc and mark as owned by allocator: */ - percpu_down_read(&c->mark_lock); + for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN, + BTREE_ITER_PREFETCH, k, ret) { + ret = __bch2_trans_do(&trans, NULL, NULL, + BTREE_INSERT_NOFAIL| + BTREE_INSERT_LAZY_RW, + bch2_check_alloc_to_lru_ref(&trans, &iter)); + if (ret) + break; + } + bch2_trans_iter_exit(&trans, &iter); - 
bch2_mark_alloc_bucket(c, ca, b, true); + bch2_trans_exit(&trans); + return ret < 0 ? ret : 0; +} - spin_lock(&c->freelist_lock); - verify_not_on_freelist(c, ca, b); - BUG_ON(!fifo_push(&ca->free_inc, b)); - spin_unlock(&c->freelist_lock); +static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos, + struct bch_dev *ca, bool *discard_done) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter; + struct bkey_s_c k; + struct bkey_i_alloc_v4 *a; + struct printbuf buf = PRINTBUF; + int ret; - percpu_up_read(&c->mark_lock); + bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos, + BTREE_ITER_CACHED); + k = bch2_btree_iter_peek_slot(&iter); + ret = bkey_err(k); + if (ret) + goto out; - ret = bch2_trans_do(c, NULL, journal_seq, - BTREE_INSERT_NOCHECK_RW| - BTREE_INSERT_NOFAIL| - BTREE_INSERT_JOURNAL_RESERVED| - flags, - bucket_invalidate_btree(&trans, ca, b, &u)); + a = bch2_alloc_to_v4_mut(trans, k); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + goto out; - if (!ret) { - /* remove from alloc_heap: */ - struct alloc_heap_entry e, *top = ca->alloc_heap.data; + if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) { + a->v.gen++; + SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false); + goto write; + } - top->bucket++; - top->nr--; + if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans, + "clearing need_discard but journal_seq %llu > flushed_seq %llu\n" + "%s", + a->v.journal_seq, + c->journal.flushed_seq_ondisk, + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + ret = -EIO; + goto out; + } - if (!top->nr) - heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL); + if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans, + "bucket incorrectly set in need_discard btree\n" + "%s", + (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) { + ret = -EIO; + goto out; + } + if (!*discard_done && ca->mi.discard && !c->opts.nochanges) { /* - * Make sure we flush the last journal entry that updated this - * bucket (i.e. deleting the last reference) before writing to - * this bucket again: + * This works without any other locks because this is the only + * thread that removes items from the need_discard tree */ - *journal_seq = max(*journal_seq, u.journal_seq); - } else { - size_t b2; + bch2_trans_unlock(trans); + blkdev_issue_discard(ca->disk_sb.bdev, + k.k->p.offset * ca->mi.bucket_size, + ca->mi.bucket_size, + GFP_KERNEL, 0); + *discard_done = true; + + ret = bch2_trans_relock(trans) ? 0 : -EINTR; + if (ret) + goto out; + } - /* remove from free_inc: */ - percpu_down_read(&c->mark_lock); - spin_lock(&c->freelist_lock); + SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false); + a->v.data_type = alloc_data_type(a->v, a->v.data_type); +write: + ret = bch2_trans_update(trans, &iter, &a->k_i, 0); +out: + bch2_trans_iter_exit(trans, &iter); + printbuf_exit(&buf); + return ret; +} - bch2_mark_alloc_bucket(c, ca, b, false); +static void bch2_do_discards_work(struct work_struct *work) +{ + struct bch_fs *c = container_of(work, struct bch_fs, discard_work); + struct bch_dev *ca = NULL; + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0; + int ret; - BUG_ON(!fifo_pop_back(&ca->free_inc, b2)); - BUG_ON(b != b2); + bch2_trans_init(&trans, c, 0, 0); - spin_unlock(&c->freelist_lock); - percpu_up_read(&c->mark_lock); - } + for_each_btree_key(&trans, iter, BTREE_ID_need_discard, + POS_MIN, 0, k, ret) { + bool discard_done = false; - return ret < 0 ? 
ret : 0; -} + if (ca && k.k->p.inode != ca->dev_idx) { + percpu_ref_put(&ca->io_ref); + ca = NULL; + } -/* - * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc: - */ -static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca) -{ - u64 journal_seq = 0; - int ret = 0; + if (!ca) { + ca = bch_dev_bkey_exists(c, k.k->p.inode); + if (!percpu_ref_tryget(&ca->io_ref)) { + ca = NULL; + bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0)); + continue; + } + } - /* Only use nowait if we've already invalidated at least one bucket: */ - while (!ret && - !fifo_full(&ca->free_inc) && - ca->alloc_heap.used) { - if (kthread_should_stop()) { - ret = 1; - break; + seen++; + + if (bch2_bucket_is_open_safe(c, k.k->p.inode, k.k->p.offset)) { + open++; + continue; } - ret = bch2_invalidate_one_bucket(c, ca, &journal_seq, - (!fifo_empty(&ca->free_inc) - ? BTREE_INSERT_NOWAIT : 0)); - /* - * We only want to batch up invalidates when they're going to - * require flushing the journal: - */ - if (!journal_seq) + if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal, + c->journal.flushed_seq_ondisk, + k.k->p.inode, k.k->p.offset)) { + need_journal_commit++; + continue; + } + + ret = __bch2_trans_do(&trans, NULL, NULL, + BTREE_INSERT_USE_RESERVE| + BTREE_INSERT_NOFAIL, + bch2_clear_need_discard(&trans, k.k->p, ca, &discard_done)); + if (ret) break; + + discarded++; } + bch2_trans_iter_exit(&trans, &iter); - /* If we used NOWAIT, don't return the error: */ - if (!fifo_empty(&ca->free_inc)) - ret = 0; - if (ret < 0) - bch_err(ca, "error invalidating buckets: %i", ret); - if (ret) - return ret; + if (ca) + percpu_ref_put(&ca->io_ref); - if (journal_seq) - ret = bch2_journal_flush_seq(&c->journal, journal_seq); - if (ret) { - bch_err(ca, "journal error: %i", ret); - return ret; - } + bch2_trans_exit(&trans); - return 0; + if (need_journal_commit * 2 > seen) + bch2_journal_flush_async(&c->journal, NULL); + + percpu_ref_put(&c->writes); + + trace_do_discards(c, seen, open, need_journal_commit, discarded, ret); } -static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state) +void bch2_do_discards(struct bch_fs *c) { - if (ca->allocator_state != new_state) { - ca->allocator_state = new_state; - closure_wake_up(&ca->fs->freelist_wait); - } + if (percpu_ref_tryget(&c->writes) && + !queue_work(system_long_wq, &c->discard_work)) + percpu_ref_put(&c->writes); } -static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) +static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca) { - unsigned i; - int ret = 0; + struct bch_fs *c = trans->c; + struct btree_iter lru_iter, alloc_iter = { NULL }; + struct bkey_s_c k; + struct bkey_i_alloc_v4 *a; + u64 bucket, idx; + struct printbuf buf = PRINTBUF; + int ret; - spin_lock(&c->freelist_lock); - for (i = 0; i < RESERVE_NR; i++) { - /* - * Don't strand buckets on the copygc freelist until - * after recovery is finished: - */ - if (i == RESERVE_MOVINGGC && - !test_bit(BCH_FS_STARTED, &c->flags)) - continue; + bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru, + POS(ca->dev_idx, 0), 0); + k = bch2_btree_iter_peek(&lru_iter); + ret = bkey_err(k); + if (ret) + goto out; - if (fifo_push(&ca->free[i], b)) { - fifo_pop(&ca->free_inc, b); - ret = 1; - break; - } + if (!k.k || k.k->p.inode != ca->dev_idx) + goto out; + + if (bch2_trans_inconsistent_on(k.k->type != KEY_TYPE_lru, trans, + "non lru key in lru btree")) + goto out; + + idx = k.k->p.offset; + bucket = 
le64_to_cpu(bkey_s_c_to_lru(k).v->idx); + + a = bch2_trans_start_alloc_update(trans, &alloc_iter, + POS(ca->dev_idx, bucket)); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + goto out; + + if (idx != alloc_lru_idx(a->v)) { + pr_buf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n "); + + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i)); + pr_buf(&buf, "\n "); + bch2_bkey_val_to_text(&buf, c, k); + bch2_trans_inconsistent(trans, "%s", buf.buf); + ret = -EINVAL; + goto out; } - spin_unlock(&c->freelist_lock); - ca->allocator_state = ret - ? ALLOCATOR_running - : ALLOCATOR_blocked_full; - closure_wake_up(&c->freelist_wait); + SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false); + a->v.gen++; + a->v.data_type = 0; + a->v.dirty_sectors = 0; + a->v.cached_sectors = 0; + a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now); + a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now); + + ret = bch2_trans_update(trans, &alloc_iter, &a->k_i, + BTREE_TRIGGER_BUCKET_INVALIDATE); +out: + bch2_trans_iter_exit(trans, &alloc_iter); + bch2_trans_iter_exit(trans, &lru_iter); + printbuf_exit(&buf); return ret; } -static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) +static void bch2_do_invalidates_work(struct work_struct *work) { - if (ca->mi.discard && - blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev))) - blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b), - ca->mi.bucket_size, GFP_NOFS, 0); -} + struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work); + struct bch_dev *ca; + struct btree_trans trans; + unsigned i; + int ret = 0; -static bool allocator_thread_running(struct bch_dev *ca) -{ - unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw && - test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) - ? ALLOCATOR_running - : ALLOCATOR_stopped; - alloc_thread_set_state(ca, state); - return state == ALLOCATOR_running; + bch2_trans_init(&trans, c, 0, 0); + + for_each_member_device(ca, c, i) { + s64 nr_to_invalidate = + should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); + + while (!ret && nr_to_invalidate-- >= 0) + ret = __bch2_trans_do(&trans, NULL, NULL, + BTREE_INSERT_USE_RESERVE| + BTREE_INSERT_NOFAIL, + invalidate_one_bucket(&trans, ca)); + } + + bch2_trans_exit(&trans); + percpu_ref_put(&c->writes); } -static int buckets_available(struct bch_dev *ca, unsigned long gc_count) +void bch2_do_invalidates(struct bch_fs *c) { - s64 available = dev_buckets_reclaimable(ca) - - (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0); - bool ret = available > 0; - - alloc_thread_set_state(ca, ret - ? ALLOCATOR_running - : ALLOCATOR_blocked); - return ret; + if (percpu_ref_tryget(&c->writes)) + queue_work(system_long_wq, &c->invalidate_work); } -/** - * bch_allocator_thread - move buckets from free_inc to reserves - * - * The free_inc FIFO is populated by find_reclaimable_buckets(), and - * the reserves are depleted by bucket allocation. When we run out - * of free_inc, try to invalidate some buckets and write out - * prios and gens. 
- */ -static int bch2_allocator_thread(void *arg) +static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca) { - struct bch_dev *ca = arg; - struct bch_fs *c = ca->fs; - unsigned long gc_count = c->gc_count; - size_t nr; + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + struct bch_alloc_v4 a; + struct bch_member *m; int ret; - set_freezable(); + bch2_trans_init(&trans, c, 0, 0); - while (1) { - ret = kthread_wait_freezable(allocator_thread_running(ca)); + for_each_btree_key(&trans, iter, BTREE_ID_alloc, + POS(ca->dev_idx, ca->mi.first_bucket), + BTREE_ITER_SLOTS| + BTREE_ITER_PREFETCH, k, ret) { + if (iter.pos.offset >= ca->mi.nbuckets) + break; + + bch2_alloc_to_v4(k, &a); + ret = __bch2_trans_do(&trans, NULL, NULL, + BTREE_INSERT_LAZY_RW, + bch2_bucket_do_index(&trans, k, &a, true)); if (ret) - goto stop; + break; + } + bch2_trans_iter_exit(&trans, &iter); - while (!ca->alloc_heap.used) { - cond_resched(); + bch2_trans_exit(&trans); - ret = kthread_wait_freezable(buckets_available(ca, gc_count)); - if (ret) - goto stop; + if (ret) { + bch_err(ca, "error initializing free space: %i", ret); + return ret; + } - gc_count = c->gc_count; - nr = find_reclaimable_buckets(c, ca); + mutex_lock(&c->sb_lock); + m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx; + SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true); + mutex_unlock(&c->sb_lock); - trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc, - ca->inc_gen_really_needs_gc); + return ret; +} - if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) || - ca->inc_gen_really_needs_gc) && - c->gc_thread) { - atomic_inc(&c->kick_gc); - wake_up_process(c->gc_thread); - } - } +int bch2_fs_freespace_init(struct bch_fs *c) +{ + struct bch_dev *ca; + unsigned i; + int ret = 0; + bool doing_init = false; - ret = bch2_invalidate_buckets(c, ca); - if (ret) - goto stop; + /* + * We can crash during the device add path, so we need to check this on + * every mount: + */ - while (!fifo_empty(&ca->free_inc)) { - u64 b = fifo_peek(&ca->free_inc); + for_each_member_device(ca, c, i) { + if (ca->mi.freespace_initialized) + continue; - discard_one_bucket(c, ca, b); + if (!doing_init) { + bch_info(c, "initializing freespace"); + doing_init = true; + } - ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b)); - if (ret) - goto stop; + ret = bch2_dev_freespace_init(c, ca); + if (ret) { + percpu_ref_put(&ca->ref); + return ret; } } -stop: - alloc_thread_set_state(ca, ALLOCATOR_stopped); - return 0; + + if (doing_init) { + mutex_lock(&c->sb_lock); + bch2_write_super(c); + mutex_unlock(&c->sb_lock); + + bch_verbose(c, "done initializing freespace"); + } + + return ret; +} + +/* Bucket IO clocks: */ + +int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev, + size_t bucket_nr, int rw) +{ + struct bch_fs *c = trans->c; + struct btree_iter iter; + struct bkey_i_alloc_v4 *a; + u64 now; + int ret = 0; + + a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr)); + ret = PTR_ERR_OR_ZERO(a); + if (ret) + return ret; + + now = atomic64_read(&c->io_clock[rw].now); + if (a->v.io_time[rw] == now) + goto out; + + a->v.io_time[rw] = now; + + ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: + bch2_trans_commit(trans, NULL, NULL, 0); +out: + bch2_trans_iter_exit(trans, &iter); + return ret; } /* Startup/shutdown (ro/rw): */ @@ -949,7 +1284,7 @@ void bch2_recalc_capacity(struct bch_fs *c) u64 capacity = 0, reserved_sectors = 0, gc_reserve; unsigned bucket_size_max = 0; unsigned long ra_pages = 0; - unsigned i, j; + 
unsigned i; lockdep_assert_held(&c->state_lock); @@ -980,8 +1315,9 @@ void bch2_recalc_capacity(struct bch_fs *c) * allocations for foreground writes must wait - * not -ENOSPC calculations. */ - for (j = 0; j < RESERVE_NONE; j++) - dev_reserve += ca->free[j].size; + + dev_reserve += ca->nr_btree_reserve * 2; + dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ dev_reserve += 1; /* btree write point */ dev_reserve += 1; /* copygc write point */ @@ -1037,8 +1373,6 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) { unsigned i; - BUG_ON(ca->alloc_thread); - /* First, remove device from allocation groups: */ for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++) @@ -1112,62 +1446,9 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) set_bit(ca->dev_idx, c->rw_devs[i].d); } -void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca) -{ - if (ca->alloc_thread) - closure_wait_event(&c->freelist_wait, - ca->allocator_state != ALLOCATOR_running); -} - -/* stop allocator thread: */ -void bch2_dev_allocator_stop(struct bch_dev *ca) -{ - struct task_struct *p; - - p = rcu_dereference_protected(ca->alloc_thread, 1); - ca->alloc_thread = NULL; - - /* - * We need an rcu barrier between setting ca->alloc_thread = NULL and - * the thread shutting down to avoid bch2_wake_allocator() racing: - * - * XXX: it would be better to have the rcu barrier be asynchronous - * instead of blocking us here - */ - synchronize_rcu(); - - if (p) { - kthread_stop(p); - put_task_struct(p); - } -} - -/* start allocator thread: */ -int bch2_dev_allocator_start(struct bch_dev *ca) -{ - struct task_struct *p; - - /* - * allocator thread already started? - */ - if (ca->alloc_thread) - return 0; - - p = kthread_create(bch2_allocator_thread, ca, - "bch-alloc/%s", ca->name); - if (IS_ERR(p)) { - bch_err(ca->fs, "error creating allocator thread: %li", - PTR_ERR(p)); - return PTR_ERR(p); - } - - get_task_struct(p); - rcu_assign_pointer(ca->alloc_thread, p); - wake_up_process(p); - return 0; -} - void bch2_fs_allocator_background_init(struct bch_fs *c) { spin_lock_init(&c->freelist_lock); + INIT_WORK(&c->discard_work, bch2_do_discards_work); + INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work); }
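
Note on the freespace-key encoding used in this patch: alloc_freespace_pos() and alloc_freespace_genbits() are defined in the headers rather than in this file, but the masks in bch2_check_discard_freespace_key() above (pos.offset &= ~(~0ULL << 56); genbits = iter->pos.offset & (~0ULL << 56)) show the shape of a freespace btree key: the low 56 bits of the offset carry the bucket number and the high 8 bits carry generation bits. Below is a minimal illustrative sketch of that packing, assuming the 56-bit split inferred here; the helper names and the exact gen-to-genbits mapping are stand-ins for illustration, not the real API:

	#include <stdint.h>

	/* Inferred from the ~0ULL << 56 masks in bch2_check_discard_freespace_key() */
	#define FREESPACE_GENBITS_SHIFT	56

	/* Build a freespace offset: bucket in the low 56 bits, genbits above. */
	static inline uint64_t freespace_offset(uint64_t bucket, uint64_t gen)
	{
		return ((gen & 0xff) << FREESPACE_GENBITS_SHIFT) | bucket;
	}

	/* Recover the bucket number, as done for `pos` in the fsck path. */
	static inline uint64_t freespace_bucket(uint64_t offset)
	{
		return offset & ~(~0ULL << FREESPACE_GENBITS_SHIFT);
	}

	/* Recover the (unshifted) genbits, as compared against alloc_freespace_genbits(). */
	static inline uint64_t freespace_genbits(uint64_t offset)
	{
		return offset & (~0ULL << FREESPACE_GENBITS_SHIFT);
	}

This appears to be how stale index entries are caught: bch2_check_discard_freespace_key() compares the genbits recovered from the freespace key against alloc_freespace_genbits() of the current alloc key, and deletes the entry on mismatch, since a differing generation means the bucket was reused after the index entry was written.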