Update bcachefs sources to 9a555a741e80 bcachefs: omit alignment attribute on big...
index 022c905dc8b43292226e075598d65a0a0b4ad025..c7be6afe89553b154ff8ba1d2899908026c5451a 100644
@@ -2,18 +2,23 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_key_cache.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_gc.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
+#include "buckets_waiting_for_journal.h"
 #include "clock.h"
 #include "debug.h"
 #include "ec.h"
 #include "error.h"
+#include "lru.h"
 #include "recovery.h"
+#include "trace.h"
 #include "varint.h"
 
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
 #include <linux/sched/task.h>
 #include <linux/sort.h>
-#include <trace/events/bcachefs.h>
 
-const char * const bch2_allocator_states[] = {
-#define x(n)   #n,
-       ALLOC_THREAD_STATES()
-#undef x
-       NULL
-};
+/* Persistent alloc info: */
 
 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
@@ -38,7 +37,17 @@ static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
 #undef x
 };
 
-/* Persistent alloc info: */
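+/*
+ * In-memory, unpacked form of the old alloc v1/v2/v3 key formats, used when
+ * validating old keys and converting them to struct bch_alloc_v4:
+ */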
+struct bkey_alloc_unpacked {
+       u64             journal_seq;
+       u8              gen;
+       u8              oldest_gen;
+       u8              data_type;
+       bool            need_discard:1;
+       bool            need_inc_gen:1;
+#define x(_name, _bits)        u##_bits _name;
+       BCH_ALLOC_FIELDS_V2()
+#undef  x
+};
 
 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
                                     const void **p, unsigned field)
@@ -70,36 +79,6 @@ static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
        return v;
 }
 
-static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
-                                     unsigned field, u64 v)
-{
-       unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
-
-       if (!v)
-               return;
-
-       a->v.fields |= 1 << field;
-
-       switch (bytes) {
-       case 1:
-               *((u8 *) *p) = v;
-               break;
-       case 2:
-               *((__le16 *) *p) = cpu_to_le16(v);
-               break;
-       case 4:
-               *((__le32 *) *p) = cpu_to_le32(v);
-               break;
-       case 8:
-               *((__le64 *) *p) = cpu_to_le64(v);
-               break;
-       default:
-               BUG();
-       }
-
-       *p += bytes;
-}
-
 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
                                 struct bkey_s_c k)
 {
@@ -147,68 +126,61 @@ static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
        return 0;
 }
 
-static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
-                              const struct bkey_alloc_unpacked src)
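+/*
+ * Decode the varint-encoded fields of an alloc_v3 key; trailing fields not
+ * present in the key default to zero, and a decoded value too large for its
+ * destination field is an error:
+ */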
+static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
+                               struct bkey_s_c k)
 {
-       struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
-       unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
-       u8 *out = a->v.data;
-       u8 *end = (void *) &dst[1];
-       u8 *last_nonzero_field = out;
-       unsigned bytes;
-
-       a->k.p          = POS(src.dev, src.bucket);
-       a->v.gen        = src.gen;
-       a->v.oldest_gen = src.oldest_gen;
-       a->v.data_type  = src.data_type;
+       struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
+       const u8 *in = a.v->data;
+       const u8 *end = bkey_val_end(a);
+       unsigned fieldnr = 0;
+       int ret;
+       u64 v;
+
+       out->gen        = a.v->gen;
+       out->oldest_gen = a.v->oldest_gen;
+       out->data_type  = a.v->data_type;
+       out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
+       out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
+       out->journal_seq = le64_to_cpu(a.v->journal_seq);
 
 #define x(_name, _bits)                                                        \
-       nr_fields++;                                                    \
-                                                                       \
-       if (src._name) {                                                \
-               out += bch2_varint_encode_fast(out, src._name);         \
-                                                                       \
-               last_nonzero_field = out;                               \
-               last_nonzero_fieldnr = nr_fields;                       \
+       if (fieldnr < a.v->nr_fields) {                                 \
+               ret = bch2_varint_decode_fast(in, end, &v);             \
+               if (ret < 0)                                            \
+                       return ret;                                     \
+               in += ret;                                              \
        } else {                                                        \
-               *out++ = 0;                                             \
-       }
+               v = 0;                                                  \
+       }                                                               \
+       out->_name = v;                                                 \
+       if (v != out->_name)                                            \
+               return -1;                                              \
+       fieldnr++;
 
        BCH_ALLOC_FIELDS_V2()
 #undef  x
-       BUG_ON(out > end);
-
-       out = last_nonzero_field;
-       a->v.nr_fields = last_nonzero_fieldnr;
-
-       bytes = (u8 *) out - (u8 *) &a->v;
-       set_bkey_val_bytes(&a->k, bytes);
-       memset_u64s_tail(&a->v, 0, bytes);
+       return 0;
 }
 
-struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
+static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
 {
-       struct bkey_alloc_unpacked ret = {
-               .dev    = k.k->p.inode,
-               .bucket = k.k->p.offset,
-               .gen    = 0,
-       };
+       struct bkey_alloc_unpacked ret = { .gen = 0 };
 
-       if (k.k->type == KEY_TYPE_alloc_v2)
-               bch2_alloc_unpack_v2(&ret, k);
-       else if (k.k->type == KEY_TYPE_alloc)
+       switch (k.k->type) {
+       case KEY_TYPE_alloc:
                bch2_alloc_unpack_v1(&ret, k);
+               break;
+       case KEY_TYPE_alloc_v2:
+               bch2_alloc_unpack_v2(&ret, k);
+               break;
+       case KEY_TYPE_alloc_v3:
+               bch2_alloc_unpack_v3(&ret, k);
+               break;
+       }
 
        return ret;
 }
 
-void bch2_alloc_pack(struct bch_fs *c,
-                    struct bkey_alloc_buf *dst,
-                    const struct bkey_alloc_unpacked src)
-{
-       bch2_alloc_pack_v2(dst, src);
-}
-
 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
 {
        unsigned i, bytes = offsetof(struct bch_alloc, data);
@@ -220,804 +192,1896 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
        return DIV_ROUND_UP(bytes, sizeof(u64));
 }
 
-const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
+int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
-
-       if (k.k->p.inode >= c->sb.nr_devices ||
-           !c->devs[k.k->p.inode])
-               return "invalid device";
+       int ret = 0;
 
        /* allow for unknown fields */
-       if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
-               return "incorrect value size";
-
-       return NULL;
+       bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
+                        alloc_v1_val_size_bad,
+                        "incorrect value size (%zu < %u)",
+                        bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+fsck_err:
+       return ret;
 }
 
-const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
+int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
        struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       if (k.k->p.inode >= c->sb.nr_devices ||
-           !c->devs[k.k->p.inode])
-               return "invalid device";
-
-       if (bch2_alloc_unpack_v2(&u, k))
-               return "unpack error";
-
-       return NULL;
+       bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
-                          struct bkey_s_c k)
+int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
-       struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
+       struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       pr_buf(out, "gen %u oldest_gen %u data_type %s",
-              u.gen, u.oldest_gen, bch2_data_types[u.data_type]);
-#define x(_name, ...)  pr_buf(out, " " #_name " %llu", (u64) u._name);
-       BCH_ALLOC_FIELDS_V2()
-#undef  x
+       bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
+int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags, struct printbuf *err)
 {
-       struct bch_fs *c = trans->c;
-       struct bch_dev *ca;
-       struct bucket *g;
-       struct bkey_alloc_unpacked u;
-
-       if (k.k->type != KEY_TYPE_alloc &&
-           k.k->type != KEY_TYPE_alloc_v2)
-               return 0;
-
-       ca = bch_dev_bkey_exists(c, k.k->p.inode);
-       g = bucket(ca, k.k->p.offset);
-       u = bch2_alloc_unpack(k);
-
-       g->_mark.gen            = u.gen;
-       g->_mark.data_type      = u.data_type;
-       g->_mark.dirty_sectors  = u.dirty_sectors;
-       g->_mark.cached_sectors = u.cached_sectors;
-       g->io_time[READ]        = u.read_time;
-       g->io_time[WRITE]       = u.write_time;
-       g->oldest_gen           = u.oldest_gen;
-       g->gen_valid            = 1;
+       struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
+                        alloc_v4_val_size_bad,
+                        "bad val size (%u > %zu)",
+                        alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
+
+       bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+                        BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
+                        alloc_v4_backpointers_start_bad,
+                        "invalid backpointers_start");
+
+       bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
+                        alloc_key_data_type_bad,
+                        "invalid data type (got %u should be %u)",
+                        a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
+
+       switch (a.v->data_type) {
+       case BCH_DATA_free:
+       case BCH_DATA_need_gc_gens:
+       case BCH_DATA_need_discard:
+               bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+                                c, err, alloc_key_empty_but_have_data,
+                                "empty data type free but have data");
+               break;
+       case BCH_DATA_sb:
+       case BCH_DATA_journal:
+       case BCH_DATA_btree:
+       case BCH_DATA_user:
+       case BCH_DATA_parity:
+               bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+                                c, err, alloc_key_dirty_sectors_0,
+                                "data_type %s but dirty_sectors==0",
+                                bch2_data_type_str(a.v->data_type));
+               break;
+       case BCH_DATA_cached:
+               bkey_fsck_err_on(!a.v->cached_sectors ||
+                                bch2_bucket_sectors_dirty(*a.v) ||
+                                a.v->stripe,
+                                c, err, alloc_key_cached_inconsistency,
+                                "data type inconsistency");
+
+               bkey_fsck_err_on(!a.v->io_time[READ] &&
+                                c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
+                                c, err, alloc_key_cached_but_read_time_zero,
+                                "cached bucket with read_time == 0");
+               break;
+       case BCH_DATA_stripe:
+               break;
+       }
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_read(struct bch_fs *c)
+void bch2_alloc_v4_swab(struct bkey_s k)
 {
-       struct btree_trans trans;
-       int ret;
-
-       bch2_trans_init(&trans, c, 0, 0);
-       down_read(&c->gc_lock);
-       ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
-       up_read(&c->gc_lock);
-       bch2_trans_exit(&trans);
-       if (ret) {
-               bch_err(c, "error reading alloc info: %i", ret);
-               return ret;
+       struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
+       struct bch_backpointer *bp, *bps;
+
+       a->journal_seq          = swab64(a->journal_seq);
+       a->flags                = swab32(a->flags);
+       a->dirty_sectors        = swab32(a->dirty_sectors);
+       a->cached_sectors       = swab32(a->cached_sectors);
+       a->io_time[0]           = swab64(a->io_time[0]);
+       a->io_time[1]           = swab64(a->io_time[1]);
+       a->stripe               = swab32(a->stripe);
+       a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+       a->fragmentation_lru    = swab64(a->fragmentation_lru);
+
+       bps = alloc_v4_backpointers(a);
+       for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
+               bp->bucket_offset       = swab40(bp->bucket_offset);
+               bp->bucket_len          = swab32(bp->bucket_len);
+               bch2_bpos_swab(&bp->pos);
        }
-
-       return 0;
 }
 
-static int bch2_alloc_write_key(struct btree_trans *trans,
-                               struct btree_iter *iter,
-                               unsigned flags)
+void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-       struct bch_fs *c = trans->c;
-       struct bkey_s_c k;
-       struct bch_dev *ca;
-       struct bucket *g;
-       struct bucket_mark m;
-       struct bkey_alloc_unpacked old_u, new_u;
-       struct bkey_alloc_buf a;
-       int ret;
-retry:
-       bch2_trans_begin(trans);
-
-       ret = bch2_btree_key_cache_flush(trans,
-                       BTREE_ID_alloc, iter->pos);
-       if (ret)
-               goto err;
-
-       k = bch2_btree_iter_peek_slot(iter);
-       ret = bkey_err(k);
-       if (ret)
-               goto err;
-
-       old_u = bch2_alloc_unpack(k);
-
-       percpu_down_read(&c->mark_lock);
-       ca      = bch_dev_bkey_exists(c, iter->pos.inode);
-       g       = bucket(ca, iter->pos.offset);
-       m       = READ_ONCE(g->mark);
-       new_u   = alloc_mem_to_key(iter, g, m);
-       percpu_up_read(&c->mark_lock);
-
-       if (!bkey_alloc_unpacked_cmp(old_u, new_u))
-               return 0;
-
-       bch2_alloc_pack(c, &a, new_u);
-       ret   = bch2_trans_update(trans, iter, &a.k,
-                                 BTREE_TRIGGER_NORUN) ?:
-               bch2_trans_commit(trans, NULL, NULL,
-                               BTREE_INSERT_NOFAIL|flags);
-err:
-       if (ret == -EINTR)
-               goto retry;
-       return ret;
+       struct bch_alloc_v4 _a;
+       const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
+
+       prt_newline(out);
+       printbuf_indent_add(out, 2);
+
+       prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
+       bch2_prt_data_type(out, a->data_type);
+       prt_newline(out);
+       prt_printf(out, "journal_seq       %llu",       a->journal_seq);
+       prt_newline(out);
+       prt_printf(out, "need_discard      %llu",       BCH_ALLOC_V4_NEED_DISCARD(a));
+       prt_newline(out);
+       prt_printf(out, "need_inc_gen      %llu",       BCH_ALLOC_V4_NEED_INC_GEN(a));
+       prt_newline(out);
+       prt_printf(out, "dirty_sectors     %u", a->dirty_sectors);
+       prt_newline(out);
+       prt_printf(out, "cached_sectors    %u", a->cached_sectors);
+       prt_newline(out);
+       prt_printf(out, "stripe            %u", a->stripe);
+       prt_newline(out);
+       prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
+       prt_newline(out);
+       prt_printf(out, "io_time[READ]     %llu",       a->io_time[READ]);
+       prt_newline(out);
+       prt_printf(out, "io_time[WRITE]    %llu",       a->io_time[WRITE]);
+       prt_newline(out);
+       prt_printf(out, "fragmentation     %llu",       a->fragmentation_lru);
+       prt_newline(out);
+       prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+       printbuf_indent_sub(out, 2);
 }
 
-int bch2_alloc_write(struct bch_fs *c, unsigned flags)
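+/*
+ * Convert an alloc key of any version to in-memory v4 form; for v4 keys, any
+ * inline backpointers are dropped and backpointers_start is normalized:
+ */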
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
 {
-       struct btree_trans trans;
-       struct btree_iter iter;
-       struct bch_dev *ca;
-       unsigned i;
-       int ret = 0;
+       if (k.k->type == KEY_TYPE_alloc_v4) {
+               void *src, *dst;
 
-       bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-       bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
-                            BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+               *out = *bkey_s_c_to_alloc_v4(k).v;
 
-       for_each_member_device(ca, c, i) {
-               bch2_btree_iter_set_pos(&iter,
-                       POS(ca->dev_idx, ca->mi.first_bucket));
+               src = alloc_v4_backpointers(out);
+               SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+               dst = alloc_v4_backpointers(out);
 
-               while (iter.pos.offset < ca->mi.nbuckets) {
-                       ret = bch2_alloc_write_key(&trans, &iter, flags);
-                       if (ret) {
-                               percpu_ref_put(&ca->ref);
-                               goto err;
-                       }
-                       bch2_btree_iter_advance(&iter);
-               }
+               if (src < dst)
+                       memset(src, 0, dst - src);
+
+               SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
+       } else {
+               struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
+
+               *out = (struct bch_alloc_v4) {
+                       .journal_seq            = u.journal_seq,
+                       .flags                  = u.need_discard,
+                       .gen                    = u.gen,
+                       .oldest_gen             = u.oldest_gen,
+                       .data_type              = u.data_type,
+                       .stripe_redundancy      = u.stripe_redundancy,
+                       .dirty_sectors          = u.dirty_sectors,
+                       .cached_sectors         = u.cached_sectors,
+                       .io_time[READ]          = u.read_time,
+                       .io_time[WRITE]         = u.write_time,
+                       .stripe                 = u.stripe,
+               };
+
+               SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
        }
-err:
-       bch2_trans_iter_exit(&trans, &iter);
-       bch2_trans_exit(&trans);
-       return ret;
 }
 
-/* Bucket IO clocks: */
-
-int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
-                             size_t bucket_nr, int rw)
+static noinline struct bkey_i_alloc_v4 *
+__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
 {
-       struct bch_fs *c = trans->c;
-       struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
-       struct btree_iter iter;
-       struct bucket *g;
-       struct bkey_alloc_buf *a;
-       struct bkey_alloc_unpacked u;
-       u64 *time, now;
-       int ret = 0;
+       struct bkey_i_alloc_v4 *ret;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
-                            BTREE_ITER_CACHED|
-                            BTREE_ITER_CACHED_NOFILL|
-                            BTREE_ITER_INTENT);
-       ret = bch2_btree_iter_traverse(&iter);
-       if (ret)
-               goto out;
+       ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
+       if (IS_ERR(ret))
+               return ret;
 
-       a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
-       ret = PTR_ERR_OR_ZERO(a);
-       if (ret)
-               goto out;
+       if (k.k->type == KEY_TYPE_alloc_v4) {
+               void *src, *dst;
 
-       percpu_down_read(&c->mark_lock);
-       g = bucket(ca, bucket_nr);
-       u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
-       percpu_up_read(&c->mark_lock);
+               bkey_reassemble(&ret->k_i, k);
 
-       time = rw == READ ? &u.read_time : &u.write_time;
-       now = atomic64_read(&c->io_clock[rw].now);
-       if (*time == now)
-               goto out;
+               src = alloc_v4_backpointers(&ret->v);
+               SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
+               dst = alloc_v4_backpointers(&ret->v);
 
-       *time = now;
+               if (src < dst)
+                       memset(src, 0, dst - src);
 
-       bch2_alloc_pack(c, a, u);
-       ret   = bch2_trans_update(trans, &iter, &a->k, 0) ?:
-               bch2_trans_commit(trans, NULL, NULL, 0);
-out:
-       bch2_trans_iter_exit(trans, &iter);
+               SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
+               set_alloc_v4_u64s(ret);
+       } else {
+               bkey_alloc_v4_init(&ret->k_i);
+               ret->k.p = k.k->p;
+               bch2_alloc_to_v4(k, &ret->v);
+       }
        return ret;
 }
 
-/* Background allocator thread: */
-
-/*
- * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
- * (marking them as invalidated on disk), then optionally issues discard
- * commands to the newly free buckets, then puts them on the various freelists.
- */
-
-static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
-                                      struct bucket_mark m)
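+/*
+ * Fast path for getting a mutable copy of an alloc key: an alloc_v4 key with
+ * no inline backpointers can simply be copied; anything else goes through the
+ * full conversion:
+ */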
+static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
 {
-       u8 gc_gen;
+       struct bkey_s_c_alloc_v4 a;
 
-       if (!is_available_bucket(m))
-               return false;
+       if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
+           ((a = bkey_s_c_to_alloc_v4(k), true) &&
+            BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
+               return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
 
-       if (m.owned_by_allocator)
-               return false;
+       return __bch2_alloc_to_v4_mut(trans, k);
+}
 
-       if (ca->buckets_nouse &&
-           test_bit(b, ca->buckets_nouse))
-               return false;
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+       return bch2_alloc_to_v4_mut_inlined(trans, k);
+}
 
-       gc_gen = bucket_gc_gen(bucket(ca, b));
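+/*
+ * Look up the alloc key for @pos and return a mutable v4 copy, leaving @iter
+ * positioned for a subsequent bch2_trans_update():
+ */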
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+                             struct bpos pos)
+{
+       struct bkey_s_c k;
+       struct bkey_i_alloc_v4 *a;
+       int ret;
 
-       ca->inc_gen_needs_gc            += gc_gen >= BUCKET_GC_GEN_MAX / 2;
-       ca->inc_gen_really_needs_gc     += gc_gen >= BUCKET_GC_GEN_MAX;
+       k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
+                            BTREE_ITER_WITH_UPDATES|
+                            BTREE_ITER_CACHED|
+                            BTREE_ITER_INTENT);
+       ret = bkey_err(k);
+       if (unlikely(ret))
+               return ERR_PTR(ret);
 
-       return gc_gen < BUCKET_GC_GEN_MAX;
+       a = bch2_alloc_to_v4_mut_inlined(trans, k);
+       ret = PTR_ERR_OR_ZERO(a);
+       if (unlikely(ret))
+               goto err;
+       return a;
+err:
+       bch2_trans_iter_exit(trans, iter);
+       return ERR_PTR(ret);
 }
 
-/*
- * Determines what order we're going to reuse buckets, smallest bucket_key()
- * first.
- */
-
-static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
-                               u64 now, u64 last_seq_ondisk)
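+/*
+ * Each bucket_gens key covers 1 << KEY_TYPE_BUCKET_GENS_BITS buckets; these
+ * helpers map between an alloc key position and the corresponding bucket_gens
+ * key position plus the offset within its gens array:
+ */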
+static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
 {
-       unsigned used = bucket_sectors_used(m);
+       *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
 
-       if (used) {
-               /*
-                * Prefer to keep buckets that have been read more recently, and
-                * buckets that have more data in them:
-                */
-               u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
-               u32 last_read_scaled = max_t(u64, U32_MAX, div_u64(last_read, used));
+       pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
+       return pos;
+}
 
-               return -last_read_scaled;
-       } else {
-               /*
-                * Prefer to use buckets with smaller gc_gen so that we don't
-                * have to walk the btree and recalculate oldest_gen - but shift
-                * off the low bits so that buckets will still have equal sort
-                * keys when there's only a small difference, so that we can
-                * keep sequential buckets together:
-                */
-               return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
-                       (bucket_gc_gen(g) >> 4);
-       }
+static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
+{
+       pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
+       pos.offset += offset;
+       return pos;
 }
 
-static inline int bucket_alloc_cmp(alloc_heap *h,
-                                  struct alloc_heap_entry l,
-                                  struct alloc_heap_entry r)
+static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
 {
-       return  cmp_int(l.key, r.key) ?:
-               cmp_int(r.nr, l.nr) ?:
-               cmp_int(l.bucket, r.bucket);
+       return k.k->type == KEY_TYPE_bucket_gens
+               ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
+               : 0;
 }
 
-static inline int bucket_idx_cmp(const void *_l, const void *_r)
+int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
+                            enum bkey_invalid_flags flags,
+                            struct printbuf *err)
 {
-       const struct alloc_heap_entry *l = _l, *r = _r;
+       int ret = 0;
 
-       return cmp_int(l->bucket, r->bucket);
+       bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
+                        bucket_gens_val_size_bad,
+                        "bad val size (%zu != %zu)",
+                        bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+fsck_err:
+       return ret;
 }
 
-static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
+void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-       struct bucket_array *buckets;
-       struct alloc_heap_entry e = { 0 };
-       u64 now, last_seq_ondisk;
-       size_t b, i, nr = 0;
+       struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
+       unsigned i;
 
-       down_read(&ca->bucket_lock);
+       for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
+               if (i)
+                       prt_char(out, ' ');
+               prt_printf(out, "%u", g.v->gens[i]);
+       }
+}
 
-       buckets = bucket_array(ca);
-       ca->alloc_heap.used = 0;
-       now = atomic64_read(&c->io_clock[READ].now);
-       last_seq_ondisk = c->journal.last_seq_ondisk;
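+/*
+ * Walk the alloc btree and construct the initial bucket_gens btree, batching
+ * the gens of consecutive buckets into shared bucket_gens keys:
+ */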
+int bch2_bucket_gens_init(struct bch_fs *c)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct bkey_i_bucket_gens g;
+       bool have_bucket_gens_key = false;
+       int ret;
 
-       /*
-        * Find buckets with lowest read priority, by building a maxheap sorted
-        * by read priority and repeatedly replacing the maximum element until
-        * all buckets have been visited.
-        */
-       for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
-               struct bucket *g = &buckets->b[b];
-               struct bucket_mark m = READ_ONCE(g->mark);
-               unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
+       ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+                                BTREE_ITER_PREFETCH, k, ({
+               /*
+                * Not a fsck error because this is checked/repaired by
+                * bch2_check_alloc_key() which runs later:
+                */
+               if (!bch2_dev_bucket_exists(c, k.k->p))
+                       continue;
 
-               cond_resched();
+               struct bch_alloc_v4 a;
+               u8 gen = bch2_alloc_to_v4(k, &a)->gen;
+               unsigned offset;
+               struct bpos pos = alloc_gens_pos(iter.pos, &offset);
 
-               if (!bch2_can_invalidate_bucket(ca, b, m))
-                       continue;
+               if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
+                       ret = commit_do(trans, NULL, NULL,
+                                       BCH_TRANS_COMMIT_no_enospc,
+                               bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+                       if (ret)
+                               break;
+                       have_bucket_gens_key = false;
+               }
 
-               if (e.nr && e.bucket + e.nr == b && e.key == key) {
-                       e.nr++;
-               } else {
-                       if (e.nr)
-                               heap_add_or_replace(&ca->alloc_heap, e,
-                                       -bucket_alloc_cmp, NULL);
-
-                       e = (struct alloc_heap_entry) {
-                               .bucket = b,
-                               .nr     = 1,
-                               .key    = key,
-                       };
+               if (!have_bucket_gens_key) {
+                       bkey_bucket_gens_init(&g.k_i);
+                       g.k.p = pos;
+                       have_bucket_gens_key = true;
                }
-       }
 
-       if (e.nr)
-               heap_add_or_replace(&ca->alloc_heap, e,
-                               -bucket_alloc_cmp, NULL);
+               g.v.gens[offset] = gen;
+               0;
+       }));
 
-       for (i = 0; i < ca->alloc_heap.used; i++)
-               nr += ca->alloc_heap.data[i].nr;
+       if (have_bucket_gens_key && !ret)
+               ret = commit_do(trans, NULL, NULL,
+                               BCH_TRANS_COMMIT_no_enospc,
+                       bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
 
-       while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
-               nr -= ca->alloc_heap.data[0].nr;
-               heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
-       }
+       bch2_trans_put(trans);
 
-       up_read(&ca->bucket_lock);
+       bch_err_fn(c, ret);
+       return ret;
 }
 
-static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
+int bch2_alloc_read(struct bch_fs *c)
 {
-       struct bucket_array *buckets = bucket_array(ca);
-       struct bucket_mark m;
-       size_t b, start;
-
-       if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
-           ca->fifo_last_bucket >= ca->mi.nbuckets)
-               ca->fifo_last_bucket = ca->mi.first_bucket;
-
-       start = ca->fifo_last_bucket;
-
-       do {
-               ca->fifo_last_bucket++;
-               if (ca->fifo_last_bucket == ca->mi.nbuckets)
-                       ca->fifo_last_bucket = ca->mi.first_bucket;
-
-               b = ca->fifo_last_bucket;
-               m = READ_ONCE(buckets->b[b].mark);
-
-               if (bch2_can_invalidate_bucket(ca, b, m)) {
-                       struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
-
-                       heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
-                       if (heap_full(&ca->alloc_heap))
-                               break;
-               }
+       struct btree_trans *trans = bch2_trans_get(c);
+       int ret;
 
-               cond_resched();
-       } while (ca->fifo_last_bucket != start);
-}
+       down_read(&c->gc_lock);
 
-static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
-{
-       struct bucket_array *buckets = bucket_array(ca);
-       struct bucket_mark m;
-       size_t checked, i;
+       if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
+               ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
+                                        BTREE_ITER_PREFETCH, k, ({
+                       u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+                       u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
 
-       for (checked = 0;
-            checked < ca->mi.nbuckets / 2;
-            checked++) {
-               size_t b = bch2_rand_range(ca->mi.nbuckets -
-                                          ca->mi.first_bucket) +
-                       ca->mi.first_bucket;
+                       if (k.k->type != KEY_TYPE_bucket_gens)
+                               continue;
 
-               m = READ_ONCE(buckets->b[b].mark);
+                       const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
 
-               if (bch2_can_invalidate_bucket(ca, b, m)) {
-                       struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
+                       /*
+                        * Not a fsck error because this is checked/repaired by
+                        * bch2_check_alloc_key() which runs later:
+                        */
+                       if (!bch2_dev_exists2(c, k.k->p.inode))
+                               continue;
 
-                       heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
-                       if (heap_full(&ca->alloc_heap))
-                               break;
-               }
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
 
-               cond_resched();
+                       for (u64 b = max_t(u64, ca->mi.first_bucket, start);
+                            b < min_t(u64, ca->mi.nbuckets, end);
+                            b++)
+                               *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
+                       0;
+               }));
+       } else {
+               ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+                                        BTREE_ITER_PREFETCH, k, ({
+                       /*
+                        * Not a fsck error because this is checked/repaired by
+                        * bch2_check_alloc_key() which runs later:
+                        */
+                       if (!bch2_dev_bucket_exists(c, k.k->p))
+                               continue;
+
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+                       struct bch_alloc_v4 a;
+                       *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
+                       0;
+               }));
        }
 
-       sort(ca->alloc_heap.data,
-            ca->alloc_heap.used,
-            sizeof(ca->alloc_heap.data[0]),
-            bucket_idx_cmp, NULL);
+       bch2_trans_put(trans);
+       up_read(&c->gc_lock);
 
-       /* remove duplicates: */
-       for (i = 0; i + 1 < ca->alloc_heap.used; i++)
-               if (ca->alloc_heap.data[i].bucket ==
-                   ca->alloc_heap.data[i + 1].bucket)
-                       ca->alloc_heap.data[i].nr = 0;
+       bch_err_fn(c, ret);
+       return ret;
 }
 
-static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
+/* Free space/discard btree: */
+
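+/*
+ * Keep the freespace and need_discard btrees in sync with the alloc btree:
+ * buckets in BCH_DATA_free get a freespace key, buckets in
+ * BCH_DATA_need_discard get a need_discard key, and all other buckets get
+ * neither:
+ */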
+static int bch2_bucket_do_index(struct btree_trans *trans,
+                               struct bkey_s_c alloc_k,
+                               const struct bch_alloc_v4 *a,
+                               bool set)
 {
-       size_t i, nr = 0;
+       struct bch_fs *c = trans->c;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+       struct btree_iter iter;
+       struct bkey_s_c old;
+       struct bkey_i *k;
+       enum btree_id btree;
+       enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
+       enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       if (a->data_type != BCH_DATA_free &&
+           a->data_type != BCH_DATA_need_discard)
+               return 0;
 
-       ca->inc_gen_needs_gc                    = 0;
-       ca->inc_gen_really_needs_gc             = 0;
+       k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
+       if (IS_ERR(k))
+               return PTR_ERR(k);
 
-       switch (ca->mi.replacement) {
-       case BCH_CACHE_REPLACEMENT_lru:
-               find_reclaimable_buckets_lru(c, ca);
-               break;
-       case BCH_CACHE_REPLACEMENT_fifo:
-               find_reclaimable_buckets_fifo(c, ca);
+       bkey_init(&k->k);
+       k->k.type = new_type;
+
+       switch (a->data_type) {
+       case BCH_DATA_free:
+               btree = BTREE_ID_freespace;
+               k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
+               bch2_key_resize(&k->k, 1);
                break;
-       case BCH_CACHE_REPLACEMENT_random:
-               find_reclaimable_buckets_random(c, ca);
+       case BCH_DATA_need_discard:
+               btree = BTREE_ID_need_discard;
+               k->k.p = alloc_k.k->p;
                break;
+       default:
+               return 0;
        }
 
-       heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
+       old = bch2_bkey_get_iter(trans, &iter, btree,
+                            bkey_start_pos(&k->k),
+                            BTREE_ITER_INTENT);
+       ret = bkey_err(old);
+       if (ret)
+               return ret;
 
-       for (i = 0; i < ca->alloc_heap.used; i++)
-               nr += ca->alloc_heap.data[i].nr;
+       if (ca->mi.freespace_initialized &&
+           c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
+           bch2_trans_inconsistent_on(old.k->type != old_type, trans,
+                       "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
+                       "  for %s",
+                       set ? "setting" : "clearing",
+                       bch2_btree_id_str(btree),
+                       iter.pos.inode,
+                       iter.pos.offset,
+                       bch2_bkey_types[old.k->type],
+                       bch2_bkey_types[old_type],
+                       (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+               ret = -EIO;
+               goto err;
+       }
 
-       return nr;
+       ret = bch2_trans_update(trans, &iter, k, 0);
+err:
+       bch2_trans_iter_exit(trans, &iter);
+       printbuf_exit(&buf);
+       return ret;
 }
 
-/*
- * returns sequence number of most recent journal entry that updated this
- * bucket:
- */
-static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
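+/*
+ * Mirror a bucket's new gen into the bucket_gens btree, creating the
+ * bucket_gens key if it doesn't exist yet:
+ */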
+static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
+                                          struct bpos bucket, u8 gen)
 {
-       if (m.journal_seq_valid) {
-               u64 journal_seq = atomic64_read(&c->journal.seq);
-               u64 bucket_seq  = journal_seq;
+       struct btree_iter iter;
+       unsigned offset;
+       struct bpos pos = alloc_gens_pos(bucket, &offset);
+       struct bkey_i_bucket_gens *g;
+       struct bkey_s_c k;
+       int ret;
 
-               bucket_seq &= ~((u64) U16_MAX);
-               bucket_seq |= m.journal_seq;
+       g = bch2_trans_kmalloc(trans, sizeof(*g));
+       ret = PTR_ERR_OR_ZERO(g);
+       if (ret)
+               return ret;
 
-               if (bucket_seq > journal_seq)
-                       bucket_seq -= 1 << 16;
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
+                              BTREE_ITER_INTENT|
+                              BTREE_ITER_WITH_UPDATES);
+       ret = bkey_err(k);
+       if (ret)
+               return ret;
 
-               return bucket_seq;
+       if (k.k->type != KEY_TYPE_bucket_gens) {
+               bkey_bucket_gens_init(&g->k_i);
+               g->k.p = iter.pos;
        } else {
-               return 0;
+               bkey_reassemble(&g->k_i, k);
        }
+
+       g->v.gens[offset] = gen;
+
+       ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
 }
 
-static int bucket_invalidate_btree(struct btree_trans *trans,
-                                  struct bch_dev *ca, u64 b)
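+/*
+ * Alloc key trigger: transactionally, keep the freespace, need_discard, lru
+ * and bucket_gens btrees in sync with the alloc btree; atomically at commit
+ * time, update in-memory bucket state and kick off any background work
+ * (discards, invalidates, gc of bucket gens) the state change calls for:
+ */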
+int bch2_trigger_alloc(struct btree_trans *trans,
+                      enum btree_id btree, unsigned level,
+                      struct bkey_s_c old, struct bkey_s new,
+                      unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_alloc_buf *a;
-       struct bkey_alloc_unpacked u;
-       struct bucket *g;
-       struct bucket_mark m;
-       struct btree_iter iter;
-       int ret;
+       int ret = 0;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-                            POS(ca->dev_idx, b),
-                            BTREE_ITER_CACHED|
-                            BTREE_ITER_CACHED_NOFILL|
-                            BTREE_ITER_INTENT);
+       if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+                                      "alloc key for invalid device or bucket"))
+               return -EIO;
 
-       a = bch2_trans_kmalloc(trans, sizeof(*a));
-       ret = PTR_ERR_OR_ZERO(a);
+       struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
+
+       struct bch_alloc_v4 old_a_convert;
+       const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
+
+       if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+
+               new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+
+               if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+                       new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+                       SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+               }
+
+               if (data_type_is_empty(new_a->data_type) &&
+                   BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+                   !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
+                       new_a->gen++;
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+               }
+
+               if (old_a->data_type != new_a->data_type ||
+                   (new_a->data_type == BCH_DATA_free &&
+                    alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+                       ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
+                               bch2_bucket_do_index(trans, new.s_c, new_a, true);
+                       if (ret)
+                               return ret;
+               }
+
+               if (new_a->data_type == BCH_DATA_cached &&
+                   !new_a->io_time[READ])
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
+               u64 old_lru = alloc_lru_idx_read(*old_a);
+               u64 new_lru = alloc_lru_idx_read(*new_a);
+               if (old_lru != new_lru) {
+                       ret = bch2_lru_change(trans, new.k->p.inode,
+                                             bucket_to_u64(new.k->p),
+                                             old_lru, new_lru);
+                       if (ret)
+                               return ret;
+               }
+
+               new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+                                               bch_dev_bkey_exists(c, new.k->p.inode));
+               if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+                       ret = bch2_lru_change(trans,
+                                       BCH_LRU_FRAGMENTATION_START,
+                                       bucket_to_u64(new.k->p),
+                                       old_a->fragmentation_lru, new_a->fragmentation_lru);
+                       if (ret)
+                               return ret;
+               }
+
+               if (old_a->gen != new_a->gen) {
+                       ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
+                       if (ret)
+                               return ret;
+               }
+
+               /*
+                * need to know if we're getting called from the invalidate path or
+                * not:
+                */
+
+               if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+                   old_a->cached_sectors) {
+                       ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
+                                                             -((s64) old_a->cached_sectors));
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+               u64 journal_seq = trans->journal_res.seq;
+               u64 bucket_journal_seq = new_a->journal_seq;
+
+               if ((flags & BTREE_TRIGGER_INSERT) &&
+                   data_type_is_empty(old_a->data_type) !=
+                   data_type_is_empty(new_a->data_type) &&
+                   new.k->type == KEY_TYPE_alloc_v4) {
+                       struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
+
+                       /*
+                        * If the btree updates referring to a bucket weren't flushed
+                        * before the bucket became empty again, then we don't have
+                        * to wait on a journal flush before we can reuse the bucket:
+                        */
+                       v->journal_seq = bucket_journal_seq =
+                               data_type_is_empty(new_a->data_type) &&
+                               (journal_seq == v->journal_seq ||
+                                bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+                               ? 0 : journal_seq;
+               }
+
+               if (!data_type_is_empty(old_a->data_type) &&
+                   data_type_is_empty(new_a->data_type) &&
+                   bucket_journal_seq) {
+                       ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                                       c->journal.flushed_seq_ondisk,
+                                       new.k->p.inode, new.k->p.offset,
+                                       bucket_journal_seq);
+                       if (ret) {
+                               bch2_fs_fatal_error(c,
+                                       "error setting bucket_needs_journal_commit: %i", ret);
+                               return ret;
+                       }
+               }
+
+               percpu_down_read(&c->mark_lock);
+               if (new_a->gen != old_a->gen)
+                       *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+               bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
+               percpu_up_read(&c->mark_lock);
+
+#define eval_state(_a, expr)           ({ const struct bch_alloc_v4 *a = _a; expr; })
+#define statechange(expr)              !eval_state(old_a, expr) && eval_state(new_a, expr)
+#define bucket_flushed(a)              (!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)
+
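+               /*
+                * statechange(expr) is true when expr was false for the old
+                * alloc key and true for the new one, i.e. the bucket just
+                * entered that state:
+                */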
+               if (statechange(a->data_type == BCH_DATA_free &&
+                               bucket_flushed(a)))
+                       closure_wake_up(&c->freelist_wait);
+
+               if (statechange(a->data_type == BCH_DATA_need_discard &&
+                               bucket_flushed(a)) &&
+                   !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset))
+                       bch2_do_discards(c);
+
+               if (statechange(a->data_type == BCH_DATA_cached) &&
+                   !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
+                   should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+                       bch2_do_invalidates(c);
+
+               if (statechange(a->data_type == BCH_DATA_need_gc_gens))
+                       bch2_do_gc_gens(c);
+       }
+
+       if ((flags & BTREE_TRIGGER_GC) &&
+           (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+               struct bch_alloc_v4 new_a_convert;
+               const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
+
+               percpu_down_read(&c->mark_lock);
+               struct bucket *g = gc_bucket(ca, new.k->p.offset);
+
+               bucket_lock(g);
+
+               g->gen_valid            = 1;
+               g->gen                  = new_a->gen;
+               g->data_type            = new_a->data_type;
+               g->stripe               = new_a->stripe;
+               g->stripe_redundancy    = new_a->stripe_redundancy;
+               g->dirty_sectors        = new_a->dirty_sectors;
+               g->cached_sectors       = new_a->cached_sectors;
+
+               bucket_unlock(g);
+               percpu_up_read(&c->mark_lock);
+       }
+
+       return 0;
+}
+
+/*
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * extents-style btrees, but works on non-extents btrees:
+ */
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+{
+       struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+       if (bkey_err(k))
+               return k;
+
+       if (k.k->type) {
+               return k;
+       } else {
+               struct btree_iter iter2;
+               struct bpos next;
+
+               bch2_trans_copy_iter(&iter2, iter);
+
+               struct btree_path *path = btree_iter_path(iter->trans, iter);
+               if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
+                       end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
+
+               end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
+
+               /*
+                * btree node min/max is a closed interval, while
+                * peek_upto() takes a half-open interval:
+                */
+               k = bch2_btree_iter_peek_upto(&iter2, end);
+               next = iter2.pos;
+               bch2_trans_iter_exit(iter->trans, &iter2);
+
+               BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
+
+               if (bkey_err(k))
+                       return k;
+
+               bkey_init(hole);
+               hole->p = iter->pos;
+
+               bch2_key_resize(hole, next.offset - iter->pos.offset);
+               return (struct bkey_s_c) { hole, NULL };
+       }
+}
+
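+/*
+ * Advance *bucket to the next position that refers to a live bucket on some
+ * member device, returning false if there are none left:
+ */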
+static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
+{
+       struct bch_dev *ca;
+
+       if (bch2_dev_bucket_exists(c, *bucket))
+               return true;
+
+       if (bch2_dev_exists2(c, bucket->inode)) {
+               ca = bch_dev_bkey_exists(c, bucket->inode);
+
+               if (bucket->offset < ca->mi.first_bucket) {
+                       bucket->offset = ca->mi.first_bucket;
+                       return true;
+               }
+
+               bucket->inode++;
+               bucket->offset = 0;
+       }
+
+       rcu_read_lock();
+       ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
+       if (ca)
+               *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
+       rcu_read_unlock();
+
+       return ca != NULL;
+}
+
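+/*
+ * Like bch2_get_key_or_hole(), but holes are clamped so they never extend
+ * past the last bucket of the device they start on:
+ */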
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+{
+       struct bch_fs *c = iter->trans->c;
+       struct bkey_s_c k;
+again:
+       k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+       if (bkey_err(k))
+               return k;
+
+       if (!k.k->type) {
+               struct bpos bucket = bkey_start_pos(k.k);
+
+               if (!bch2_dev_bucket_exists(c, bucket)) {
+                       if (!next_bucket(c, &bucket))
+                               return bkey_s_c_null;
+
+                       bch2_btree_iter_set_pos(iter, bucket);
+                       goto again;
+               }
+
+               if (!bch2_dev_bucket_exists(c, k.k->p)) {
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+
+                       bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
+               }
+       }
+
+       return k;
+}
+
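+/*
+ * fsck: check that an alloc key is consistent with the corresponding entries
+ * in the need_discard, freespace and bucket_gens btrees, repairing them if
+ * not:
+ */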
+static noinline_for_stack
+int bch2_check_alloc_key(struct btree_trans *trans,
+                        struct bkey_s_c alloc_k,
+                        struct btree_iter *alloc_iter,
+                        struct btree_iter *discard_iter,
+                        struct btree_iter *freespace_iter,
+                        struct btree_iter *bucket_gens_iter)
+{
+       struct bch_fs *c = trans->c;
+       struct bch_dev *ca;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
+       unsigned discard_key_type, freespace_key_type;
+       unsigned gens_offset;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+                       alloc_key_to_missing_dev_bucket,
+                       "alloc key for invalid device:bucket %llu:%llu",
+                       alloc_k.k->p.inode, alloc_k.k->p.offset))
+               return bch2_btree_delete_at(trans, alloc_iter, 0);
+
+       ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+       if (!ca->mi.freespace_initialized)
+               return 0;
+
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+       discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
+       bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
+       k = bch2_btree_iter_peek_slot(discard_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       if (k.k->type != discard_key_type &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, need_discard_key_wrong,
+                     "incorrect key in need_discard btree (got %s should be %s)\n"
+                     "  %s",
+                     bch2_bkey_types[k.k->type],
+                     bch2_bkey_types[discard_key_type],
+                     (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+               struct bkey_i *update =
+                       bch2_trans_kmalloc(trans, sizeof(*update));
+
+               ret = PTR_ERR_OR_ZERO(update);
+               if (ret)
+                       goto err;
+
+               bkey_init(&update->k);
+               update->k.type  = discard_key_type;
+               update->k.p     = discard_iter->pos;
+
+               ret = bch2_trans_update(trans, discard_iter, update, 0);
+               if (ret)
+                       goto err;
+       }
+
+       freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
+       bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+       k = bch2_btree_iter_peek_slot(freespace_iter);
+       ret = bkey_err(k);
        if (ret)
                goto err;
 
-       ret = bch2_btree_iter_traverse(&iter);
+       if (k.k->type != freespace_key_type &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, freespace_key_wrong,
+                     "incorrect key in freespace btree (got %s should be %s)\n"
+                     "  %s",
+                     bch2_bkey_types[k.k->type],
+                     bch2_bkey_types[freespace_key_type],
+                     (printbuf_reset(&buf),
+                      bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+               struct bkey_i *update =
+                       bch2_trans_kmalloc(trans, sizeof(*update));
+
+               ret = PTR_ERR_OR_ZERO(update);
+               if (ret)
+                       goto err;
+
+               bkey_init(&update->k);
+               update->k.type  = freespace_key_type;
+               update->k.p     = freespace_iter->pos;
+               bch2_key_resize(&update->k, 1);
+
+               ret = bch2_trans_update(trans, freespace_iter, update, 0);
+               if (ret)
+                       goto err;
+       }
+
+       bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+       k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+       ret = bkey_err(k);
        if (ret)
                goto err;
 
-       percpu_down_read(&c->mark_lock);
-       g = bucket(ca, b);
-       m = READ_ONCE(g->mark);
-       u = alloc_mem_to_key(&iter, g, m);
-       percpu_up_read(&c->mark_lock);
-
-       u.gen++;
-       u.data_type     = 0;
-       u.dirty_sectors = 0;
-       u.cached_sectors = 0;
-       u.read_time     = atomic64_read(&c->io_clock[READ].now);
-       u.write_time    = atomic64_read(&c->io_clock[WRITE].now);
-
-       bch2_alloc_pack(c, a, u);
-       ret = bch2_trans_update(trans, &iter, &a->k,
-                               BTREE_TRIGGER_BUCKET_INVALIDATE);
+       if (a->gen != alloc_gen(k, gens_offset) &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, bucket_gens_key_wrong,
+                     "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+                     "  %s",
+                     alloc_gen(k, gens_offset), a->gen,
+                     (printbuf_reset(&buf),
+                      bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+               struct bkey_i_bucket_gens *g =
+                       bch2_trans_kmalloc(trans, sizeof(*g));
+
+               ret = PTR_ERR_OR_ZERO(g);
+               if (ret)
+                       goto err;
+
+               if (k.k->type == KEY_TYPE_bucket_gens) {
+                       bkey_reassemble(&g->k_i, k);
+               } else {
+                       bkey_bucket_gens_init(&g->k_i);
+                       g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
+               }
+
+               g->v.gens[gens_offset] = a->gen;
+
+               ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
+               if (ret)
+                       goto err;
+       }
 err:
-       bch2_trans_iter_exit(trans, &iter);
+fsck_err:
+       printbuf_exit(&buf);
        return ret;
 }
 
-static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
-                                     u64 *journal_seq, unsigned flags)
+static noinline_for_stack
+int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+                                   struct bpos start,
+                                   struct bpos *end,
+                                   struct btree_iter *freespace_iter)
 {
-       struct bucket *g;
-       struct bucket_mark m;
-       size_t b;
-       int ret = 0;
+       struct bch_fs *c = trans->c;
+       struct bch_dev *ca;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       int ret;
 
-       BUG_ON(!ca->alloc_heap.used ||
-              !ca->alloc_heap.data[0].nr);
-       b = ca->alloc_heap.data[0].bucket;
+       ca = bch_dev_bkey_exists(c, start.inode);
+       if (!ca->mi.freespace_initialized)
+               return 0;
 
-       /* first, put on free_inc and mark as owned by allocator: */
-       percpu_down_read(&c->mark_lock);
-       g = bucket(ca, b);
-       m = READ_ONCE(g->mark);
+       bch2_btree_iter_set_pos(freespace_iter, start);
 
-       BUG_ON(m.dirty_sectors);
+       k = bch2_btree_iter_peek_slot(freespace_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
 
-       bch2_mark_alloc_bucket(c, ca, b, true);
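+       /* only check the hole up to where this freespace key ends: */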
+       *end = bkey_min(k.k->p, *end);
 
-       spin_lock(&c->freelist_lock);
-       verify_not_on_freelist(c, ca, b);
-       BUG_ON(!fifo_push(&ca->free_inc, b));
-       spin_unlock(&c->freelist_lock);
+       if (k.k->type != KEY_TYPE_set &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, freespace_hole_missing,
+                     "hole in alloc btree missing in freespace btree\n"
+                     "  device %llu buckets %llu-%llu",
+                     freespace_iter->pos.inode,
+                     freespace_iter->pos.offset,
+                     end->offset))) {
+               struct bkey_i *update =
+                       bch2_trans_kmalloc(trans, sizeof(*update));
 
-       /*
-        * If we're not invalidating cached data, we only increment the bucket
-        * gen in memory here, the incremented gen will be updated in the btree
-        * by bch2_trans_mark_pointer():
-        */
-       if (!m.cached_sectors &&
-           !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
-               BUG_ON(m.data_type);
-               bucket_cmpxchg(g, m, m.gen++);
-               percpu_up_read(&c->mark_lock);
-               goto out;
-       }
+               ret = PTR_ERR_OR_ZERO(update);
+               if (ret)
+                       goto err;
 
-       percpu_up_read(&c->mark_lock);
+               bkey_init(&update->k);
+               update->k.type  = KEY_TYPE_set;
+               update->k.p     = freespace_iter->pos;
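+               /* a bkey's size field is 32 bit, so cap the extent size: */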
+               bch2_key_resize(&update->k,
+                               min_t(u64, U32_MAX, end->offset -
+                                     freespace_iter->pos.offset));
 
-       /*
-        * If the read-only path is trying to shut down, we can't be generating
-        * new btree updates:
-        */
-       if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
-               ret = 1;
-               goto out;
+               ret = bch2_trans_update(trans, freespace_iter, update, 0);
+               if (ret)
+                       goto err;
        }
+err:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
 
-       ret = bch2_trans_do(c, NULL, journal_seq,
-                           BTREE_INSERT_NOCHECK_RW|
-                           BTREE_INSERT_NOFAIL|
-                           BTREE_INSERT_JOURNAL_RESERVED|
-                           flags,
-                           bucket_invalidate_btree(&trans, ca, b));
-out:
-       if (!ret) {
-               /* remove from alloc_heap: */
-               struct alloc_heap_entry e, *top = ca->alloc_heap.data;
+static noinline_for_stack
+int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+                                     struct bpos start,
+                                     struct bpos *end,
+                                     struct btree_iter *bucket_gens_iter)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       unsigned i, gens_offset, gens_end_offset;
+       int ret;
 
-               top->bucket++;
-               top->nr--;
+       bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
 
-               if (!top->nr)
-                       heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
+       k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
 
-               /*
-                * Make sure we flush the last journal entry that updated this
-                * bucket (i.e. deleting the last reference) before writing to
-                * this bucket again:
-                */
-               *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
-       } else {
-               size_t b2;
+       if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
+                    alloc_gens_pos(*end,  &gens_end_offset)))
+               gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
+
+       if (k.k->type == KEY_TYPE_bucket_gens) {
+               struct bkey_i_bucket_gens g;
+               bool need_update = false;
+
+               bkey_reassemble(&g.k_i, k);
+
+               for (i = gens_offset; i < gens_end_offset; i++) {
+                       if (fsck_err_on(g.v.gens[i], c,
+                                       bucket_gens_hole_wrong,
+                                       "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
+                                       bucket_gens_pos_to_alloc(k.k->p, i).inode,
+                                       bucket_gens_pos_to_alloc(k.k->p, i).offset,
+                                       g.v.gens[i])) {
+                               g.v.gens[i] = 0;
+                               need_update = true;
+                       }
+               }
 
-               /* remove from free_inc: */
-               percpu_down_read(&c->mark_lock);
-               spin_lock(&c->freelist_lock);
+               if (need_update) {
+                       struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
 
-               bch2_mark_alloc_bucket(c, ca, b, false);
+                       ret = PTR_ERR_OR_ZERO(u);
+                       if (ret)
+                               goto err;
 
-               BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
-               BUG_ON(b != b2);
+                       memcpy(u, &g, sizeof(g));
 
-               spin_unlock(&c->freelist_lock);
-               percpu_up_read(&c->mark_lock);
+                       ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
+                       if (ret)
+                               goto err;
+               }
        }
 
-       return ret < 0 ? ret : 0;
+       *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
+err:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
+
+static noinline_for_stack
+int bch2_check_discard_freespace_key(struct btree_trans *trans,
+                                     struct btree_iter *iter)
+{
+       struct bch_fs *c = trans->c;
+       struct btree_iter alloc_iter;
+       struct bkey_s_c alloc_k;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
+       u64 genbits;
+       struct bpos pos;
+       enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
+               ? BCH_DATA_need_discard
+               : BCH_DATA_free;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
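+       /*
+        * Freespace btree keys pack the bucket in the low 56 bits of the
+        * offset and the generation bits in the high 8 bits; split them back
+        * apart:
+        */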
+       pos = iter->pos;
+       pos.offset &= ~(~0ULL << 56);
+       genbits = iter->pos.offset & (~0ULL << 56);
+
+       alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
+       ret = bkey_err(alloc_k);
+       if (ret)
+               return ret;
+
+       if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+                       need_discard_freespace_key_to_invalid_dev_bucket,
+                       "entry in %s btree for nonexistent dev:bucket %llu:%llu",
+                       bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
+               goto delete;
+
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+       if (fsck_err_on(a->data_type != state ||
+                       (state == BCH_DATA_free &&
+                        genbits != alloc_freespace_genbits(*a)), c,
+                       need_discard_freespace_key_bad,
+                       "%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
+                       (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+                       bch2_btree_id_str(iter->btree_id),
+                       iter->pos.inode,
+                       iter->pos.offset,
+                       a->data_type == state,
+                       genbits >> 56, alloc_freespace_genbits(*a) >> 56))
+               goto delete;
+out:
+fsck_err:
+       set_btree_iter_dontneed(&alloc_iter);
+       bch2_trans_iter_exit(trans, &alloc_iter);
+       printbuf_exit(&buf);
+       return ret;
+delete:
+       ret =   bch2_btree_delete_extent_at(trans, iter,
+                       iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                       BCH_TRANS_COMMIT_no_enospc);
+       goto out;
 }
 
 /*
- * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
+ * We've already checked that generation numbers in the bucket_gens btree are
+ * valid for buckets that exist; this just checks for keys for nonexistent
+ * buckets.
  */
-static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
+static noinline_for_stack
+int bch2_check_bucket_gens_key(struct btree_trans *trans,
+                              struct btree_iter *iter,
+                              struct bkey_s_c k)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_i_bucket_gens g;
+       struct bch_dev *ca;
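+       /* the range of buckets this bucket_gens key covers: */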
+       u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+       u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+       u64 b;
+       bool need_update = false;
+       struct printbuf buf = PRINTBUF;
+       int ret = 0;
+
+       BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
+       bkey_reassemble(&g.k_i, k);
+
+       /* if the device doesn't exist, skip out whether we repair or not */
+       if (!bch2_dev_exists2(c, k.k->p.inode)) {
+               if (fsck_err(c, bucket_gens_to_invalid_dev,
+                            "bucket_gens key for invalid device:\n  %s",
+                            (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+                       ret = bch2_btree_delete_at(trans, iter, 0);
+               goto out;
+       }
+
+       ca = bch_dev_bkey_exists(c, k.k->p.inode);
+       if (fsck_err_on(end <= ca->mi.first_bucket ||
+                       start >= ca->mi.nbuckets, c,
+                       bucket_gens_to_invalid_buckets,
+                       "bucket_gens key for invalid buckets:\n  %s",
+                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+               ret = bch2_btree_delete_at(trans, iter, 0);
+               goto out;
+       }
+
+       for (b = start; b < ca->mi.first_bucket; b++)
+               if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
+                               "bucket_gens key has nonzero gen for invalid bucket")) {
+                       g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+                       need_update = true;
+               }
+
+       for (b = ca->mi.nbuckets; b < end; b++)
+               if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
+                               "bucket_gens key has nonzero gen for invalid bucket")) {
+                       g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+                       need_update = true;
+               }
+
+       if (need_update) {
+               struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
+
+               ret = PTR_ERR_OR_ZERO(u);
+               if (ret)
+                       goto out;
+
+               memcpy(u, &g, sizeof(g));
+               ret = bch2_trans_update(trans, iter, u, 0);
+       }
+out:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
+
+int bch2_check_alloc_info(struct bch_fs *c)
 {
-       u64 journal_seq = 0;
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
+       struct bkey hole;
+       struct bkey_s_c k;
        int ret = 0;
 
-       /* Only use nowait if we've already invalidated at least one bucket: */
-       while (!ret &&
-              !fifo_full(&ca->free_inc) &&
-              ca->alloc_heap.used) {
-               if (kthread_should_stop()) {
-                       ret = 1;
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+       bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+       bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+       bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+
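+       /*
+        * Walk the alloc btree including holes: each key is checked against
+        * the need_discard, freespace and bucket_gens btrees, and each hole
+        * is checked for stray entries in those btrees:
+        */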
+       while (1) {
+               struct bpos next;
+
+               bch2_trans_begin(trans);
+
+               k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
+               ret = bkey_err(k);
+               if (ret)
+                       goto bkey_err;
+
+               if (!k.k)
                        break;
+
+               if (k.k->type) {
+                       next = bpos_nosnap_successor(k.k->p);
+
+                       ret = bch2_check_alloc_key(trans,
+                                                  k, &iter,
+                                                  &discard_iter,
+                                                  &freespace_iter,
+                                                  &bucket_gens_iter);
+                       if (ret)
+                               goto bkey_err;
+               } else {
+                       next = k.k->p;
+
+                       ret = bch2_check_alloc_hole_freespace(trans,
+                                                   bkey_start_pos(k.k),
+                                                   &next,
+                                                   &freespace_iter) ?:
+                               bch2_check_alloc_hole_bucket_gens(trans,
+                                                   bkey_start_pos(k.k),
+                                                   &next,
+                                                   &bucket_gens_iter);
+                       if (ret)
+                               goto bkey_err;
                }
 
-               ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
-                               (!fifo_empty(&ca->free_inc)
-                                ? BTREE_INSERT_NOWAIT : 0));
-               /*
-                * We only want to batch up invalidates when they're going to
-                * require flushing the journal:
-                */
-               if (!journal_seq)
+               ret = bch2_trans_commit(trans, NULL, NULL,
+                                       BCH_TRANS_COMMIT_no_enospc);
+               if (ret)
+                       goto bkey_err;
+
+               bch2_btree_iter_set_pos(&iter, next);
+bkey_err:
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
+               if (ret)
                        break;
        }
+       bch2_trans_iter_exit(trans, &bucket_gens_iter);
+       bch2_trans_iter_exit(trans, &freespace_iter);
+       bch2_trans_iter_exit(trans, &discard_iter);
+       bch2_trans_iter_exit(trans, &iter);
 
-       /* If we used NOWAIT, don't return the error: */
-       if (!fifo_empty(&ca->free_inc))
-               ret = 0;
        if (ret < 0)
-               bch_err(ca, "error invalidating buckets: %i", ret);
+               goto err;
+
+       ret = for_each_btree_key(trans, iter,
+                       BTREE_ID_need_discard, POS_MIN,
+                       BTREE_ITER_PREFETCH, k,
+               bch2_check_discard_freespace_key(trans, &iter));
+       if (ret)
+               goto err;
+
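+       /*
+        * The freespace btree is walked with an open-coded loop so that we
+        * can step past each (possibly extent-sized) key with
+        * bpos_nosnap_successor():
+        */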
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+       while (1) {
+               bch2_trans_begin(trans);
+               k = bch2_btree_iter_peek(&iter);
+               if (!k.k)
+                       break;
+
+               ret = bkey_err(k) ?:
+                       bch2_check_discard_freespace_key(trans, &iter);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+                       ret = 0;
+                       continue;
+               }
+               if (ret) {
+                       struct printbuf buf = PRINTBUF;
+                       bch2_bkey_val_to_text(&buf, c, k);
+
+                       bch_err(c, "while checking %s", buf.buf);
+                       printbuf_exit(&buf);
+                       break;
+               }
+
+               bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
+       }
+       bch2_trans_iter_exit(trans, &iter);
+       if (ret)
+               goto err;
+
+       ret = for_each_btree_key_commit(trans, iter,
+                       BTREE_ID_bucket_gens, POS_MIN,
+                       BTREE_ITER_PREFETCH, k,
+                       NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+               bch2_check_bucket_gens_key(trans, &iter, k));
+err:
+       bch2_trans_put(trans);
+       bch_err_fn(c, ret);
+       return ret;
+}
+
+static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
+                                      struct btree_iter *alloc_iter)
+{
+       struct bch_fs *c = trans->c;
+       struct btree_iter lru_iter;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
+       struct bkey_s_c alloc_k, lru_k;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       alloc_k = bch2_btree_iter_peek(alloc_iter);
+       if (!alloc_k.k)
+               return 0;
+
+       ret = bkey_err(alloc_k);
        if (ret)
                return ret;
 
-       if (journal_seq)
-               ret = bch2_journal_flush_seq(&c->journal, journal_seq);
-       if (ret) {
-               bch_err(ca, "journal error: %i", ret);
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
+
+       if (a->data_type != BCH_DATA_cached)
+               return 0;
+
+       if (fsck_err_on(!a->io_time[READ], c,
+                       alloc_key_cached_but_read_time_zero,
+                       "cached bucket with read_time 0\n"
+                       "  %s",
+                       (printbuf_reset(&buf),
+                        bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+               struct bkey_i_alloc_v4 *a_mut =
+                       bch2_alloc_to_v4_mut(trans, alloc_k);
+               ret = PTR_ERR_OR_ZERO(a_mut);
+               if (ret)
+                       goto err;
+
+               a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+               ret = bch2_trans_update(trans, alloc_iter,
+                                       &a_mut->k_i, BTREE_TRIGGER_NORUN);
+               if (ret)
+                       goto err;
+
+               a = &a_mut->v;
+       }
+
+       lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
+                            lru_pos(alloc_k.k->p.inode,
+                                    bucket_to_u64(alloc_k.k->p),
+                                    a->io_time[READ]), 0);
+       ret = bkey_err(lru_k);
+       if (ret)
                return ret;
+
+       if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
+                       alloc_key_to_missing_lru_entry,
+                       "missing lru entry\n"
+                       "  %s",
+                       (printbuf_reset(&buf),
+                        bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+               ret = bch2_lru_set(trans,
+                                  alloc_k.k->p.inode,
+                                  bucket_to_u64(alloc_k.k->p),
+                                  a->io_time[READ]);
+               if (ret)
+                       goto err;
        }
+err:
+fsck_err:
+       bch2_trans_iter_exit(trans, &lru_iter);
+       printbuf_exit(&buf);
+       return ret;
+}
 
-       return 0;
+int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
+{
+       int ret = bch2_trans_run(c,
+               for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+                               POS_MIN, BTREE_ITER_PREFETCH, k,
+                               NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+                       bch2_check_alloc_to_lru_ref(trans, &iter)));
+       bch_err_fn(c, ret);
+       return ret;
 }
 
-static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
+struct discard_buckets_state {
+       u64             seen;
+       u64             open;
+       u64             need_journal_commit;
+       u64             discarded;
+       struct bch_dev  *ca;
+       u64             need_journal_commit_this_dev;
+};
+
+static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
 {
-       if (ca->allocator_state != new_state) {
-               ca->allocator_state = new_state;
-               closure_wake_up(&ca->fs->freelist_wait);
-       }
+       if (s->ca == ca)
+               return;
+
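+       /*
+        * If more buckets on this device are blocked on a journal commit than
+        * are currently free, kick off an async journal flush:
+        */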
+       if (s->ca && s->need_journal_commit_this_dev >
+           bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
+               bch2_journal_flush_async(&c->journal, NULL);
+
+       if (s->ca)
+               percpu_ref_put(&s->ca->ref);
+       if (ca)
+               percpu_ref_get(&ca->ref);
+       s->ca = ca;
+       s->need_journal_commit_this_dev = 0;
 }
 
-static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
+static int bch2_discard_one_bucket(struct btree_trans *trans,
+                                  struct btree_iter *need_discard_iter,
+                                  struct bpos *discard_pos_done,
+                                  struct discard_buckets_state *s)
 {
-       unsigned i;
+       struct bch_fs *c = trans->c;
+       struct bpos pos = need_discard_iter->pos;
+       struct btree_iter iter = { NULL };
+       struct bkey_s_c k;
+       struct bch_dev *ca;
+       struct bkey_i_alloc_v4 *a;
+       struct printbuf buf = PRINTBUF;
        int ret = 0;
 
-       spin_lock(&c->freelist_lock);
-       for (i = 0; i < RESERVE_NR; i++) {
-               /*
-                * Don't strand buckets on the copygc freelist until
-                * after recovery is finished:
-                */
-               if (i == RESERVE_MOVINGGC &&
-                   !test_bit(BCH_FS_STARTED, &c->flags))
-                       continue;
+       ca = bch_dev_bkey_exists(c, pos.inode);
 
-               if (fifo_push(&ca->free[i], b)) {
-                       fifo_pop(&ca->free_inc, b);
-                       ret = 1;
-                       break;
+       if (!percpu_ref_tryget(&ca->io_ref)) {
+               bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
+               return 0;
+       }
+
+       discard_buckets_next_dev(c, s, ca);
+
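+       /* skip buckets that are currently open (allocated for writing): */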
+       if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
+               s->open++;
+               goto out;
+       }
+
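+       /*
+        * Don't discard a bucket until its last update has hit the journal on
+        * disk:
+        */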
+       if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                       c->journal.flushed_seq_ondisk,
+                       pos.inode, pos.offset)) {
+               s->need_journal_commit++;
+               s->need_journal_commit_this_dev++;
+               goto out;
+       }
+
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+                              need_discard_iter->pos,
+                              BTREE_ITER_CACHED);
+       ret = bkey_err(k);
+       if (ret)
+               goto out;
+
+       a = bch2_alloc_to_v4_mut(trans, k);
+       ret = PTR_ERR_OR_ZERO(a);
+       if (ret)
+               goto out;
+
+       if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
+               a->v.gen++;
+               SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+               goto write;
+       }
+
+       if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+               if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+                       bch2_trans_inconsistent(trans,
+                               "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+                               "%s",
+                               a->v.journal_seq,
+                               c->journal.flushed_seq_ondisk,
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+                       ret = -EIO;
                }
+               goto out;
        }
-       spin_unlock(&c->freelist_lock);
 
-       ca->allocator_state = ret
-               ? ALLOCATOR_running
-               : ALLOCATOR_blocked_full;
-       closure_wake_up(&c->freelist_wait);
+       if (a->v.data_type != BCH_DATA_need_discard) {
+               if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+                       bch2_trans_inconsistent(trans,
+                               "bucket incorrectly set in need_discard btree\n"
+                               "%s",
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+                       ret = -EIO;
+               }
+
+               goto out;
+       }
+
+       if (!bkey_eq(*discard_pos_done, iter.pos) &&
+           ca->mi.discard && !c->opts.nochanges) {
+               /*
+                * This works without any other locks because this is the only
+                * thread that removes items from the need_discard tree
+                */
+               bch2_trans_unlock_long(trans);
+               blkdev_issue_discard(ca->disk_sb.bdev,
+                                    k.k->p.offset * ca->mi.bucket_size,
+                                    ca->mi.bucket_size,
+                                    GFP_KERNEL);
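+               /*
+                * Note the position we got to, so that a transaction restart
+                * doesn't reissue the same discard:
+                */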
+               *discard_pos_done = iter.pos;
+
+               ret = bch2_trans_relock_notrace(trans);
+               if (ret)
+                       goto out;
+       }
+
+       SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
+       a->v.data_type = alloc_data_type(a->v, a->v.data_type);
+write:
+       ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                                 BCH_WATERMARK_btree|
+                                 BCH_TRANS_COMMIT_no_enospc);
+       if (ret)
+               goto out;
+
+       count_event(c, bucket_discard);
+       s->discarded++;
+out:
+       s->seen++;
+       bch2_trans_iter_exit(trans, &iter);
+       percpu_ref_put(&ca->io_ref);
+       printbuf_exit(&buf);
        return ret;
 }
 
-static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
+static void bch2_do_discards_work(struct work_struct *work)
 {
-       if (ca->mi.discard &&
-           blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
-               blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
-                                    ca->mi.bucket_size, GFP_NOFS, 0);
+       struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
+       struct discard_buckets_state s = {};
+       struct bpos discard_pos_done = POS_MAX;
+       int ret;
+
+       /*
+        * We're doing the commit in bch2_discard_one_bucket instead of using
+        * for_each_btree_key_commit() so that we can increment counters after
+        * successful commit:
+        */
+       ret = bch2_trans_run(c,
+               for_each_btree_key(trans, iter,
+                                  BTREE_ID_need_discard, POS_MIN, 0, k,
+                       bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
+
+       discard_buckets_next_dev(c, &s, NULL);
+
+       trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
+                             bch2_err_str(ret));
+
+       bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
 
-static bool allocator_thread_running(struct bch_dev *ca)
+void bch2_do_discards(struct bch_fs *c)
 {
-       unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
-               test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
-               ? ALLOCATOR_running
-               : ALLOCATOR_stopped;
-       alloc_thread_set_state(ca, state);
-       return state == ALLOCATOR_running;
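+       /*
+        * The work item owns the write ref; if the work was already queued,
+        * drop the ref we just took:
+        */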
+       if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
+           !queue_work(c->write_ref_wq, &c->discard_work))
+               bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
 
-static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
+static int invalidate_one_bucket(struct btree_trans *trans,
+                                struct btree_iter *lru_iter,
+                                struct bkey_s_c lru_k,
+                                s64 *nr_to_invalidate)
 {
-       s64 available = dev_buckets_reclaimable(ca) -
-               (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
-       bool ret = available > 0;
+       struct bch_fs *c = trans->c;
+       struct btree_iter alloc_iter = { NULL };
+       struct bkey_i_alloc_v4 *a = NULL;
+       struct printbuf buf = PRINTBUF;
+       struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
+       unsigned cached_sectors;
+       int ret = 0;
+
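+       /*
+        * A positive return ends the btree iteration early; the caller only
+        * treats negative returns as errors:
+        */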
+       if (*nr_to_invalidate <= 0)
+               return 1;
+
+       if (!bch2_dev_bucket_exists(c, bucket)) {
+               prt_str(&buf, "lru entry points to invalid bucket");
+               goto err;
+       }
+
+       if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+               return 0;
+
+       a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
+       ret = PTR_ERR_OR_ZERO(a);
+       if (ret)
+               goto out;
+
+       /* We expect harmless races here due to the btree write buffer: */
+       if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
+               goto out;
+
+       BUG_ON(a->v.data_type != BCH_DATA_cached);
+
+       if (!a->v.cached_sectors)
+               bch_err(c, "invalidating empty bucket, confused");
+
+       cached_sectors = a->v.cached_sectors;
+
+       SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
+       a->v.gen++;
+       a->v.data_type          = 0;
+       a->v.dirty_sectors      = 0;
+       a->v.cached_sectors     = 0;
+       a->v.io_time[READ]      = atomic64_read(&c->io_clock[READ].now);
+       a->v.io_time[WRITE]     = atomic64_read(&c->io_clock[WRITE].now);
+
+       ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
+                               BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                                 BCH_WATERMARK_btree|
+                                 BCH_TRANS_COMMIT_no_enospc);
+       if (ret)
+               goto out;
 
-       alloc_thread_set_state(ca, ret
-                              ? ALLOCATOR_running
-                              : ALLOCATOR_blocked);
+       trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
+       --*nr_to_invalidate;
+out:
+       bch2_trans_iter_exit(trans, &alloc_iter);
+       printbuf_exit(&buf);
        return ret;
+err:
+       prt_str(&buf, "\n  lru key: ");
+       bch2_bkey_val_to_text(&buf, c, lru_k);
+
+       prt_str(&buf, "\n  lru entry: ");
+       bch2_lru_pos_to_text(&buf, lru_iter->pos);
+
+       prt_str(&buf, "\n  alloc key: ");
+       if (!a)
+               bch2_bpos_to_text(&buf, bucket);
+       else
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+
+       bch_err(c, "%s", buf.buf);
+       if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
+               bch2_inconsistent_error(c);
+               ret = -EINVAL;
+       }
+
+       goto out;
 }
 
-/**
- * bch_allocator_thread - move buckets from free_inc to reserves
- *
- * The free_inc FIFO is populated by find_reclaimable_buckets(), and
- * the reserves are depleted by bucket allocation. When we run out
- * of free_inc, try to invalidate some buckets and write out
- * prios and gens.
- */
-static int bch2_allocator_thread(void *arg)
+static void bch2_do_invalidates_work(struct work_struct *work)
 {
-       struct bch_dev *ca = arg;
-       struct bch_fs *c = ca->fs;
-       unsigned long gc_count = c->gc_count;
-       size_t nr;
+       struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
+       struct btree_trans *trans = bch2_trans_get(c);
+       int ret = 0;
+
+       ret = bch2_btree_write_buffer_tryflush(trans);
+       if (ret)
+               goto err;
+
+       for_each_member_device(c, ca) {
+               s64 nr_to_invalidate =
+                       should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+
+               ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
+                               lru_pos(ca->dev_idx, 0, 0),
+                               lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+                               BTREE_ITER_INTENT, k,
+                       invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
+
+               if (ret < 0) {
+                       percpu_ref_put(&ca->ref);
+                       break;
+               }
+       }
+err:
+       bch2_trans_put(trans);
+       bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+}
+
+void bch2_do_invalidates(struct bch_fs *c)
+{
+       if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
+           !queue_work(c->write_ref_wq, &c->invalidate_work))
+               bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
+}
+
+int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
+                           u64 bucket_start, u64 bucket_end)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct bkey hole;
+       struct bpos end = POS(ca->dev_idx, bucket_end);
+       struct bch_member *m;
+       unsigned long last_updated = jiffies;
        int ret;
 
-       set_freezable();
+       BUG_ON(bucket_start > bucket_end);
+       BUG_ON(bucket_end > ca->mi.nbuckets);
 
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+               POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
+               BTREE_ITER_PREFETCH);
+       /*
+        * Scan the alloc btree for every bucket on @ca, and add buckets to the
+        * freespace/need_discard/need_gc_gens btrees as needed:
+        */
        while (1) {
-               ret = kthread_wait_freezable(allocator_thread_running(ca));
+               if (time_after(jiffies, last_updated + HZ * 10)) {
+                       bch_info(ca, "%s: currently at %llu/%llu",
+                                __func__, iter.pos.offset, ca->mi.nbuckets);
+                       last_updated = jiffies;
+               }
+
+               bch2_trans_begin(trans);
+
+               if (bkey_ge(iter.pos, end)) {
+                       ret = 0;
+                       break;
+               }
+
+               k = bch2_get_key_or_hole(&iter, end, &hole);
+               ret = bkey_err(k);
                if (ret)
-                       goto stop;
+                       goto bkey_err;
+
+               if (k.k->type) {
+                       /*
+                        * We process live keys in the alloc btree one at a
+                        * time:
+                        */
+                       struct bch_alloc_v4 a_convert;
+                       const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+                       ret =   bch2_bucket_do_index(trans, k, a, true) ?:
+                               bch2_trans_commit(trans, NULL, NULL,
+                                                 BCH_TRANS_COMMIT_no_enospc);
+                       if (ret)
+                               goto bkey_err;
 
-               while (!ca->alloc_heap.used) {
-                       cond_resched();
+                       bch2_btree_iter_advance(&iter);
+               } else {
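+                       /*
+                        * A hole in the alloc btree is a run of buckets with
+                        * no alloc key, i.e. empty buckets: cover the whole
+                        * run with a single freespace key:
+                        */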
+                       struct bkey_i *freespace;
 
-                       ret = kthread_wait_freezable(buckets_available(ca, gc_count));
+                       freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
+                       ret = PTR_ERR_OR_ZERO(freespace);
                        if (ret)
-                               goto stop;
+                               goto bkey_err;
 
-                       gc_count = c->gc_count;
-                       nr = find_reclaimable_buckets(c, ca);
+                       bkey_init(&freespace->k);
+                       freespace->k.type       = KEY_TYPE_set;
+                       freespace->k.p          = k.k->p;
+                       freespace->k.size       = k.k->size;
 
-                       trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
-                                        ca->inc_gen_really_needs_gc);
+                       ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
+                               bch2_trans_commit(trans, NULL, NULL,
+                                                 BCH_TRANS_COMMIT_no_enospc);
+                       if (ret)
+                               goto bkey_err;
 
-                       if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
-                            ca->inc_gen_really_needs_gc) &&
-                           c->gc_thread) {
-                               atomic_inc(&c->kick_gc);
-                               wake_up_process(c->gc_thread);
-                       }
+                       bch2_btree_iter_set_pos(&iter, k.k->p);
                }
-
-               ret = bch2_invalidate_buckets(c, ca);
+bkey_err:
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
                if (ret)
-                       goto stop;
+                       break;
+       }
+
+       bch2_trans_iter_exit(trans, &iter);
+       bch2_trans_put(trans);
 
-               while (!fifo_empty(&ca->free_inc)) {
-                       u64 b = fifo_peek(&ca->free_inc);
+       if (ret < 0) {
+               bch_err_msg(ca, ret, "initializing free space");
+               return ret;
+       }
 
-                       discard_one_bucket(c, ca, b);
+       mutex_lock(&c->sb_lock);
+       m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+       SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
+       mutex_unlock(&c->sb_lock);
 
-                       ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
-                       if (ret)
-                               goto stop;
+       return 0;
+}
+
+int bch2_fs_freespace_init(struct bch_fs *c)
+{
+       int ret = 0;
+       bool doing_init = false;
+
+       /*
+        * We can crash during the device add path, so we need to check this on
+        * every mount:
+        */
+
+       for_each_member_device(c, ca) {
+               if (ca->mi.freespace_initialized)
+                       continue;
+
+               if (!doing_init) {
+                       bch_info(c, "initializing freespace");
+                       doing_init = true;
+               }
+
+               ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
+               if (ret) {
+                       percpu_ref_put(&ca->ref);
+                       bch_err_fn(c, ret);
+                       return ret;
                }
        }
-stop:
-       alloc_thread_set_state(ca, ALLOCATOR_stopped);
+
+       if (doing_init) {
+               mutex_lock(&c->sb_lock);
+               bch2_write_super(c);
+               mutex_unlock(&c->sb_lock);
+               bch_verbose(c, "done initializing freespace");
+       }
+
        return 0;
 }
 
+/* Bucket IO clocks: */
+
+int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
+                             size_t bucket_nr, int rw)
+{
+       struct bch_fs *c = trans->c;
+       struct btree_iter iter;
+       struct bkey_i_alloc_v4 *a;
+       u64 now;
+       int ret = 0;
+
+       a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
+       ret = PTR_ERR_OR_ZERO(a);
+       if (ret)
+               return ret;
+
+       now = atomic64_read(&c->io_clock[rw].now);
+       if (a->v.io_time[rw] == now)
+               goto out;
+
+       a->v.io_time[rw] = now;
+
+       ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+               bch2_trans_commit(trans, NULL, NULL, 0);
+out:
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
+}
+
 /* Startup/shutdown (ro/rw): */
 
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
-       unsigned i, j;
 
        lockdep_assert_held(&c->state_lock);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
 
                ra_pages += bdi->ra_pages;
@@ -1025,7 +2089,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
        bch2_set_ra_pages(c, ra_pages);
 
-       for_each_rw_member(ca, c, i) {
+       for_each_rw_member(c, ca) {
                u64 dev_reserve = 0;
 
                /*
@@ -1044,8 +2108,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
                 * allocations for foreground writes must wait -
                 * not -ENOSPC calculations.
                 */
-               for (j = 0; j < RESERVE_NONE; j++)
-                       dev_reserve += ca->free[j].size;
+
+               dev_reserve += ca->nr_btree_reserve * 2;
+               dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
 
                dev_reserve += 1;       /* btree write point */
                dev_reserve += 1;       /* copygc write point */
@@ -1078,6 +2143,15 @@ void bch2_recalc_capacity(struct bch_fs *c)
        closure_wake_up(&c->freelist_wait);
 }
 
+u64 bch2_min_rw_member_capacity(struct bch_fs *c)
+{
+       u64 ret = U64_MAX;
+
+       for_each_rw_member(c, ca)
+               ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
+       return ret;
+}
+
 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
 {
        struct open_bucket *ob;
@@ -1088,7 +2162,7 @@ static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
             ob++) {
                spin_lock(&ob->lock);
                if (ob->valid && !ob->on_partial_list &&
-                   ob->ptr.dev == ca->dev_idx)
+                   ob->dev == ca->dev_idx)
                        ret = true;
                spin_unlock(&ob->lock);
        }
@@ -1101,8 +2175,6 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 {
        unsigned i;
 
-       BUG_ON(ca->alloc_thread);
-
        /* First, remove device from allocation groups: */
 
        for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
@@ -1113,40 +2185,7 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
         */
        bch2_recalc_capacity(c);
 
-       /* Next, close write points that point to this device... */
-       for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
-               bch2_writepoint_stop(c, ca, &c->write_points[i]);
-
-       bch2_writepoint_stop(c, ca, &c->copygc_write_point);
-       bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
-       bch2_writepoint_stop(c, ca, &c->btree_write_point);
-
-       mutex_lock(&c->btree_reserve_cache_lock);
-       while (c->btree_reserve_cache_nr) {
-               struct btree_alloc *a =
-                       &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
-               bch2_open_buckets_put(c, &a->ob);
-       }
-       mutex_unlock(&c->btree_reserve_cache_lock);
-
-       while (1) {
-               struct open_bucket *ob;
-
-               spin_lock(&c->freelist_lock);
-               if (!ca->open_buckets_partial_nr) {
-                       spin_unlock(&c->freelist_lock);
-                       break;
-               }
-               ob = c->open_buckets +
-                       ca->open_buckets_partial[--ca->open_buckets_partial_nr];
-               ob->on_partial_list = false;
-               spin_unlock(&c->freelist_lock);
-
-               bch2_open_bucket_put(c, ob);
-       }
-
-       bch2_ec_stop_dev(c, ca);
+       bch2_open_buckets_stop(c, ca, false);
 
        /*
         * Wake up threads that were blocked on allocation, so they can notice
@@ -1176,81 +2215,9 @@ void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
                        set_bit(ca->dev_idx, c->rw_devs[i].d);
 }
 
-void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
-{
-       if (ca->alloc_thread)
-               closure_wait_event(&c->freelist_wait,
-                                  ca->allocator_state != ALLOCATOR_running);
-}
-
-/* stop allocator thread: */
-void bch2_dev_allocator_stop(struct bch_dev *ca)
-{
-       struct task_struct *p;
-
-       p = rcu_dereference_protected(ca->alloc_thread, 1);
-       ca->alloc_thread = NULL;
-
-       /*
-        * We need an rcu barrier between setting ca->alloc_thread = NULL and
-        * the thread shutting down to avoid bch2_wake_allocator() racing:
-        *
-        * XXX: it would be better to have the rcu barrier be asynchronous
-        * instead of blocking us here
-        */
-       synchronize_rcu();
-
-       if (p) {
-               kthread_stop(p);
-               put_task_struct(p);
-       }
-}
-
-/* start allocator thread: */
-int bch2_dev_allocator_start(struct bch_dev *ca)
-{
-       struct task_struct *p;
-
-       /*
-        * allocator thread already started?
-        */
-       if (ca->alloc_thread)
-               return 0;
-
-       p = kthread_create(bch2_allocator_thread, ca,
-                          "bch-alloc/%s", ca->name);
-       if (IS_ERR(p)) {
-               bch_err(ca->fs, "error creating allocator thread: %li",
-                       PTR_ERR(p));
-               return PTR_ERR(p);
-       }
-
-       get_task_struct(p);
-       rcu_assign_pointer(ca->alloc_thread, p);
-       wake_up_process(p);
-       return 0;
-}
-
 void bch2_fs_allocator_background_init(struct bch_fs *c)
 {
        spin_lock_init(&c->freelist_lock);
-}
-
-void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
-{
-       struct open_bucket *ob;
-
-       for (ob = c->open_buckets;
-            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
-            ob++) {
-               spin_lock(&ob->lock);
-               if (ob->valid && !ob->on_partial_list) {
-                       pr_buf(out, "%zu ref %u type %s\n",
-                              ob - c->open_buckets,
-                              atomic_read(&ob->pin),
-                              bch2_data_types[ob->type]);
-               }
-               spin_unlock(&ob->lock);
-       }
-
+       INIT_WORK(&c->discard_work, bch2_do_discards_work);
+       INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
 }