Move c_src dirs back to toplevel
diff --git a/libbcachefs/alloc_background.c b/libbcachefs/alloc_background.c
index 4b38fbd9d913758242f082b3654100cf3bed0294..a09b9d00226a4e1dd510c0c097ac59e7cb7d3c77 100644
--- a/libbcachefs/alloc_background.c
+++ b/libbcachefs/alloc_background.c
@@ -9,6 +9,7 @@
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_gc.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
 #include "buckets_waiting_for_journal.h"
 #include "clock.h"
@@ -17,6 +18,7 @@
 #include "error.h"
 #include "lru.h"
 #include "recovery.h"
+#include "trace.h"
 #include "varint.h"
 
 #include <linux/kthread.h>
@@ -26,7 +28,6 @@
 #include <linux/rcupdate.h>
 #include <linux/sched/task.h>
 #include <linux/sort.h>
-#include <trace/events/bcachefs.h>
 
 /* Persistent alloc info: */
 
@@ -78,36 +79,6 @@ static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
        return v;
 }
 
-static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
-                                     unsigned field, u64 v)
-{
-       unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
-
-       if (!v)
-               return;
-
-       a->v.fields |= 1 << field;
-
-       switch (bytes) {
-       case 1:
-               *((u8 *) *p) = v;
-               break;
-       case 2:
-               *((__le16 *) *p) = cpu_to_le16(v);
-               break;
-       case 4:
-               *((__le32 *) *p) = cpu_to_le32(v);
-               break;
-       case 8:
-               *((__le64 *) *p) = cpu_to_le64(v);
-               break;
-       default:
-               BUG();
-       }
-
-       *p += bytes;
-}
-
 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
                                 struct bkey_s_c k)
 {
@@ -210,31 +181,6 @@ static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
        return ret;
 }
 
-struct bkey_i_alloc_v4 *
-bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
-                             struct bpos pos)
-{
-       struct bkey_s_c k;
-       struct bkey_i_alloc_v4 *a;
-       int ret;
-
-       bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
-                            BTREE_ITER_WITH_UPDATES|
-                            BTREE_ITER_CACHED|
-                            BTREE_ITER_INTENT);
-       k = bch2_btree_iter_peek_slot(iter);
-       ret = bkey_err(k);
-       if (ret) {
-               bch2_trans_iter_exit(trans, iter);
-               return ERR_PTR(ret);
-       }
-
-       a = bch2_alloc_to_v4_mut(trans, k);
-       if (IS_ERR(a))
-               bch2_trans_iter_exit(trans, iter);
-       return a;
-}
-
 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
 {
        unsigned i, bytes = offsetof(struct bch_alloc, data);
@@ -246,127 +192,106 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
        return DIV_ROUND_UP(bytes, sizeof(u64));
 }
 
-int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
-                         int rw, struct printbuf *err)
+int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
+       int ret = 0;
 
        /* allow for unknown fields */
-       if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
-               prt_printf(err, "incorrect value size (%zu < %u)",
-                      bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
-               return -EINVAL;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
+                        alloc_v1_val_size_bad,
+                        "incorrect value size (%zu < %u)",
+                        bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+fsck_err:
+       return ret;
 }
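/*
 * A hedged note on the pattern above (the macro is defined elsewhere in
 * bcachefs): bkey_fsck_err_on(cond, ...) is assumed to emit the given
 * message and jump to the local fsck_err label with ret set when cond
 * holds, which is why these _invalid functions share the
 * "fsck_err: return ret;" tail.
 */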
 
-int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
-                         int rw, struct printbuf *err)
+int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
        struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       if (bch2_alloc_unpack_v2(&u, k)) {
-               prt_printf(err, "unpack error");
-               return -EINVAL;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
-                         int rw, struct printbuf *err)
+int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags,
+                         struct printbuf *err)
 {
        struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       if (bch2_alloc_unpack_v3(&u, k)) {
-               prt_printf(err, "unpack error");
-               return -EINVAL;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
-                         int rw, struct printbuf *err)
+int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
+                         enum bkey_invalid_flags flags, struct printbuf *err)
 {
        struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+       int ret = 0;
 
-       if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
-               prt_printf(err, "bad val size (%lu != %u)",
-                      bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
-               return -EINVAL;
-       }
-
-       if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
-           BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
-               prt_printf(err, "invalid backpointers_start");
-               return -EINVAL;
-       }
+       bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
+                        alloc_v4_val_size_bad,
+                        "bad val size (%u > %zu)",
+                        alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
 
-       if (rw == WRITE) {
-               if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
-                       prt_printf(err, "invalid data type (got %u should be %u)",
-                              a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
-                       return -EINVAL;
-               }
+       bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+                        BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
+                        alloc_v4_backpointers_start_bad,
+                        "invalid backpointers_start");
 
-               switch (a.v->data_type) {
-               case BCH_DATA_free:
-               case BCH_DATA_need_gc_gens:
-               case BCH_DATA_need_discard:
-                       if (a.v->dirty_sectors ||
-                           a.v->cached_sectors ||
-                           a.v->stripe) {
-                               prt_printf(err, "empty data type free but have data");
-                               return -EINVAL;
-                       }
-                       break;
-               case BCH_DATA_sb:
-               case BCH_DATA_journal:
-               case BCH_DATA_btree:
-               case BCH_DATA_user:
-               case BCH_DATA_parity:
-                       if (!a.v->dirty_sectors) {
-                               prt_printf(err, "data_type %s but dirty_sectors==0",
-                                      bch2_data_types[a.v->data_type]);
-                               return -EINVAL;
-                       }
-                       break;
-               case BCH_DATA_cached:
-                       if (!a.v->cached_sectors ||
-                           a.v->dirty_sectors ||
-                           a.v->stripe) {
-                               prt_printf(err, "data type inconsistency");
-                               return -EINVAL;
-                       }
+       bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
+                        alloc_key_data_type_bad,
+                        "invalid data type (got %u should be %u)",
+                        a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
 
-                       if (!a.v->io_time[READ] &&
-                           test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
-                               prt_printf(err, "cached bucket with read_time == 0");
-                               return -EINVAL;
-                       }
-                       break;
-               case BCH_DATA_stripe:
-                       if (!a.v->stripe) {
-                               prt_printf(err, "data_type %s but stripe==0",
-                                      bch2_data_types[a.v->data_type]);
-                               return -EINVAL;
-                       }
-                       break;
-               }
+       switch (a.v->data_type) {
+       case BCH_DATA_free:
+       case BCH_DATA_need_gc_gens:
+       case BCH_DATA_need_discard:
+               bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
+                                c, err, alloc_key_empty_but_have_data,
+                                "empty data type free but have data");
+               break;
+       case BCH_DATA_sb:
+       case BCH_DATA_journal:
+       case BCH_DATA_btree:
+       case BCH_DATA_user:
+       case BCH_DATA_parity:
+               bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
+                                c, err, alloc_key_dirty_sectors_0,
+                                "data_type %s but dirty_sectors==0",
+                                bch2_data_types[a.v->data_type]);
+               break;
+       case BCH_DATA_cached:
+               bkey_fsck_err_on(!a.v->cached_sectors ||
+                                bch2_bucket_sectors_dirty(*a.v) ||
+                                a.v->stripe,
+                                c, err, alloc_key_cached_inconsistency,
+                                "data type inconsistency");
+
+               bkey_fsck_err_on(!a.v->io_time[READ] &&
+                                c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
+                                c, err, alloc_key_cached_but_read_time_zero,
+                                "cached bucket with read_time == 0");
+               break;
+       case BCH_DATA_stripe:
+               break;
        }
-
-       return 0;
-}
-
-static inline u64 swab40(u64 x)
-{
-       return (((x & 0x00000000ffULL) << 32)|
-               ((x & 0x000000ff00ULL) << 16)|
-               ((x & 0x0000ff0000ULL) >>  0)|
-               ((x & 0x00ff000000ULL) >> 16)|
-               ((x & 0xff00000000ULL) >> 32));
+fsck_err:
+       return ret;
 }
 
 void bch2_alloc_v4_swab(struct bkey_s k)
@@ -382,6 +307,7 @@ void bch2_alloc_v4_swab(struct bkey_s k)
        a->io_time[1]           = swab64(a->io_time[1]);
        a->stripe               = swab32(a->stripe);
        a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+       a->fragmentation_lru    = swab64(a->fragmentation_lru);
 
        bps = alloc_v4_backpointers(a);
        for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
@@ -394,20 +320,17 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
        struct bch_alloc_v4 _a;
-       const struct bch_alloc_v4 *a = &_a;
-       const struct bch_backpointer *bps;
+       const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
        unsigned i;
 
-       if (k.k->type == KEY_TYPE_alloc_v4)
-               a = bkey_s_c_to_alloc_v4(k).v;
-       else
-               bch2_alloc_to_v4(k, &_a);
-
        prt_newline(out);
        printbuf_indent_add(out, 2);
 
        prt_printf(out, "gen %u oldest_gen %u data_type %s",
-              a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
+              a->gen, a->oldest_gen,
+              a->data_type < BCH_DATA_NR
+              ? bch2_data_types[a->data_type]
+              : "(invalid data type)");
        prt_newline(out);
        prt_printf(out, "journal_seq       %llu",       a->journal_seq);
        prt_newline(out);
@@ -427,33 +350,44 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
        prt_newline(out);
        prt_printf(out, "io_time[WRITE]    %llu",       a->io_time[WRITE]);
        prt_newline(out);
-       prt_printf(out, "backpointers:     %llu",       BCH_ALLOC_V4_NR_BACKPOINTERS(a));
-       printbuf_indent_add(out, 2);
+       prt_printf(out, "fragmentation     %llu",       a->fragmentation_lru);
+       prt_newline(out);
+       prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+       prt_newline(out);
+
+       if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
+               struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
+               const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
+
+               prt_printf(out, "backpointers:     %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
+               printbuf_indent_add(out, 2);
 
-       bps = alloc_v4_backpointers_c(a);
-       for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
-               prt_newline(out);
-               bch2_backpointer_to_text(out, &bps[i]);
+               for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
+                       prt_newline(out);
+                       bch2_backpointer_to_text(out, &bps[i]);
+               }
+
+               printbuf_indent_sub(out, 2);
        }
 
-       printbuf_indent_sub(out, 4);
+       printbuf_indent_sub(out, 2);
 }
 
-void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
 {
        if (k.k->type == KEY_TYPE_alloc_v4) {
-               int d;
+               void *src, *dst;
 
                *out = *bkey_s_c_to_alloc_v4(k).v;
 
-               d = (int) BCH_ALLOC_V4_U64s -
-                       (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
-               if (unlikely(d > 0)) {
-                       memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
-                              0,
-                              d * sizeof(u64));
-                       SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
-               }
+               src = alloc_v4_backpointers(out);
+               SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+               dst = alloc_v4_backpointers(out);
+
+               if (src < dst)
+                       memset(src, 0, dst - src);
+
+               SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
        } else {
                struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
 
@@ -475,36 +409,29 @@ void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
        }
 }
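/*
 * The src/dst dance above upgrades older v4 keys in place: if
 * BCH_ALLOC_V4_BACKPOINTERS_START() was an older, smaller value, moving
 * it up to BCH_ALLOC_V4_U64s leaves a gap [src, dst) of newly-covered
 * fields, which is zeroed so they read as unset.
 */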
 
-struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
+static noinline struct bkey_i_alloc_v4 *
+__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
 {
-       unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
-               ? bkey_bytes(k.k)
-               : sizeof(struct bkey_i_alloc_v4);
        struct bkey_i_alloc_v4 *ret;
 
-       /*
-        * Reserve space for one more backpointer here:
-        * Not sketchy at doing it this way, nope...
-        */
-       ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
+       ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
        if (IS_ERR(ret))
                return ret;
 
        if (k.k->type == KEY_TYPE_alloc_v4) {
-               bkey_reassemble(&ret->k_i, k);
+               void *src, *dst;
 
-               if (BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v) < BCH_ALLOC_V4_U64s) {
-                       struct bch_backpointer *src, *dst;
+               bkey_reassemble(&ret->k_i, k);
 
-                       src = alloc_v4_backpointers(&ret->v);
-                       SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
-                       dst = alloc_v4_backpointers(&ret->v);
+               src = alloc_v4_backpointers(&ret->v);
+               SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
+               dst = alloc_v4_backpointers(&ret->v);
 
-                       memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
-                               sizeof(struct bch_backpointer));
+               if (src < dst)
                        memset(src, 0, dst - src);
-                       set_alloc_v4_u64s(ret);
-               }
+
+               SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
+               set_alloc_v4_u64s(ret);
        } else {
                bkey_alloc_v4_init(&ret->k_i);
                ret->k.p = k.k->p;
@@ -513,19 +440,106 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct b
        return ret;
 }
 
-int bch2_alloc_read(struct bch_fs *c)
+static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
+{
+       struct bkey_s_c_alloc_v4 a;
+
+       if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
+           ((a = bkey_s_c_to_alloc_v4(k), true) &&
+            BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
+               return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
+
+       return __bch2_alloc_to_v4_mut(trans, k);
+}
+
+struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+       return bch2_alloc_to_v4_mut_inlined(trans, k);
+}
+
+struct bkey_i_alloc_v4 *
+bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
+                             struct bpos pos)
 {
-       struct btree_trans trans;
-       struct btree_iter iter;
        struct bkey_s_c k;
-       struct bch_alloc_v4 a;
-       struct bch_dev *ca;
+       struct bkey_i_alloc_v4 *a;
        int ret;
 
-       bch2_trans_init(&trans, c, 0, 0);
+       k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
+                            BTREE_ITER_WITH_UPDATES|
+                            BTREE_ITER_CACHED|
+                            BTREE_ITER_INTENT);
+       ret = bkey_err(k);
+       if (unlikely(ret))
+               return ERR_PTR(ret);
+
+       a = bch2_alloc_to_v4_mut_inlined(trans, k);
+       ret = PTR_ERR_OR_ZERO(a);
+       if (unlikely(ret))
+               goto err;
+       return a;
+err:
+       bch2_trans_iter_exit(trans, iter);
+       return ERR_PTR(ret);
+}
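/*
 * Hedged usage sketch (illustrative, not part of this patch): callers
 * pair bch2_trans_start_alloc_update() with bch2_trans_update(), and on
 * success must exit the iterator themselves (bucket here is any valid
 * alloc btree pos):
 *
 *	struct btree_iter iter;
 *	struct bkey_i_alloc_v4 *a =
 *		bch2_trans_start_alloc_update(trans, &iter, bucket);
 *	int ret = PTR_ERR_OR_ZERO(a);
 *
 *	if (!ret) {
 *		a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
 *		ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
 *		      bch2_trans_commit(trans, NULL, NULL,
 *					BCH_TRANS_COMMIT_no_enospc);
 *		bch2_trans_iter_exit(trans, &iter);
 *	}
 */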
+
+static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
+{
+       *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
+
+       pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
+       return pos;
+}
+
+static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
+{
+       pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
+       pos.offset += offset;
+       return pos;
+}
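/*
 * Worked example (assuming KEY_TYPE_BUCKET_GENS_BITS == 8, i.e. 256 gens
 * per key): alloc_gens_pos(POS(3, 1000), &offset) returns POS(3, 3) with
 * offset == 232 (1000 >> 8 == 3, 1000 & 255 == 232), and
 * bucket_gens_pos_to_alloc(POS(3, 3), 232) maps back to POS(3, 1000).
 */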
+
+static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
+{
+       return k.k->type == KEY_TYPE_bucket_gens
+               ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
+               : 0;
+}
+
+int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
+                            enum bkey_invalid_flags flags,
+                            struct printbuf *err)
+{
+       int ret = 0;
+
+       bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
+                        bucket_gens_val_size_bad,
+                        "bad val size (%zu != %zu)",
+                        bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+fsck_err:
+       return ret;
+}
+
+void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
+{
+       struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
+       unsigned i;
+
+       for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
+               if (i)
+                       prt_char(out, ' ');
+               prt_printf(out, "%u", g.v->gens[i]);
+       }
+}
+
+int bch2_bucket_gens_init(struct bch_fs *c)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct bkey_i_bucket_gens g;
+       bool have_bucket_gens_key = false;
+       int ret;
 
-       for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
-                          BTREE_ITER_PREFETCH, k, ret) {
+       ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+                                BTREE_ITER_PREFETCH, k, ({
                /*
                 * Not a fsck error because this is checked/repaired by
                 * bch2_check_alloc_key() which runs later:
@@ -533,18 +547,96 @@ int bch2_alloc_read(struct bch_fs *c)
                if (!bch2_dev_bucket_exists(c, k.k->p))
                        continue;
 
-               ca = bch_dev_bkey_exists(c, k.k->p.inode);
-               bch2_alloc_to_v4(k, &a);
+               struct bch_alloc_v4 a;
+               u8 gen = bch2_alloc_to_v4(k, &a)->gen;
+               unsigned offset;
+               struct bpos pos = alloc_gens_pos(iter.pos, &offset);
 
-               *bucket_gen(ca, k.k->p.offset) = a.gen;
-       }
-       bch2_trans_iter_exit(&trans, &iter);
+               if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
+                       ret = commit_do(trans, NULL, NULL,
+                                       BCH_TRANS_COMMIT_no_enospc,
+                               bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+                       if (ret)
+                               break;
+                       have_bucket_gens_key = false;
+               }
 
-       bch2_trans_exit(&trans);
+               if (!have_bucket_gens_key) {
+                       bkey_bucket_gens_init(&g.k_i);
+                       g.k.p = pos;
+                       have_bucket_gens_key = true;
+               }
 
-       if (ret)
-               bch_err(c, "error reading alloc info: %i", ret);
+               g.v.gens[offset] = gen;
+               0;
+       }));
 
+       if (have_bucket_gens_key && !ret)
+               ret = commit_do(trans, NULL, NULL,
+                               BCH_TRANS_COMMIT_no_enospc,
+                       bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
+
+       bch2_trans_put(trans);
+
+       bch_err_fn(c, ret);
+       return ret;
+}
+
+int bch2_alloc_read(struct bch_fs *c)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       int ret;
+
+       down_read(&c->gc_lock);
+
+       if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
+               ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
+                                        BTREE_ITER_PREFETCH, k, ({
+                       u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+                       u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+
+                       if (k.k->type != KEY_TYPE_bucket_gens)
+                               continue;
+
+                       const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
+
+                       /*
+                        * Not a fsck error because this is checked/repaired by
+                        * bch2_check_alloc_key() which runs later:
+                        */
+                       if (!bch2_dev_exists2(c, k.k->p.inode))
+                               continue;
+
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+                       for (u64 b = max_t(u64, ca->mi.first_bucket, start);
+                            b < min_t(u64, ca->mi.nbuckets, end);
+                            b++)
+                               *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
+                       0;
+               }));
+       } else {
+               ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+                                        BTREE_ITER_PREFETCH, k, ({
+                       /*
+                        * Not a fsck error because this is checked/repaired by
+                        * bch2_check_alloc_key() which runs later:
+                        */
+                       if (!bch2_dev_bucket_exists(c, k.k->p))
+                               continue;
+
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+
+                       struct bch_alloc_v4 a;
+                       *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
+                       0;
+               }));
+       }
+
+       bch2_trans_put(trans);
+       up_read(&c->gc_lock);
+
+       bch_err_fn(c, ret);
        return ret;
 }
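/*
 * Worked example for the bucket_gens read path above (again assuming
 * KEY_TYPE_BUCKET_GENS_BITS == 8): a bucket_gens key at 3:3 covers
 * buckets 768..1023 on device 3 (start == 3 << 8, end == 4 << 8), with
 * the copy loop clamping that range to [ca->mi.first_bucket,
 * ca->mi.nbuckets).
 */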
 
@@ -570,7 +662,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
            a->data_type != BCH_DATA_need_discard)
                return 0;
 
-       k = bch2_trans_kmalloc(trans, sizeof(*k));
+       k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
        if (IS_ERR(k))
                return PTR_ERR(k);
 
@@ -591,20 +683,22 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
                return 0;
        }
 
-       bch2_trans_iter_init(trans, &iter, btree,
+       old = bch2_bkey_get_iter(trans, &iter, btree,
                             bkey_start_pos(&k->k),
                             BTREE_ITER_INTENT);
-       old = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(old);
        if (ret)
-               goto err;
+               return ret;
 
        if (ca->mi.freespace_initialized &&
+           c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
            bch2_trans_inconsistent_on(old.k->type != old_type, trans,
-                       "incorrect key when %s %s btree (got %s should be %s)\n"
+                       "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
                        "  for %s",
                        set ? "setting" : "clearing",
-                       bch2_btree_ids[btree],
+                       bch2_btree_id_str(btree),
+                       iter.pos.inode,
+                       iter.pos.offset,
                        bch2_bkey_types[old.k->type],
                        bch2_bkey_types[old_type],
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -619,95 +713,342 @@ err:
        return ret;
 }
 
-int bch2_trans_mark_alloc(struct btree_trans *trans,
-                         enum btree_id btree_id, unsigned level,
-                         struct bkey_s_c old, struct bkey_i *new,
-                         unsigned flags)
+static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
+                                          struct bpos bucket, u8 gen)
+{
+       struct btree_iter iter;
+       unsigned offset;
+       struct bpos pos = alloc_gens_pos(bucket, &offset);
+       struct bkey_i_bucket_gens *g;
+       struct bkey_s_c k;
+       int ret;
+
+       g = bch2_trans_kmalloc(trans, sizeof(*g));
+       ret = PTR_ERR_OR_ZERO(g);
+       if (ret)
+               return ret;
+
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
+                              BTREE_ITER_INTENT|
+                              BTREE_ITER_WITH_UPDATES);
+       ret = bkey_err(k);
+       if (ret)
+               return ret;
+
+       if (k.k->type != KEY_TYPE_bucket_gens) {
+               bkey_bucket_gens_init(&g->k_i);
+               g->k.p = iter.pos;
+       } else {
+               bkey_reassemble(&g->k_i, k);
+       }
+
+       g->v.gens[offset] = gen;
+
+       ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
+}
+
+int bch2_trigger_alloc(struct btree_trans *trans,
+                      enum btree_id btree, unsigned level,
+                      struct bkey_s_c old, struct bkey_s new,
+                      unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bch_alloc_v4 old_a, *new_a;
-       u64 old_lru, new_lru;
        int ret = 0;
 
-       /*
-        * Deletion only happens in the device removal path, with
-        * BTREE_TRIGGER_NORUN:
-        */
-       BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
+       if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
+                                      "alloc key for invalid device or bucket"))
+               return -EIO;
 
-       bch2_alloc_to_v4(old, &old_a);
-       new_a = &bkey_i_to_alloc_v4(new)->v;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
 
-       new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+       struct bch_alloc_v4 old_a_convert;
+       const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
 
-       if (new_a->dirty_sectors > old_a.dirty_sectors ||
-           new_a->cached_sectors > old_a.cached_sectors) {
-               new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-               new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
-               SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
-               SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
-       }
+       if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+
+               new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
+
+               if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+                       new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
+                       SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
+               }
+
+               if (data_type_is_empty(new_a->data_type) &&
+                   BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
+                   !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
+                       new_a->gen++;
+                       SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+               }
+
+               if (old_a->data_type != new_a->data_type ||
+                   (new_a->data_type == BCH_DATA_free &&
+                    alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+                       ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
+                               bch2_bucket_do_index(trans, new.s_c, new_a, true);
+                       if (ret)
+                               return ret;
+               }
+
+               if (new_a->data_type == BCH_DATA_cached &&
+                   !new_a->io_time[READ])
+                       new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+
+               u64 old_lru = alloc_lru_idx_read(*old_a);
+               u64 new_lru = alloc_lru_idx_read(*new_a);
+               if (old_lru != new_lru) {
+                       ret = bch2_lru_change(trans, new.k->p.inode,
+                                             bucket_to_u64(new.k->p),
+                                             old_lru, new_lru);
+                       if (ret)
+                               return ret;
+               }
+
+               new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+                                               bch_dev_bkey_exists(c, new.k->p.inode));
+               if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+                       ret = bch2_lru_change(trans,
+                                       BCH_LRU_FRAGMENTATION_START,
+                                       bucket_to_u64(new.k->p),
+                                       old_a->fragmentation_lru, new_a->fragmentation_lru);
+                       if (ret)
+                               return ret;
+               }
 
-       if (data_type_is_empty(new_a->data_type) &&
-           BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
-           !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
-               new_a->gen++;
-               SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
+               if (old_a->gen != new_a->gen) {
+                       ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
+                       if (ret)
+                               return ret;
+               }
+
+               /*
+                * need to know if we're getting called from the invalidate path or
+                * not:
+                */
+
+               if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+                   old_a->cached_sectors) {
+                       ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
+                                                             -((s64) old_a->cached_sectors));
+                       if (ret)
+                               return ret;
+               }
        }
 
-       if (old_a.data_type != new_a->data_type ||
-           (new_a->data_type == BCH_DATA_free &&
-            alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
-               ret =   bch2_bucket_do_index(trans, old, &old_a, false) ?:
-                       bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
-               if (ret)
-                       return ret;
+       if (!(flags & BTREE_TRIGGER_TRANSACTIONAL) && (flags & BTREE_TRIGGER_INSERT)) {
+               struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+               u64 journal_seq = trans->journal_res.seq;
+               u64 bucket_journal_seq = new_a->journal_seq;
+
+               if ((flags & BTREE_TRIGGER_INSERT) &&
+                   data_type_is_empty(old_a->data_type) !=
+                   data_type_is_empty(new_a->data_type) &&
+                   new.k->type == KEY_TYPE_alloc_v4) {
+                       struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
+
+                       /*
+                        * If the btree updates referring to a bucket weren't flushed
+                        * before the bucket became empty again, then we don't have to
+                        * wait on a journal flush before we can reuse the bucket:
+                        */
+                       v->journal_seq = bucket_journal_seq =
+                               data_type_is_empty(new_a->data_type) &&
+                               (journal_seq == v->journal_seq ||
+                                bch2_journal_noflush_seq(&c->journal, v->journal_seq))
+                               ? 0 : journal_seq;
+               }
+
+               if (!data_type_is_empty(old_a->data_type) &&
+                   data_type_is_empty(new_a->data_type) &&
+                   bucket_journal_seq) {
+                       ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                                       c->journal.flushed_seq_ondisk,
+                                       new.k->p.inode, new.k->p.offset,
+                                       bucket_journal_seq);
+                       if (ret) {
+                               bch2_fs_fatal_error(c,
+                                       "error setting bucket_needs_journal_commit: %i", ret);
+                               return ret;
+                       }
+               }
+
+               percpu_down_read(&c->mark_lock);
+               if (new_a->gen != old_a->gen)
+                       *bucket_gen(ca, new.k->p.offset) = new_a->gen;
+
+               bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
+
+               if (new_a->data_type == BCH_DATA_free &&
+                   (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
+                       closure_wake_up(&c->freelist_wait);
+
+               if (new_a->data_type == BCH_DATA_need_discard &&
+                   (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
+                       bch2_do_discards(c);
+
+               if (old_a->data_type != BCH_DATA_cached &&
+                   new_a->data_type == BCH_DATA_cached &&
+                   should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
+                       bch2_do_invalidates(c);
+
+               if (new_a->data_type == BCH_DATA_need_gc_gens)
+                       bch2_do_gc_gens(c);
+               percpu_up_read(&c->mark_lock);
        }
 
-       if (new_a->data_type == BCH_DATA_cached &&
-           !new_a->io_time[READ])
-               new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
+       if ((flags & BTREE_TRIGGER_GC) &&
+           (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+               struct bch_alloc_v4 new_a_convert;
+               const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
 
-       old_lru = alloc_lru_idx(old_a);
-       new_lru = alloc_lru_idx(*new_a);
+               percpu_down_read(&c->mark_lock);
+               struct bucket *g = gc_bucket(ca, new.k->p.offset);
 
-       if (old_lru != new_lru) {
-               ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
-                                     old_lru, &new_lru, old);
-               if (ret)
-                       return ret;
+               bucket_lock(g);
 
-               if (new_a->data_type == BCH_DATA_cached)
-                       new_a->io_time[READ] = new_lru;
+               g->gen_valid            = 1;
+               g->gen                  = new_a->gen;
+               g->data_type            = new_a->data_type;
+               g->stripe               = new_a->stripe;
+               g->stripe_redundancy    = new_a->stripe_redundancy;
+               g->dirty_sectors        = new_a->dirty_sectors;
+               g->cached_sectors       = new_a->cached_sectors;
+
+               bucket_unlock(g);
+               percpu_up_read(&c->mark_lock);
        }
 
        return 0;
 }
 
-static int bch2_check_alloc_key(struct btree_trans *trans,
-                               struct btree_iter *alloc_iter,
-                               struct btree_iter *discard_iter,
-                               struct btree_iter *freespace_iter)
+/*
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS on
+ * extents-style btrees, but works on non-extents btrees:
+ */
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+{
+       struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+       if (bkey_err(k))
+               return k;
+
+       if (k.k->type) {
+               return k;
+       } else {
+               struct btree_iter iter2;
+               struct bpos next;
+
+               bch2_trans_copy_iter(&iter2, iter);
+
+               struct btree_path *path = btree_iter_path(iter->trans, iter);
+               if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
+                       end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
+
+               end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
+
+               /*
+                * btree node min/max is a closed interval; peek_upto takes
+                * a half-open interval:
+                */
+               k = bch2_btree_iter_peek_upto(&iter2, end);
+               next = iter2.pos;
+               bch2_trans_iter_exit(iter->trans, &iter2);
+
+               BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
+
+               if (bkey_err(k))
+                       return k;
+
+               bkey_init(hole);
+               hole->p = iter->pos;
+
+               bch2_key_resize(hole, next.offset - iter->pos.offset);
+               return (struct bkey_s_c) { hole, NULL };
+       }
+}
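/*
 * Illustrative behavior (assumed from the code above): with alloc keys at
 * offsets 10 and 20 and the iterator positioned at 11, this returns a
 * synthesized KEY_TYPE_deleted "hole" with start 11, size 9 and end pos
 * 20, i.e. it covers the half-open interval [11, 20).
 */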
+
+static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
+{
+       struct bch_dev *ca;
+
+       if (bch2_dev_bucket_exists(c, *bucket))
+               return true;
+
+       if (bch2_dev_exists2(c, bucket->inode)) {
+               ca = bch_dev_bkey_exists(c, bucket->inode);
+
+               if (bucket->offset < ca->mi.first_bucket) {
+                       bucket->offset = ca->mi.first_bucket;
+                       return true;
+               }
+
+               bucket->inode++;
+               bucket->offset = 0;
+       }
+
+       rcu_read_lock();
+       ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
+       if (ca)
+               *bucket = POS(ca->dev_idx, ca->mi.first_bucket);
+       rcu_read_unlock();
+
+       return ca != NULL;
+}
+
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
+{
+       struct bch_fs *c = iter->trans->c;
+       struct bkey_s_c k;
+again:
+       k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+       if (bkey_err(k))
+               return k;
+
+       if (!k.k->type) {
+               struct bpos bucket = bkey_start_pos(k.k);
+
+               if (!bch2_dev_bucket_exists(c, bucket)) {
+                       if (!next_bucket(c, &bucket))
+                               return bkey_s_c_null;
+
+                       bch2_btree_iter_set_pos(iter, bucket);
+                       goto again;
+               }
+
+               if (!bch2_dev_bucket_exists(c, k.k->p)) {
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+
+                       bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
+               }
+       }
+
+       return k;
+}
+
+static noinline_for_stack
+int bch2_check_alloc_key(struct btree_trans *trans,
+                        struct bkey_s_c alloc_k,
+                        struct btree_iter *alloc_iter,
+                        struct btree_iter *discard_iter,
+                        struct btree_iter *freespace_iter,
+                        struct btree_iter *bucket_gens_iter)
 {
        struct bch_fs *c = trans->c;
        struct bch_dev *ca;
-       struct bch_alloc_v4 a;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
        unsigned discard_key_type, freespace_key_type;
-       struct bkey_s_c alloc_k, k;
+       unsigned gens_offset;
+       struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;
 
-       alloc_k = bch2_dev_bucket_exists(c, alloc_iter->pos)
-               ? bch2_btree_iter_peek_slot(alloc_iter)
-               : bch2_btree_iter_peek(alloc_iter);
-       if (!alloc_k.k)
-               return 1;
-
-       ret = bkey_err(alloc_k);
-       if (ret)
-               return ret;
-
        if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+                       alloc_key_to_missing_dev_bucket,
                        "alloc key for invalid device:bucket %llu:%llu",
                        alloc_k.k->p.inode, alloc_k.k->p.offset))
                return bch2_btree_delete_at(trans, alloc_iter, 0);
@@ -716,16 +1057,10 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
        if (!ca->mi.freespace_initialized)
                return 0;
 
-       bch2_alloc_to_v4(alloc_k, &a);
-
-       discard_key_type = a.data_type == BCH_DATA_need_discard
-               ? KEY_TYPE_set : 0;
-       freespace_key_type = a.data_type == BCH_DATA_free
-               ? KEY_TYPE_set : 0;
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
+       discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
        bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
-       bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));
-
        k = bch2_btree_iter_peek_slot(discard_iter);
        ret = bkey_err(k);
        if (ret)
@@ -733,7 +1068,8 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
 
        if (k.k->type != discard_key_type &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
+            fsck_err(c, need_discard_key_wrong,
+                     "incorrect key in need_discard btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[discard_key_type],
@@ -754,6 +1090,8 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
                        goto err;
        }
 
+       freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
+       bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
        k = bch2_btree_iter_peek_slot(freespace_iter);
        ret = bkey_err(k);
        if (ret)
@@ -761,7 +1099,8 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
 
        if (k.k->type != freespace_key_type &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
+            fsck_err(c, freespace_key_wrong,
+                     "incorrect key in freespace btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[freespace_key_type],
@@ -783,19 +1122,175 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
                if (ret)
                        goto err;
        }
+
+       bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+       k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       if (a->gen != alloc_gen(k, gens_offset) &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, bucket_gens_key_wrong,
+                     "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+                     "  %s",
+                     alloc_gen(k, gens_offset), a->gen,
+                     (printbuf_reset(&buf),
+                      bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
+               struct bkey_i_bucket_gens *g =
+                       bch2_trans_kmalloc(trans, sizeof(*g));
+
+               ret = PTR_ERR_OR_ZERO(g);
+               if (ret)
+                       goto err;
+
+               if (k.k->type == KEY_TYPE_bucket_gens) {
+                       bkey_reassemble(&g->k_i, k);
+               } else {
+                       bkey_bucket_gens_init(&g->k_i);
+                       g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
+               }
+
+               g->v.gens[gens_offset] = a->gen;
+
+               ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
+               if (ret)
+                       goto err;
+       }
 err:
 fsck_err:
        printbuf_exit(&buf);
        return ret;
 }
 
-static int bch2_check_discard_freespace_key(struct btree_trans *trans,
-                                           struct btree_iter *iter)
+static noinline_for_stack
+int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+                                   struct bpos start,
+                                   struct bpos *end,
+                                   struct btree_iter *freespace_iter)
+{
+       struct bch_fs *c = trans->c;
+       struct bch_dev *ca;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       ca = bch_dev_bkey_exists(c, start.inode);
+       if (!ca->mi.freespace_initialized)
+               return 0;
+
+       bch2_btree_iter_set_pos(freespace_iter, start);
+
+       k = bch2_btree_iter_peek_slot(freespace_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       *end = bkey_min(k.k->p, *end);
+
+       if (k.k->type != KEY_TYPE_set &&
+           (c->opts.reconstruct_alloc ||
+            fsck_err(c, freespace_hole_missing,
+                     "hole in alloc btree missing in freespace btree\n"
+                     "  device %llu buckets %llu-%llu",
+                     freespace_iter->pos.inode,
+                     freespace_iter->pos.offset,
+                     end->offset))) {
+               struct bkey_i *update =
+                       bch2_trans_kmalloc(trans, sizeof(*update));
+
+               ret = PTR_ERR_OR_ZERO(update);
+               if (ret)
+                       goto err;
+
+               bkey_init(&update->k);
+               update->k.type  = KEY_TYPE_set;
+               update->k.p     = freespace_iter->pos;
+               bch2_key_resize(&update->k,
+                               min_t(u64, U32_MAX, end->offset -
+                                     freespace_iter->pos.offset));
+
+               ret = bch2_trans_update(trans, freespace_iter, update, 0);
+               if (ret)
+                       goto err;
+       }
+err:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
+
+static noinline_for_stack
+int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+                                     struct bpos start,
+                                     struct bpos *end,
+                                     struct btree_iter *bucket_gens_iter)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
+       unsigned i, gens_offset, gens_end_offset;
+       int ret;
+
+       bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+
+       k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
+                    alloc_gens_pos(*end,  &gens_end_offset)))
+               gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
+
+       if (k.k->type == KEY_TYPE_bucket_gens) {
+               struct bkey_i_bucket_gens g;
+               bool need_update = false;
+
+               bkey_reassemble(&g.k_i, k);
+
+               for (i = gens_offset; i < gens_end_offset; i++) {
+                       if (fsck_err_on(g.v.gens[i], c,
+                                       bucket_gens_hole_wrong,
+                                       "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
+                                       bucket_gens_pos_to_alloc(k.k->p, i).inode,
+                                       bucket_gens_pos_to_alloc(k.k->p, i).offset,
+                                       g.v.gens[i])) {
+                               g.v.gens[i] = 0;
+                               need_update = true;
+                       }
+               }
+
+               if (need_update) {
+                       struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
+
+                       ret = PTR_ERR_OR_ZERO(u);
+                       if (ret)
+                               goto err;
+
+                       memcpy(u, &g, sizeof(g));
+
+                       ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
+                       if (ret)
+                               goto err;
+               }
+       }
+
+       *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
+err:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
+
+static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
+                                             struct btree_iter *iter)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter;
-       struct bkey_s_c k, freespace_k;
-       struct bch_alloc_v4 a;
+       struct bkey_s_c alloc_k;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
        u64 genbits;
        struct bpos pos;
        enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
@@ -804,119 +1299,246 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
        struct printbuf buf = PRINTBUF;
        int ret;
 
-       freespace_k = bch2_btree_iter_peek(iter);
-       if (!freespace_k.k)
-               return 1;
-
-       ret = bkey_err(freespace_k);
-       if (ret)
-               return ret;
-
        pos = iter->pos;
        pos.offset &= ~(~0ULL << 56);
        genbits = iter->pos.offset & (~0ULL << 56);
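/*
 * Key layout note (follows from the masks above): need_discard/freespace
 * keys store the bucket in the low 56 bits of the offset and the
 * generation bits in the top 8, so an offset of ((u64) g << 56) | b
 * denotes bucket b with genbits g << 56.
 */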
 
-       bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
+       alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
+       ret = bkey_err(alloc_k);
+       if (ret)
+               return ret;
 
        if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+                       need_discard_freespace_key_to_invalid_dev_bucket,
                        "entry in %s btree for nonexistant dev:bucket %llu:%llu",
-                       bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
-               goto delete;
-
-       k = bch2_btree_iter_peek_slot(&alloc_iter);
-       ret = bkey_err(k);
-       if (ret)
-               goto err;
+                       bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
+               goto delete;
 
-       bch2_alloc_to_v4(k, &a);
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-       if (fsck_err_on(a.data_type != state ||
+       if (fsck_err_on(a->data_type != state ||
                        (state == BCH_DATA_free &&
-                        genbits != alloc_freespace_genbits(a)), c,
-                       "%s\n  incorrectly set in %s index (free %u, genbits %llu should be %llu)",
-                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf),
-                       bch2_btree_ids[iter->btree_id],
-                       a.data_type == state,
-                       genbits >> 56, alloc_freespace_genbits(a) >> 56))
+                        genbits != alloc_freespace_genbits(*a)), c,
+                       need_discard_freespace_key_bad,
+                       "%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
+                       (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
+                       bch2_btree_id_str(iter->btree_id),
+                       iter->pos.inode,
+                       iter->pos.offset,
+                       a->data_type == state,
+                       genbits >> 56, alloc_freespace_genbits(*a) >> 56))
                goto delete;
 out:
-err:
 fsck_err:
+       set_btree_iter_dontneed(&alloc_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
 delete:
-       ret = bch2_btree_delete_extent_at(trans, iter,
-                       iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);
+       ret =   bch2_btree_delete_extent_at(trans, iter,
+                       iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                       BCH_TRANS_COMMIT_no_enospc);
        goto out;
 }
 
-int bch2_check_alloc_info(struct bch_fs *c)
+/*
+ * We've already checked that generation numbers in the bucket_gens btree are
+ * valid for buckets that exist; this just checks for keys for nonexistent
+ * buckets.
+ */
+static noinline_for_stack
+int bch2_check_bucket_gens_key(struct btree_trans *trans,
+                              struct btree_iter *iter,
+                              struct bkey_s_c k)
 {
-       struct btree_trans trans;
-       struct btree_iter iter, discard_iter, freespace_iter;
+       struct bch_fs *c = trans->c;
+       struct bkey_i_bucket_gens g;
+       struct bch_dev *ca;
+       u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
+       u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
+       u64 b;
+       bool need_update = false, dev_exists;
+       struct printbuf buf = PRINTBUF;
        int ret = 0;
 
-       bch2_trans_init(&trans, c, 0, 0);
+       BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
+       bkey_reassemble(&g.k_i, k);
+
+       /* if no bch_dev, skip out whether we repair or not */
+       dev_exists = bch2_dev_exists2(c, k.k->p.inode);
+       if (!dev_exists) {
+               if (fsck_err_on(!dev_exists, c,
+                               bucket_gens_to_invalid_dev,
+                               "bucket_gens key for invalid device:\n  %s",
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+                       ret = bch2_btree_delete_at(trans, iter, 0);
+               }
+               goto out;
+       }
+
+       ca = bch_dev_bkey_exists(c, k.k->p.inode);
+       if (fsck_err_on(end <= ca->mi.first_bucket ||
+                       start >= ca->mi.nbuckets, c,
+                       bucket_gens_to_invalid_buckets,
+                       "bucket_gens key for invalid buckets:\n  %s",
+                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+               ret = bch2_btree_delete_at(trans, iter, 0);
+               goto out;
+       }
+
+       for (b = start; b < ca->mi.first_bucket; b++)
+               if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
+                               "bucket_gens key has nonzero gen for invalid bucket")) {
+                       g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+                       need_update = true;
+               }
+
+       for (b = ca->mi.nbuckets; b < end; b++)
+               if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
+                               "bucket_gens key has nonzero gen for invalid bucket")) {
+                       g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
+                       need_update = true;
+               }
+
+       if (need_update) {
+               struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
+
+               ret = PTR_ERR_OR_ZERO(u);
+               if (ret)
+                       goto out;
+
+               memcpy(u, &g, sizeof(g));
+               ret = bch2_trans_update(trans, iter, u, 0);
+       }
+out:
+fsck_err:
+       printbuf_exit(&buf);
+       return ret;
+}
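
For context on the `b & KEY_TYPE_BUCKET_GENS_MASK` indexing above: each
bucket_gens key packs one byte of generation number per bucket for a fixed
power-of-two run of buckets, so a bucket number splits into a key offset plus
an index into gens[]. A minimal sketch of that mapping, assuming the constants
defined in alloc_background.h (the helper name here is hypothetical):

#define KEY_TYPE_BUCKET_GENS_BITS	8
#define KEY_TYPE_BUCKET_GENS_NR		(1U << KEY_TYPE_BUCKET_GENS_BITS)
#define KEY_TYPE_BUCKET_GENS_MASK	(KEY_TYPE_BUCKET_GENS_NR - 1)

/* Split a bucket number into (bucket_gens key offset, index into gens[]). */
static inline u64 bucket_gens_key_offset(u64 bucket, unsigned *idx)
{
	*idx = bucket & KEY_TYPE_BUCKET_GENS_MASK;
	return bucket >> KEY_TYPE_BUCKET_GENS_BITS;
}
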
+
+int bch2_check_alloc_info(struct bch_fs *c)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
+       struct bkey hole;
+       struct bkey_s_c k;
+       int ret = 0;
 
-       bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
+                            BTREE_ITER_PREFETCH);
+       bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
                             BTREE_ITER_PREFETCH);
-       bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
+       bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
                             BTREE_ITER_PREFETCH);
-       bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
+       bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
                             BTREE_ITER_PREFETCH);
+
        while (1) {
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_NOFAIL|
-                                     BTREE_INSERT_LAZY_RW,
-                       bch2_check_alloc_key(&trans, &iter,
-                                            &discard_iter,
-                                            &freespace_iter));
+               struct bpos next;
+
+               bch2_trans_begin(trans);
+
+               k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
+               ret = bkey_err(k);
                if (ret)
+                       goto bkey_err;
+
+               if (!k.k)
                        break;
 
-               bch2_btree_iter_advance(&iter);
-       }
-       bch2_trans_iter_exit(&trans, &freespace_iter);
-       bch2_trans_iter_exit(&trans, &discard_iter);
-       bch2_trans_iter_exit(&trans, &iter);
+               if (k.k->type) {
+                       next = bpos_nosnap_successor(k.k->p);
 
-       if (ret < 0)
-               goto err;
+                       ret = bch2_check_alloc_key(trans,
+                                                  k, &iter,
+                                                  &discard_iter,
+                                                  &freespace_iter,
+                                                  &bucket_gens_iter);
+                       if (ret)
+                               goto bkey_err;
+               } else {
+                       next = k.k->p;
+
+                       ret = bch2_check_alloc_hole_freespace(trans,
+                                                   bkey_start_pos(k.k),
+                                                   &next,
+                                                   &freespace_iter) ?:
+                               bch2_check_alloc_hole_bucket_gens(trans,
+                                                   bkey_start_pos(k.k),
+                                                   &next,
+                                                   &bucket_gens_iter);
+                       if (ret)
+                               goto bkey_err;
+               }
 
-       bch2_trans_iter_init(&trans, &iter, BTREE_ID_need_discard, POS_MIN,
-                            BTREE_ITER_PREFETCH);
-       while (1) {
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_NOFAIL|
-                                     BTREE_INSERT_LAZY_RW,
-                       bch2_check_discard_freespace_key(&trans, &iter));
+               ret = bch2_trans_commit(trans, NULL, NULL,
+                                       BCH_TRANS_COMMIT_no_enospc);
                if (ret)
-                       break;
+                       goto bkey_err;
 
-               bch2_btree_iter_advance(&iter);
+               bch2_btree_iter_set_pos(&iter, next);
+bkey_err:
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
+               if (ret)
+                       break;
        }
-       bch2_trans_iter_exit(&trans, &iter);
+       bch2_trans_iter_exit(trans, &bucket_gens_iter);
+       bch2_trans_iter_exit(trans, &freespace_iter);
+       bch2_trans_iter_exit(trans, &discard_iter);
+       bch2_trans_iter_exit(trans, &iter);
 
        if (ret < 0)
                goto err;
 
-       bch2_trans_iter_init(&trans, &iter, BTREE_ID_freespace, POS_MIN,
+       ret = for_each_btree_key(trans, iter,
+                       BTREE_ID_need_discard, POS_MIN,
+                       BTREE_ITER_PREFETCH, k,
+               bch2_check_discard_freespace_key(trans, &iter));
+       if (ret)
+               goto err;
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
                             BTREE_ITER_PREFETCH);
        while (1) {
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_NOFAIL|
-                                     BTREE_INSERT_LAZY_RW,
-                       bch2_check_discard_freespace_key(&trans, &iter));
-               if (ret)
+               bch2_trans_begin(trans);
+               k = bch2_btree_iter_peek(&iter);
+               if (!k.k)
                        break;
 
-               bch2_btree_iter_advance(&iter);
+               ret = bkey_err(k) ?:
+                       bch2_check_discard_freespace_key(trans, &iter);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
+                       ret = 0;
+                       continue;
+               }
+               if (ret) {
+                       struct printbuf buf = PRINTBUF;
+                       bch2_bkey_val_to_text(&buf, c, k);
+
+                       bch_err(c, "while checking %s", buf.buf);
+                       printbuf_exit(&buf);
+                       break;
+               }
+
+               bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
        }
-       bch2_trans_iter_exit(&trans, &iter);
+       bch2_trans_iter_exit(trans, &iter);
+       if (ret)
+               goto err;
+
+       ret = for_each_btree_key_commit(trans, iter,
+                       BTREE_ID_bucket_gens, POS_MIN,
+                       BTREE_ITER_PREFETCH, k,
+                       NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+               bch2_check_bucket_gens_key(trans, &iter, k));
 err:
-       bch2_trans_exit(&trans);
-       return ret < 0 ? ret : 0;
+       bch2_trans_put(trans);
+       bch_err_fn(c, ret);
+       return ret;
 }
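
bch2_check_alloc_info() above is a good example of the btree transaction
restart idiom used throughout this file: begin the transaction at the top of
the loop, and on BCH_ERR_transaction_restart retry without advancing the
iterator, since locks were dropped and the work must be redone. A stripped-down
sketch of the pattern, using only APIs visible in this patch (the per-key work
is elided):

static int walk_alloc_btree_sketch(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		bch2_trans_begin(trans);

		k = bch2_btree_iter_peek(&iter);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;
		if (!k.k)
			break;

		/* ... per-key work, then bch2_trans_commit() ... */

		bch2_btree_iter_advance(&iter);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;	/* locks were dropped: retry this key */
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
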
 
 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
@@ -924,10 +1546,10 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
        struct btree_iter lru_iter;
-       struct bch_alloc_v4 a;
-       struct bkey_s_c alloc_k, k;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
+       struct bkey_s_c alloc_k, lru_k;
        struct printbuf buf = PRINTBUF;
-       struct printbuf buf2 = PRINTBUF;
        int ret;
 
        alloc_k = bch2_btree_iter_peek(alloc_iter);
@@ -938,103 +1560,109 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
        if (ret)
                return ret;
 
-       bch2_alloc_to_v4(alloc_k, &a);
+       a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-       if (a.data_type != BCH_DATA_cached)
+       if (a->data_type != BCH_DATA_cached)
                return 0;
 
-       bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
-                            POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
-
-       k = bch2_btree_iter_peek_slot(&lru_iter);
-       ret = bkey_err(k);
-       if (ret)
-               goto err;
-
-       if (fsck_err_on(!a.io_time[READ], c,
+       if (fsck_err_on(!a->io_time[READ], c,
+                       alloc_key_cached_but_read_time_zero,
                        "cached bucket with read_time 0\n"
                        "  %s",
                (printbuf_reset(&buf),
-                bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
-           fsck_err_on(k.k->type != KEY_TYPE_lru ||
-                       le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
-                       "incorrect/missing lru entry\n"
-                       "  %s\n"
-                       "  %s",
-                       (printbuf_reset(&buf),
-                        bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
-                       (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
-               u64 read_time = a.io_time[READ];
+                bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
+               struct bkey_i_alloc_v4 *a_mut =
+                       bch2_alloc_to_v4_mut(trans, alloc_k);
+               ret = PTR_ERR_OR_ZERO(a_mut);
+               if (ret)
+                       goto err;
+
+               a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+               ret = bch2_trans_update(trans, alloc_iter,
+                                       &a_mut->k_i, BTREE_TRIGGER_NORUN);
+               if (ret)
+                       goto err;
 
-               if (!a.io_time[READ])
-                       a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+               a = &a_mut->v;
+       }
+
+       lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
+                            lru_pos(alloc_k.k->p.inode,
+                                    bucket_to_u64(alloc_k.k->p),
+                                    a->io_time[READ]), 0);
+       ret = bkey_err(lru_k);
+       if (ret)
+               return ret;
 
+       if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
+                       alloc_key_to_missing_lru_entry,
+                       "missing lru entry\n"
+                       "  %s",
+                       (printbuf_reset(&buf),
+                        bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = bch2_lru_set(trans,
                                   alloc_k.k->p.inode,
-                                  alloc_k.k->p.offset,
-                                  &a.io_time[READ]);
+                                  bucket_to_u64(alloc_k.k->p),
+                                  a->io_time[READ]);
                if (ret)
                        goto err;
-
-               if (a.io_time[READ] != read_time) {
-                       struct bkey_i_alloc_v4 *a_mut =
-                               bch2_alloc_to_v4_mut(trans, alloc_k);
-                       ret = PTR_ERR_OR_ZERO(a_mut);
-                       if (ret)
-                               goto err;
-
-                       a_mut->v.io_time[READ] = a.io_time[READ];
-                       ret = bch2_trans_update(trans, alloc_iter,
-                                               &a_mut->k_i, BTREE_TRIGGER_NORUN);
-                       if (ret)
-                               goto err;
-               }
        }
 err:
 fsck_err:
        bch2_trans_iter_exit(trans, &lru_iter);
-       printbuf_exit(&buf2);
        printbuf_exit(&buf);
        return ret;
 }
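
The lru_pos()/bucket_to_u64() calls above rely on how LRU btree positions are
encoded. A sketch of that encoding, assuming the helpers in lru.h at the time
of this change (names suffixed _sketch to mark them as illustrations):

#define LRU_TIME_BITS	48

/* pos.inode carries the LRU id in the high bits and the time below it */
static inline struct bpos lru_pos_sketch(u16 lru_id, u64 dev_bucket, u64 time)
{
	return POS(((u64) lru_id << LRU_TIME_BITS) | time, dev_bucket);
}

/* (device, bucket) packed into a single u64 for pos.offset */
static inline u64 bucket_to_u64_sketch(struct bpos bucket)
{
	return (bucket.inode << 48) | bucket.offset;
}

A cached bucket's LRU entry is thus keyed by its read time, which is why
bch2_check_alloc_to_lru_ref() repairs a zero read_time before doing the lookup.
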
 
 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
-       struct btree_trans trans;
-       struct btree_iter iter;
-       struct bkey_s_c k;
-       int ret = 0;
-
-       bch2_trans_init(&trans, c, 0, 0);
-
-       for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
-                          BTREE_ITER_PREFETCH, k, ret) {
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_NOFAIL|
-                                     BTREE_INSERT_LAZY_RW,
-                       bch2_check_alloc_to_lru_ref(&trans, &iter));
-               if (ret)
-                       break;
-       }
-       bch2_trans_iter_exit(&trans, &iter);
-
-       bch2_trans_exit(&trans);
-       return ret < 0 ? ret : 0;
+       int ret = bch2_trans_run(c,
+               for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+                               POS_MIN, BTREE_ITER_PREFETCH, k,
+                               NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
+                       bch2_check_alloc_to_lru_ref(trans, &iter)));
+       bch_err_fn(c, ret);
+       return ret;
 }
 
-static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
-                                  struct bch_dev *ca, bool *discard_done)
+static int bch2_discard_one_bucket(struct btree_trans *trans,
+                                  struct btree_iter *need_discard_iter,
+                                  struct bpos *discard_pos_done,
+                                  u64 *seen,
+                                  u64 *open,
+                                  u64 *need_journal_commit,
+                                  u64 *discarded)
 {
        struct bch_fs *c = trans->c;
-       struct btree_iter iter;
+       struct bpos pos = need_discard_iter->pos;
+       struct btree_iter iter = { NULL };
        struct bkey_s_c k;
+       struct bch_dev *ca;
        struct bkey_i_alloc_v4 *a;
        struct printbuf buf = PRINTBUF;
-       int ret;
+       int ret = 0;
+
+       ca = bch_dev_bkey_exists(c, pos.inode);
+       if (!percpu_ref_tryget(&ca->io_ref)) {
+               bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
+               return 0;
+       }
+
+       if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
+               (*open)++;
+               goto out;
+       }
+
+       if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                       c->journal.flushed_seq_ondisk,
+                       pos.inode, pos.offset)) {
+               (*need_journal_commit)++;
+               goto out;
+       }
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos,
-                            BTREE_ITER_CACHED);
-       k = bch2_btree_iter_peek_slot(&iter);
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
+                              need_discard_iter->pos,
+                              BTREE_ITER_CACHED);
        ret = bkey_err(k);
        if (ret)
                goto out;
@@ -1050,25 +1678,33 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
                goto write;
        }
 
-       if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
-                       "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
-                       "%s",
-                       a->v.journal_seq,
-                       c->journal.flushed_seq_ondisk,
-                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
-               ret = -EIO;
+       if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
+               if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+                       bch2_trans_inconsistent(trans,
+                               "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
+                               "%s",
+                               a->v.journal_seq,
+                               c->journal.flushed_seq_ondisk,
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+                       ret = -EIO;
+               }
                goto out;
        }
 
-       if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
-                       "bucket incorrectly set in need_discard btree\n"
-                       "%s",
-                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
-               ret = -EIO;
+       if (a->v.data_type != BCH_DATA_need_discard) {
+               if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+                       bch2_trans_inconsistent(trans,
+                               "bucket incorrectly set in need_discard btree\n"
+                               "%s",
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
+                       ret = -EIO;
+               }
+
                goto out;
        }
 
-       if (!*discard_done && ca->mi.discard && !c->opts.nochanges) {
+       if (!bkey_eq(*discard_pos_done, iter.pos) &&
+           ca->mi.discard && !c->opts.nochanges) {
                /*
                 * This works without any other locks because this is the only
                 * thread that removes items from the need_discard tree
@@ -1077,10 +1713,10 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
                blkdev_issue_discard(ca->disk_sb.bdev,
                                     k.k->p.offset * ca->mi.bucket_size,
                                     ca->mi.bucket_size,
-                                    GFP_KERNEL, 0);
-               *discard_done = true;
+                                    GFP_KERNEL);
+               *discard_pos_done = iter.pos;
 
-               ret = bch2_trans_relock(trans) ? 0 : -EINTR;
+               ret = bch2_trans_relock_notrace(trans);
                if (ret)
                        goto out;
        }
@@ -1088,9 +1724,19 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
        SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
        a->v.data_type = alloc_data_type(a->v, a->v.data_type);
 write:
-       ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+       ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                                 BCH_WATERMARK_btree|
+                                 BCH_TRANS_COMMIT_no_enospc);
+       if (ret)
+               goto out;
+
+       count_event(c, bucket_discard);
+       (*discarded)++;
 out:
+       (*seen)++;
        bch2_trans_iter_exit(trans, &iter);
+       percpu_ref_put(&ca->io_ref);
        printbuf_exit(&buf);
        return ret;
 }
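
One unit note on the blkdev_issue_discard() call above: ca->mi.bucket_size is
in 512-byte sectors, so bucket b occupies the sector range
[b * bucket_size, (b + 1) * bucket_size). A hedged one-line equivalent,
assuming the bucket_to_sector() helper in buckets.h:

/* Sector at which bucket b starts; bucket_size is in 512-byte sectors. */
static inline sector_t bucket_to_sector_sketch(const struct bch_dev *ca, u64 b)
{
	return (sector_t) b * ca->mi.bucket_size;
}
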
@@ -1098,149 +1744,79 @@ out:
 static void bch2_do_discards_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
-       struct bch_dev *ca = NULL;
-       struct btree_trans trans;
-       struct btree_iter iter;
-       struct bkey_s_c k;
        u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
+       struct bpos discard_pos_done = POS_MAX;
        int ret;
 
-       bch2_trans_init(&trans, c, 0, 0);
-
-       for_each_btree_key(&trans, iter, BTREE_ID_need_discard,
-                          POS_MIN, 0, k, ret) {
-               bool discard_done = false;
-
-               if (ca && k.k->p.inode != ca->dev_idx) {
-                       percpu_ref_put(&ca->io_ref);
-                       ca = NULL;
-               }
-
-               if (!ca) {
-                       ca = bch_dev_bkey_exists(c, k.k->p.inode);
-                       if (!percpu_ref_tryget(&ca->io_ref)) {
-                               ca = NULL;
-                               bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
-                               continue;
-                       }
-               }
-
-               seen++;
-
-               if (bch2_bucket_is_open_safe(c, k.k->p.inode, k.k->p.offset)) {
-                       open++;
-                       continue;
-               }
-
-               if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
-                               c->journal.flushed_seq_ondisk,
-                               k.k->p.inode, k.k->p.offset)) {
-                       need_journal_commit++;
-                       continue;
-               }
-
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_USE_RESERVE|
-                                     BTREE_INSERT_NOFAIL,
-                               bch2_clear_need_discard(&trans, k.k->p, ca, &discard_done));
-               if (ret)
-                       break;
-
-               this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
-               discarded++;
-       }
-       bch2_trans_iter_exit(&trans, &iter);
-
-       if (ca)
-               percpu_ref_put(&ca->io_ref);
-
-       bch2_trans_exit(&trans);
+       /*
+        * We're doing the commit in bch2_discard_one_bucket instead of using
+        * for_each_btree_key_commit() so that we can increment counters after
+        * a successful commit:
+        */
+       ret = bch2_trans_run(c,
+               for_each_btree_key(trans, iter,
+                                  BTREE_ID_need_discard, POS_MIN, 0, k,
+                       bch2_discard_one_bucket(trans, &iter, &discard_pos_done,
+                                               &seen,
+                                               &open,
+                                               &need_journal_commit,
+                                               &discarded)));
 
        if (need_journal_commit * 2 > seen)
                bch2_journal_flush_async(&c->journal, NULL);
 
-       percpu_ref_put(&c->writes);
+       bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 
-       trace_discard_buckets(c, seen, open, need_journal_commit, discarded, ret);
+       trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
+                             bch2_err_str(ret));
 }
 
 void bch2_do_discards(struct bch_fs *c)
 {
-       if (percpu_ref_tryget_live(&c->writes) &&
-           !queue_work(system_long_wq, &c->discard_work))
-               percpu_ref_put(&c->writes);
+       if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
+           !queue_work(c->write_ref_wq, &c->discard_work))
+               bch2_write_ref_put(c, BCH_WRITE_REF_discard);
 }
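
bch2_do_discards() (and bch2_do_invalidates() below) use the same guard
pattern for background work: take a named filesystem write ref, queue the work
item, and drop the ref on the queueing path if the work was already pending;
the work function drops its ref when done, so the work can never outlive the
filesystem's ability to write. A generic sketch, with a hypothetical ref name
and work item:

static void example_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, example_work);

	/* ... the actual background work ... */

	bch2_write_ref_put(c, BCH_WRITE_REF_example);	/* taken by the kick */
}

void bch2_kick_example(struct bch_fs *c)
{
	/*
	 * tryget fails if writes are shutting down; queue_work() returns
	 * false if the work is already pending and thus already holds a ref.
	 */
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_example) &&
	    !queue_work(c->write_ref_wq, &c->example_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_example);
}
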
 
-static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca,
-                                struct bpos *bucket_pos, unsigned *cached_sectors)
+static int invalidate_one_bucket(struct btree_trans *trans,
+                                struct btree_iter *lru_iter,
+                                struct bkey_s_c lru_k,
+                                s64 *nr_to_invalidate)
 {
        struct bch_fs *c = trans->c;
-       struct btree_iter lru_iter, alloc_iter = { NULL };
-       struct bkey_s_c k;
-       struct bkey_i_alloc_v4 *a;
-       u64 bucket, idx;
+       struct btree_iter alloc_iter = { NULL };
+       struct bkey_i_alloc_v4 *a = NULL;
        struct printbuf buf = PRINTBUF;
-       int ret;
-
-       bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
-                            POS(ca->dev_idx, 0), 0);
-next_lru:
-       k = bch2_btree_iter_peek(&lru_iter);
-       ret = bkey_err(k);
-       if (ret)
-               goto out;
-
-       if (!k.k || k.k->p.inode != ca->dev_idx) {
-               ret = 1;
-               goto out;
-       }
+       struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
+       unsigned cached_sectors;
+       int ret = 0;
 
-       if (k.k->type != KEY_TYPE_lru) {
-               prt_printf(&buf, "non lru key in lru btree:\n  ");
-               bch2_bkey_val_to_text(&buf, c, k);
+       if (*nr_to_invalidate <= 0)
+               return 1;
 
-               if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
-                       bch_err(c, "%s", buf.buf);
-                       bch2_btree_iter_advance(&lru_iter);
-                       goto next_lru;
-               } else {
-                       bch2_trans_inconsistent(trans, "%s", buf.buf);
-                       ret = -EINVAL;
-                       goto out;
-               }
+       if (!bch2_dev_bucket_exists(c, bucket)) {
+               prt_str(&buf, "lru entry points to invalid bucket");
+               goto err;
        }
 
-       idx     = k.k->p.offset;
-       bucket  = le64_to_cpu(bkey_s_c_to_lru(k).v->idx);
-
-       *bucket_pos = POS(ca->dev_idx, bucket);
+       if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+               return 0;
 
-       a = bch2_trans_start_alloc_update(trans, &alloc_iter, *bucket_pos);
+       a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                goto out;
 
-       if (idx != alloc_lru_idx(a->v)) {
-               prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n  ");
-               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
-               prt_printf(&buf, "\n  ");
-               bch2_bkey_val_to_text(&buf, c, k);
+       /* We expect harmless races here due to the btree write buffer: */
+       if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
+               goto out;
 
-               if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
-                       bch_err(c, "%s", buf.buf);
-                       bch2_btree_iter_advance(&lru_iter);
-                       goto next_lru;
-               } else {
-                       bch2_trans_inconsistent(trans, "%s", buf.buf);
-                       ret = -EINVAL;
-                       goto out;
-               }
-       }
+       BUG_ON(a->v.data_type != BCH_DATA_cached);
 
        if (!a->v.cached_sectors)
                bch_err(c, "invalidating empty bucket, confused");
 
-       *cached_sectors = a->v.cached_sectors;
+       cached_sectors = a->v.cached_sectors;
 
        SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
        a->v.gen++;
@@ -1250,116 +1826,181 @@ next_lru:
        a->v.io_time[READ]      = atomic64_read(&c->io_clock[READ].now);
        a->v.io_time[WRITE]     = atomic64_read(&c->io_clock[WRITE].now);
 
-       ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
-                               BTREE_TRIGGER_BUCKET_INVALIDATE);
+       ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
+                               BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+               bch2_trans_commit(trans, NULL, NULL,
+                                 BCH_WATERMARK_btree|
+                                 BCH_TRANS_COMMIT_no_enospc);
        if (ret)
                goto out;
+
+       trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
+       --*nr_to_invalidate;
 out:
        bch2_trans_iter_exit(trans, &alloc_iter);
-       bch2_trans_iter_exit(trans, &lru_iter);
        printbuf_exit(&buf);
        return ret;
+err:
+       prt_str(&buf, "\n  lru key: ");
+       bch2_bkey_val_to_text(&buf, c, lru_k);
+
+       prt_str(&buf, "\n  lru entry: ");
+       bch2_lru_pos_to_text(&buf, lru_iter->pos);
+
+       prt_str(&buf, "\n  alloc key: ");
+       if (!a)
+               bch2_bpos_to_text(&buf, bucket);
+       else
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
+
+       bch_err(c, "%s", buf.buf);
+       if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
+               bch2_inconsistent_error(c);
+               ret = -EINVAL;
+       }
+
+       goto out;
 }
 
 static void bch2_do_invalidates_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
-       struct bch_dev *ca;
-       struct btree_trans trans;
-       struct bpos bucket;
-       unsigned i, sectors;
+       struct btree_trans *trans = bch2_trans_get(c);
        int ret = 0;
 
-       bch2_trans_init(&trans, c, 0, 0);
+       ret = bch2_btree_write_buffer_tryflush(trans);
+       if (ret)
+               goto err;
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                s64 nr_to_invalidate =
                        should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
 
-               while (nr_to_invalidate-- >= 0) {
-                       ret = __bch2_trans_do(&trans, NULL, NULL,
-                                             BTREE_INSERT_USE_RESERVE|
-                                             BTREE_INSERT_NOFAIL,
-                                       invalidate_one_bucket(&trans, ca, &bucket,
-                                                             &sectors));
-                       if (ret)
-                               break;
+               ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
+                               lru_pos(ca->dev_idx, 0, 0),
+                               lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
+                               BTREE_ITER_INTENT, k,
+                       invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
 
-                       trace_invalidate_bucket(c, bucket.inode, bucket.offset, sectors);
-                       this_cpu_inc(c->counters[BCH_COUNTER_bucket_invalidate]);
+               if (ret < 0) {
+                       percpu_ref_put(&ca->ref);
+                       break;
                }
        }
-
-       bch2_trans_exit(&trans);
-       percpu_ref_put(&c->writes);
+err:
+       bch2_trans_put(trans);
+       bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
 }
 
 void bch2_do_invalidates(struct bch_fs *c)
 {
-       if (percpu_ref_tryget_live(&c->writes) &&
-           !queue_work(system_long_wq, &c->invalidate_work))
-               percpu_ref_put(&c->writes);
-}
-
-static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter)
-{
-       struct bch_alloc_v4 a;
-       struct bkey_s_c k;
-       int ret;
-
-       k = bch2_btree_iter_peek_slot(iter);
-       ret = bkey_err(k);
-       if (ret)
-               return ret;
-
-       bch2_alloc_to_v4(k, &a);
-       return bch2_bucket_do_index(trans, k, &a, true);
+       if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
+           !queue_work(c->write_ref_wq, &c->invalidate_work))
+               bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
 }
 
-static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
+int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
+                           u64 bucket_start, u64 bucket_end)
 {
-       struct btree_trans trans;
+       struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
+       struct bkey hole;
+       struct bpos end = POS(ca->dev_idx, bucket_end);
        struct bch_member *m;
+       unsigned long last_updated = jiffies;
        int ret;
 
-       bch2_trans_init(&trans, c, 0, 0);
+       BUG_ON(bucket_start > bucket_end);
+       BUG_ON(bucket_end > ca->mi.nbuckets);
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+               POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
+               BTREE_ITER_PREFETCH);
+       /*
+        * Scan the alloc btree for every bucket on @ca, and add buckets to the
+        * freespace/need_discard/need_gc_gens btrees as needed:
+        */
+       while (1) {
+               if (last_updated + HZ * 10 < jiffies) {
+                       bch_info(ca, "%s: currently at %llu/%llu",
+                                __func__, iter.pos.offset, ca->mi.nbuckets);
+                       last_updated = jiffies;
+               }
+
+               bch2_trans_begin(trans);
 
-       for_each_btree_key(&trans, iter, BTREE_ID_alloc,
-                          POS(ca->dev_idx, ca->mi.first_bucket),
-                          BTREE_ITER_SLOTS|
-                          BTREE_ITER_PREFETCH, k, ret) {
-               if (iter.pos.offset >= ca->mi.nbuckets)
+               if (bkey_ge(iter.pos, end)) {
+                       ret = 0;
                        break;
+               }
+
+               k = bch2_get_key_or_hole(&iter, end, &hole);
+               ret = bkey_err(k);
+               if (ret)
+                       goto bkey_err;
+
+               if (k.k->type) {
+                       /*
+                        * We process live keys in the alloc btree one at a
+                        * time:
+                        */
+                       struct bch_alloc_v4 a_convert;
+                       const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+                       ret =   bch2_bucket_do_index(trans, k, a, true) ?:
+                               bch2_trans_commit(trans, NULL, NULL,
+                                                 BCH_TRANS_COMMIT_no_enospc);
+                       if (ret)
+                               goto bkey_err;
+
+                       bch2_btree_iter_advance(&iter);
+               } else {
+                       struct bkey_i *freespace;
+
+                       freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
+                       ret = PTR_ERR_OR_ZERO(freespace);
+                       if (ret)
+                               goto bkey_err;
 
-               ret = __bch2_trans_do(&trans, NULL, NULL,
-                                     BTREE_INSERT_LAZY_RW,
-                                bucket_freespace_init(&trans, &iter));
+                       bkey_init(&freespace->k);
+                       freespace->k.type       = KEY_TYPE_set;
+                       freespace->k.p          = k.k->p;
+                       freespace->k.size       = k.k->size;
+
+                       ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
+                               bch2_trans_commit(trans, NULL, NULL,
+                                                 BCH_TRANS_COMMIT_no_enospc);
+                       if (ret)
+                               goto bkey_err;
+
+                       bch2_btree_iter_set_pos(&iter, k.k->p);
+               }
+bkey_err:
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
                if (ret)
                        break;
        }
-       bch2_trans_iter_exit(&trans, &iter);
 
-       bch2_trans_exit(&trans);
+       bch2_trans_iter_exit(trans, &iter);
+       bch2_trans_put(trans);
 
-       if (ret) {
-               bch_err(ca, "error initializing free space: %i", ret);
+       if (ret < 0) {
+               bch_err_msg(ca, ret, "initializing free space");
                return ret;
        }
 
        mutex_lock(&c->sb_lock);
-       m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
+       m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
        SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
        mutex_unlock(&c->sb_lock);
 
-       return ret;
+       return 0;
 }
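
Since freespace keys are extents, the hole path above turns an entire run of
empty buckets into a single KEY_TYPE_set key: extents are end-positioned, so
the key's position is the end of the hole and its size is the hole's length.
A hedged helper sketch (name hypothetical) of that construction:

/* Build a freespace key covering buckets [end - size, end) on one device. */
static void freespace_key_init_sketch(struct bkey_i *k, unsigned dev,
				      u64 end, u32 size)
{
	bkey_init(&k->k);
	k->k.type	= KEY_TYPE_set;
	k->k.p		= POS(dev, end);	/* extent positions are ends */
	k->k.size	= size;
}
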
 
 int bch2_fs_freespace_init(struct bch_fs *c)
 {
-       struct bch_dev *ca;
-       unsigned i;
        int ret = 0;
        bool doing_init = false;
 
@@ -1368,7 +2009,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
         * every mount:
         */
 
-       for_each_member_device(ca, c, i) {
+       for_each_member_device(c, ca) {
                if (ca->mi.freespace_initialized)
                        continue;
 
@@ -1377,9 +2018,10 @@ int bch2_fs_freespace_init(struct bch_fs *c)
                        doing_init = true;
                }
 
-               ret = bch2_dev_freespace_init(c, ca);
+               ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
                if (ret) {
                        percpu_ref_put(&ca->ref);
+                       bch_err_fn(c, ret);
                        return ret;
                }
        }
@@ -1388,11 +2030,10 @@ int bch2_fs_freespace_init(struct bch_fs *c)
                mutex_lock(&c->sb_lock);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);
-
                bch_verbose(c, "done initializing freespace");
        }
 
-       return ret;
+       return 0;
 }
 
 /* Bucket IO clocks: */
@@ -1428,15 +2069,13 @@ out:
 
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-       struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
-       unsigned i;
 
        lockdep_assert_held(&c->state_lock);
 
-       for_each_online_member(ca, c, i) {
+       for_each_online_member(c, ca) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
 
                ra_pages += bdi->ra_pages;
@@ -1444,7 +2083,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
        bch2_set_ra_pages(c, ra_pages);
 
-       for_each_rw_member(ca, c, i) {
+       for_each_rw_member(c, ca) {
                u64 dev_reserve = 0;
 
                /*
@@ -1498,6 +2137,15 @@ void bch2_recalc_capacity(struct bch_fs *c)
        closure_wake_up(&c->freelist_wait);
 }
 
+u64 bch2_min_rw_member_capacity(struct bch_fs *c)
+{
+       u64 ret = U64_MAX;
+
+       for_each_rw_member(c, ca)
+               ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
+       return ret;
+}
+
 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
 {
        struct open_bucket *ob;
@@ -1531,40 +2179,7 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
         */
        bch2_recalc_capacity(c);
 
-       /* Next, close write points that point to this device... */
-       for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
-               bch2_writepoint_stop(c, ca, &c->write_points[i]);
-
-       bch2_writepoint_stop(c, ca, &c->copygc_write_point);
-       bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
-       bch2_writepoint_stop(c, ca, &c->btree_write_point);
-
-       mutex_lock(&c->btree_reserve_cache_lock);
-       while (c->btree_reserve_cache_nr) {
-               struct btree_alloc *a =
-                       &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
-
-               bch2_open_buckets_put(c, &a->ob);
-       }
-       mutex_unlock(&c->btree_reserve_cache_lock);
-
-       while (1) {
-               struct open_bucket *ob;
-
-               spin_lock(&c->freelist_lock);
-               if (!ca->open_buckets_partial_nr) {
-                       spin_unlock(&c->freelist_lock);
-                       break;
-               }
-               ob = c->open_buckets +
-                       ca->open_buckets_partial[--ca->open_buckets_partial_nr];
-               ob->on_partial_list = false;
-               spin_unlock(&c->freelist_lock);
-
-               bch2_open_bucket_put(c, ob);
-       }
-
-       bch2_ec_stop_dev(c, ca);
+       bch2_open_buckets_stop(c, ca, false);
 
        /*
         * Wake up threads that were blocked on allocation, so they can notice