diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index f2f9015dbb00ca144fe0eb2cf31d2f4e53f2b909..b0ff47998a9440912f940dc09e27b34e6341cb9e 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -1,78 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Primary bucket allocation code
- *
  * Copyright 2012 Google, Inc.
  *
- * Allocation in bcache is done in terms of buckets:
- *
- * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
- * btree pointers - they must match for the pointer to be considered valid.
- *
- * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
- * bucket simply by incrementing its gen.
- *
- * The gens (along with the priorities; it's really the gens are important but
- * the code is named as if it's the priorities) are written in an arbitrary list
- * of buckets on disk, with a pointer to them in the journal header.
- *
- * When we invalidate a bucket, we have to write its new gen to disk and wait
- * for that write to complete before we use it - otherwise after a crash we
- * could have pointers that appeared to be good but pointed to data that had
- * been overwritten.
- *
- * Since the gens and priorities are all stored contiguously on disk, we can
- * batch this up: We fill up the free_inc list with freshly invalidated buckets,
- * call prio_write(), and when prio_write() finishes we pull buckets off the
- * free_inc list and optionally discard them.
- *
- * free_inc isn't the only freelist - if it was, we'd often have to sleep while
- * priorities and gens were being written before we could allocate. c->free is a
- * smaller freelist, and buckets on that list are always ready to be used.
- *
- * If we've got discards enabled, that happens when a bucket moves from the
- * free_inc list to the free list.
- *
- * It's important to ensure that gens don't wrap around - with respect to
- * either the oldest gen in the btree or the gen on disk. This is quite
- * difficult to do in practice, but we explicitly guard against it anyways - if
- * a bucket is in danger of wrapping around we simply skip invalidating it that
- * time around, and we garbage collect or rewrite the priorities sooner than we
- * would have otherwise.
+ * Foreground allocator code: allocate buckets from the freelist, and allocate
+ * at sector granularity from write points.
  *
  * bch2_bucket_alloc() allocates a single bucket from a specific device.
  *
  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
  * in a given filesystem.
- *
- * invalidate_buckets() drives all the processes described above. It's called
- * from bch2_bucket_alloc() and a few other places that need to make sure free
- * buckets are ready.
- *
- * invalidate_buckets_(lru|fifo)() find buckets that are available to be
- * invalidated, and then invalidate them and stick them on the free_inc list -
- * in either lru or fifo order.
  */
 
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
+#include "btree_iter.h"
+#include "btree_update.h"
 #include "btree_gc.h"
 #include "buckets.h"
+#include "buckets_waiting_for_journal.h"
 #include "clock.h"
 #include "debug.h"
 #include "disk_groups.h"
 #include "ec.h"
-#include "io.h"
+#include "error.h"
+#include "io_write.h"
+#include "journal.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
+#include "trace.h"
 
 #include <linux/math64.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
 
-enum bucket_alloc_ret {
-       ALLOC_SUCCESS,
-       OPEN_BUCKETS_EMPTY,
-       FREELIST_EMPTY,         /* Allocator thread not keeping up */
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+                                          struct mutex *lock)
+{
+       if (!mutex_trylock(lock)) {
+               bch2_trans_unlock(trans);
+               mutex_lock(lock);
+       }
+}
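+
+/*
+ * (Editor's note, not part of this diff) Dropping the transaction's btree
+ * locks before blocking on a mutex avoids deadlocking against a thread that
+ * holds the mutex while waiting on one of our btree locks. A hedged usage
+ * sketch, mirroring how writepoint_find() uses this helper below:
+ *
+ *	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+ *	// ... work on the write point ...
+ *	mutex_unlock(&wp->lock);
+ */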
+
+const char * const bch2_watermarks[] = {
+#define x(t) #t,
+       BCH_WATERMARKS()
+#undef x
+       NULL
 };
 
 /*
@@ -91,30 +67,63 @@ enum bucket_alloc_ret {
  * reference _after_ doing the index update that makes its allocation reachable.
  */
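+/*
+ * (Editor's sketch, not part of this diff) The lifecycle implied by the
+ * comment above: a writer pins its open_buckets for the duration of the
+ * write and drops them only once the index update makes the data reachable:
+ *
+ *	struct open_bucket *ob;
+ *	unsigned i;
+ *
+ *	open_bucket_for_each(c, &wp->ptrs, ob, i)
+ *		atomic_inc(&ob->pin);	// hold while the write is in flight
+ *	// ... submit the write, then do the btree index update ...
+ *	open_bucket_for_each(c, &wp->ptrs, ob, i)
+ *		bch2_open_bucket_put(c, ob);	// drops the pin
+ */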
 
+void bch2_reset_alloc_cursors(struct bch_fs *c)
+{
+       rcu_read_lock();
+       for_each_member_device_rcu(c, ca, NULL)
+               ca->alloc_cursor = 0;
+       rcu_read_unlock();
+}
+
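+/*
+ * (Editor's note) Open buckets are kept in a small hash table keyed by
+ * (dev, bucket), so that bch2_bucket_is_open() can cheaply detect a bucket
+ * with an allocation in flight; collisions chain through ob->hash indices:
+ */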
+static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
+{
+       open_bucket_idx_t idx = ob - c->open_buckets;
+       open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+       ob->hash = *slot;
+       *slot = idx;
+}
+
+static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
+{
+       open_bucket_idx_t idx = ob - c->open_buckets;
+       open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+       while (*slot != idx) {
+               BUG_ON(!*slot);
+               slot = &c->open_buckets[*slot].hash;
+       }
+
+       *slot = ob->hash;
+       ob->hash = 0;
+}
+
 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 {
-       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
        if (ob->ec) {
-               bch2_ec_bucket_written(c, ob);
+               ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
                return;
        }
 
-       percpu_down_read_preempt_disable(&c->mark_lock);
+       percpu_down_read(&c->mark_lock);
        spin_lock(&ob->lock);
 
-       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
-                              false, gc_pos_alloc(c, ob), 0);
        ob->valid = false;
-       ob->type = 0;
+       ob->data_type = 0;
 
        spin_unlock(&ob->lock);
-       percpu_up_read_preempt_enable(&c->mark_lock);
+       percpu_up_read(&c->mark_lock);
 
        spin_lock(&c->freelist_lock);
+       bch2_open_bucket_hash_remove(c, ob);
+
        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;
+
        c->open_buckets_nr_free++;
+       ca->nr_open_buckets--;
        spin_unlock(&c->freelist_lock);
 
        closure_wake_up(&c->open_buckets_wait);
@@ -128,8 +137,7 @@ void bch2_open_bucket_write_error(struct bch_fs *c,
        unsigned i;
 
        open_bucket_for_each(c, obs, ob, i)
-               if (ob->ptr.dev == dev &&
-                   ob->ec)
+               if (ob->dev == dev && ob->ec)
                        bch2_ec_bucket_cancel(c, ob);
 }
 
@@ -142,169 +150,472 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
        ob = c->open_buckets + c->open_buckets_freelist;
        c->open_buckets_freelist = ob->freelist;
        atomic_set(&ob->pin, 1);
-       ob->type = 0;
+       ob->data_type = 0;
 
        c->open_buckets_nr_free--;
        return ob;
 }
 
-static void open_bucket_free_unused(struct bch_fs *c,
-                                   struct open_bucket *ob,
-                                   bool may_realloc)
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
-       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
-       BUG_ON(ca->open_buckets_partial_nr >=
-              ARRAY_SIZE(ca->open_buckets_partial));
-
-       if (ca->open_buckets_partial_nr <
-           ARRAY_SIZE(ca->open_buckets_partial) &&
-           may_realloc) {
-               spin_lock(&c->freelist_lock);
-               ob->on_partial_list = true;
-               ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
-                       ob - c->open_buckets;
-               spin_unlock(&c->freelist_lock);
+       BUG_ON(c->open_buckets_partial_nr >=
+              ARRAY_SIZE(c->open_buckets_partial));
 
-               closure_wake_up(&c->open_buckets_wait);
-               closure_wake_up(&c->freelist_wait);
-       } else {
-               bch2_open_bucket_put(c, ob);
-       }
-}
-
-static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
-{
-#ifdef CONFIG_BCACHEFS_DEBUG
-       struct open_bucket *ob;
-       unsigned i;
-
-       open_bucket_for_each(c, obs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+       spin_lock(&c->freelist_lock);
+       ob->on_partial_list = true;
+       c->open_buckets_partial[c->open_buckets_partial_nr++] =
+               ob - c->open_buckets;
+       spin_unlock(&c->freelist_lock);
 
-               BUG_ON(ptr_stale(ca, &ob->ptr));
-       }
-#endif
+       closure_wake_up(&c->open_buckets_wait);
+       closure_wake_up(&c->freelist_wait);
 }
 
 /* _only_ for allocating the journal on a new device: */
 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 {
-       struct bucket_array *buckets;
-       ssize_t b;
+       while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
+               u64 b = ca->new_fs_bucket_idx++;
 
-       rcu_read_lock();
-       buckets = bucket_array(ca);
+               if (!is_superblock_bucket(ca, b) &&
+                   (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
+                       return b;
+       }
 
-       for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
-               if (is_available_bucket(buckets->b[b].mark))
-                       goto success;
-       b = -1;
-success:
-       rcu_read_unlock();
-       return b;
+       return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-       switch (reserve) {
-       case RESERVE_ALLOC:
+       switch (watermark) {
+       case BCH_WATERMARK_reclaim:
                return 0;
-       case RESERVE_BTREE:
-               return BTREE_NODE_OPEN_BUCKET_RESERVE;
+       case BCH_WATERMARK_btree:
+       case BCH_WATERMARK_btree_copygc:
+               return OPEN_BUCKETS_COUNT / 4;
+       case BCH_WATERMARK_copygc:
+               return OPEN_BUCKETS_COUNT / 3;
        default:
-               return BTREE_NODE_OPEN_BUCKET_RESERVE * 2;
+               return OPEN_BUCKETS_COUNT / 2;
        }
 }
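+
+/*
+ * (Editor's note on the function above) The return value is how many open
+ * buckets must be left in reserve for more critical users: reclaim may
+ * consume the entire pool, btree and copygc allocations must leave a quarter
+ * and a third free respectively, and ordinary writes must leave half.
+ */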
 
-/**
- * bch_bucket_alloc - allocate a single bucket from a specific device
- *
- * Returns index of bucket on success, 0 on failure
- * */
-struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-                                     enum alloc_reserve reserve,
-                                     bool may_alloc_partial,
-                                     struct closure *cl)
+static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+                                             u64 bucket,
+                                             enum bch_watermark watermark,
+                                             const struct bch_alloc_v4 *a,
+                                             struct bucket_alloc_state *s,
+                                             struct closure *cl)
 {
-       struct bucket_array *buckets;
        struct open_bucket *ob;
-       long bucket = 0;
 
-       spin_lock(&c->freelist_lock);
-
-       if (may_alloc_partial &&
-           ca->open_buckets_partial_nr) {
-               ob = c->open_buckets +
-                       ca->open_buckets_partial[--ca->open_buckets_partial_nr];
-               ob->on_partial_list = false;
-               spin_unlock(&c->freelist_lock);
-               return ob;
+       if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
+               s->skipped_nouse++;
+               return NULL;
        }
 
-       if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
-               if (cl)
-                       closure_wait(&c->open_buckets_wait, cl);
-               spin_unlock(&c->freelist_lock);
-               trace_open_bucket_alloc_fail(ca, reserve);
-               return ERR_PTR(-OPEN_BUCKETS_EMPTY);
+       if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+               s->skipped_open++;
+               return NULL;
        }
 
-       if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
-               goto out;
+       if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                       c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
+               s->skipped_need_journal_commit++;
+               return NULL;
+       }
 
-       switch (reserve) {
-       case RESERVE_ALLOC:
-               if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                       goto out;
-               break;
-       case RESERVE_BTREE:
-               if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
-                   ca->free[RESERVE_BTREE].size &&
-                   fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                       goto out;
-               break;
-       case RESERVE_MOVINGGC:
-               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
-                       goto out;
-               break;
-       default:
-               break;
+       if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+               s->skipped_nocow++;
+               return NULL;
        }
 
-       if (cl)
-               closure_wait(&c->freelist_wait, cl);
+       spin_lock(&c->freelist_lock);
 
-       spin_unlock(&c->freelist_lock);
+       if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
+               if (cl)
+                       closure_wait(&c->open_buckets_wait, cl);
 
-       trace_bucket_alloc_fail(ca, reserve);
-       return ERR_PTR(-FREELIST_EMPTY);
-out:
-       verify_not_on_freelist(c, ca, bucket);
+               track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket],
+                                  &c->blocked_allocate_open_bucket, true);
+               spin_unlock(&c->freelist_lock);
+               return ERR_PTR(-BCH_ERR_open_buckets_empty);
+       }
+
+       /* Recheck under lock: */
+       if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+               spin_unlock(&c->freelist_lock);
+               s->skipped_open++;
+               return NULL;
+       }
 
        ob = bch2_open_bucket_alloc(c);
 
        spin_lock(&ob->lock);
-       buckets = bucket_array(ca);
 
        ob->valid       = true;
        ob->sectors_free = ca->mi.bucket_size;
-       ob->ptr         = (struct bch_extent_ptr) {
-               .type   = 1 << BCH_EXTENT_ENTRY_ptr,
-               .gen    = buckets->b[bucket].mark.gen,
-               .offset = bucket_to_sector(ca, bucket),
-               .dev    = ca->dev_idx,
-       };
-
-       bucket_io_clock_reset(c, ca, bucket, READ);
-       bucket_io_clock_reset(c, ca, bucket, WRITE);
+       ob->dev         = ca->dev_idx;
+       ob->gen         = a->gen;
+       ob->bucket      = bucket;
        spin_unlock(&ob->lock);
 
+       ca->nr_open_buckets++;
+       bch2_open_bucket_hash_add(c, ob);
+
+       track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket],
+                          &c->blocked_allocate_open_bucket, false);
+
+       track_event_change(&c->times[BCH_TIME_blocked_allocate],
+                          &c->blocked_allocate, false);
+
        spin_unlock(&c->freelist_lock);
+       return ob;
+}
+
+static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
+                                           enum bch_watermark watermark, u64 free_entry,
+                                           struct bucket_alloc_state *s,
+                                           struct bkey_s_c freespace_k,
+                                           struct closure *cl)
+{
+       struct bch_fs *c = trans->c;
+       struct btree_iter iter = { NULL };
+       struct bkey_s_c k;
+       struct open_bucket *ob;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
+       u64 b = free_entry & ~(~0ULL << 56);
+       unsigned genbits = free_entry >> 56;
+       struct printbuf buf = PRINTBUF;
+       int ret;
+
+       if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
+               prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
+                      "  freespace key ",
+                       ca->mi.first_bucket, ca->mi.nbuckets);
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
+       }
+
+       k = bch2_bkey_get_iter(trans, &iter,
+                              BTREE_ID_alloc, POS(ca->dev_idx, b),
+                              BTREE_ITER_CACHED);
+       ret = bkey_err(k);
+       if (ret) {
+               ob = ERR_PTR(ret);
+               goto err;
+       }
+
+       a = bch2_alloc_to_v4(k, &a_convert);
+
+       if (a->data_type != BCH_DATA_free) {
+               if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+                       ob = NULL;
+                       goto err;
+               }
+
+               prt_printf(&buf, "non free bucket in freespace btree\n"
+                      "  freespace key ");
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               prt_printf(&buf, "\n  ");
+               bch2_bkey_val_to_text(&buf, c, k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
+       }
+
+       if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
+           c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+               prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
+                      "  freespace key ",
+                      genbits, alloc_freespace_genbits(*a) >> 56);
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               prt_printf(&buf, "\n  ");
+               bch2_bkey_val_to_text(&buf, c, k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
+       }
+
+       if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+               struct bch_backpointer bp;
+               struct bpos bp_pos = POS_MIN;
+
+               ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+                                               &bp_pos, &bp,
+                                               BTREE_ITER_NOPRESERVE);
+               if (ret) {
+                       ob = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (!bkey_eq(bp_pos, POS_MAX)) {
+                       /*
+                        * Bucket may have data in it - we don't call
+                        * bch2_trans_inconsistent() because fsck hasn't
+                        * finished yet
+                        */
+                       ob = NULL;
+                       goto err;
+               }
+       }
+
+       ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
+       if (!ob)
+               set_btree_iter_dontneed(&iter);
+err:
+       if (iter.path)
+               set_btree_iter_dontneed(&iter);
+       bch2_trans_iter_exit(trans, &iter);
+       printbuf_exit(&buf);
+       return ob;
+}
+
+/*
+ * This path is used before the freespace btree is initialized:
+ *
+ * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
+ * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ */
+static noinline struct open_bucket *
+bch2_bucket_alloc_early(struct btree_trans *trans,
+                       struct bch_dev *ca,
+                       enum bch_watermark watermark,
+                       struct bucket_alloc_state *s,
+                       struct closure *cl)
+{
+       struct btree_iter iter, citer;
+       struct bkey_s_c k, ck;
+       struct open_bucket *ob = NULL;
+       u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+       u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+       u64 alloc_cursor = alloc_start;
+       int ret;
+
+       /*
+        * Scan with an uncached iterator to avoid polluting the key cache. An
+        * uncached iter will return a cached key if one exists, but if not
+        * there is no other underlying protection for the associated key cache
+        * slot. To avoid racing bucket allocations, look up the cached key slot
+        * of any likely allocation candidate before attempting to proceed with
+        * the allocation. This provides proper exclusion on the associated
+        * bucket.
+        */
+again:
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
+                          BTREE_ITER_SLOTS, k, ret) {
+               struct bch_alloc_v4 a_convert;
+               const struct bch_alloc_v4 *a;
+
+               if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
+                       break;
+
+               if (ca->new_fs_bucket_idx &&
+                   is_superblock_bucket(ca, k.k->p.offset))
+                       continue;
+
+               a = bch2_alloc_to_v4(k, &a_convert);
+               if (a->data_type != BCH_DATA_free)
+                       continue;
+
+               /* now check the cached key to serialize concurrent allocs of the bucket */
+               ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+               ret = bkey_err(ck);
+               if (ret)
+                       break;
+
+               a = bch2_alloc_to_v4(ck, &a_convert);
+               if (a->data_type != BCH_DATA_free)
+                       goto next;
+
+               s->buckets_seen++;
+
+               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+next:
+               set_btree_iter_dontneed(&citer);
+               bch2_trans_iter_exit(trans, &citer);
+               if (ob)
+                       break;
+       }
+       bch2_trans_iter_exit(trans, &iter);
+
+       alloc_cursor = iter.pos.offset;
+       ca->alloc_cursor = alloc_cursor;
+
+       if (!ob && ret)
+               ob = ERR_PTR(ret);
+
+       if (!ob && alloc_start > first_bucket) {
+               alloc_cursor = alloc_start = first_bucket;
+               goto again;
+       }
+
+       return ob;
+}
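+
+/*
+ * (Editor's sketch, not part of this diff) bch2_bucket_alloc_early() above
+ * and bch2_bucket_alloc_freelist() below share a wraparound-cursor pattern:
+ * scan from the saved per-device cursor to the end of the device, and only if
+ * that fails restart once from the first bucket; try_alloc() here is a
+ * hypothetical stand-in for the real per-bucket checks:
+ *
+ *	u64 start = max(first_bucket, cursor), b = start;
+ * again:
+ *	for (; b < nbuckets; b++)
+ *		if (try_alloc(b))		// hypothetical helper
+ *			return b;
+ *	if (start > first_bucket) {
+ *		b = start = first_bucket;	// wrap around once
+ *		goto again;
+ *	}
+ *	return -1;
+ */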
+
+static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
+                                                  struct bch_dev *ca,
+                                                  enum bch_watermark watermark,
+                                                  struct bucket_alloc_state *s,
+                                                  struct closure *cl)
+{
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct open_bucket *ob = NULL;
+       u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+       u64 alloc_cursor = alloc_start;
+       int ret;
+
+       BUG_ON(ca->new_fs_bucket_idx);
+again:
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
+                                    POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
+               if (k.k->p.inode != ca->dev_idx)
+                       break;
+
+               for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
+                    alloc_cursor < k.k->p.offset;
+                    alloc_cursor++) {
+                       ret = btree_trans_too_many_iters(trans);
+                       if (ret) {
+                               ob = ERR_PTR(ret);
+                               break;
+                       }
+
+                       s->buckets_seen++;
+
+                       ob = try_alloc_bucket(trans, ca, watermark,
+                                             alloc_cursor, s, k, cl);
+                       if (ob) {
+                               set_btree_iter_dontneed(&iter);
+                               break;
+                       }
+               }
 
-       bch2_wake_allocator(ca);
+               if (ob || ret)
+                       break;
+       }
+       bch2_trans_iter_exit(trans, &iter);
+
+       ca->alloc_cursor = alloc_cursor;
+
+       if (!ob && ret)
+               ob = ERR_PTR(ret);
+
+       if (!ob && alloc_start > ca->mi.first_bucket) {
+               alloc_cursor = alloc_start = ca->mi.first_bucket;
+               goto again;
+       }
+
+       return ob;
+}
+
+/**
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans:     transaction object
+ * @ca:                device to allocate from
+ * @watermark: how important is this allocation?
+ * @cl:                if not NULL, closure to be used to wait if buckets not available
+ * @usage:     also returns the current device usage, as a secondary output
+ *
+ * Returns:    an open_bucket on success, or an ERR_PTR() on failure.
+ */
+static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+                                     struct bch_dev *ca,
+                                     enum bch_watermark watermark,
+                                     struct closure *cl,
+                                     struct bch_dev_usage *usage)
+{
+       struct bch_fs *c = trans->c;
+       struct open_bucket *ob = NULL;
+       bool freespace = READ_ONCE(ca->mi.freespace_initialized);
+       u64 avail;
+       struct bucket_alloc_state s = { 0 };
+       bool waiting = false;
+again:
+       bch2_dev_usage_read_fast(ca, usage);
+       avail = dev_buckets_free(ca, *usage, watermark);
+
+       if (usage->d[BCH_DATA_need_discard].buckets > avail)
+               bch2_do_discards(c);
+
+       if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+               bch2_do_gc_gens(c);
+
+       if (should_invalidate_buckets(ca, *usage))
+               bch2_do_invalidates(c);
+
+       if (!avail) {
+               if (cl && !waiting) {
+                       closure_wait(&c->freelist_wait, cl);
+                       waiting = true;
+                       goto again;
+               }
+
+               track_event_change(&c->times[BCH_TIME_blocked_allocate],
+                                  &c->blocked_allocate, true);
+
+               ob = ERR_PTR(-BCH_ERR_freelist_empty);
+               goto err;
+       }
+
+       if (waiting)
+               closure_wake_up(&c->freelist_wait);
+alloc:
+       ob = likely(freespace)
+               ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+               : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
+
+       if (s.skipped_need_journal_commit * 2 > avail)
+               bch2_journal_flush_async(&c->journal, NULL);
+
+       if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+               freespace = false;
+               goto alloc;
+       }
+err:
+       if (!ob)
+               ob = ERR_PTR(-BCH_ERR_no_buckets_found);
+
+       if (!IS_ERR(ob))
+               trace_and_count(c, bucket_alloc, ca,
+                               bch2_watermarks[watermark],
+                               ob->bucket,
+                               usage->d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               &s,
+                               cl == NULL,
+                               "");
+       else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
+               trace_and_count(c, bucket_alloc_fail, ca,
+                               bch2_watermarks[watermark],
+                               0,
+                               usage->d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               &s,
+                               cl == NULL,
+                               bch2_err_str(PTR_ERR(ob)));
 
-       trace_bucket_alloc(ca, reserve);
+       return ob;
+}
+
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
+                                     enum bch_watermark watermark,
+                                     struct closure *cl)
+{
+       struct bch_dev_usage usage;
+       struct open_bucket *ob;
+
+       bch2_trans_do(c, NULL, NULL, 0,
+                     PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
+                                                       cl, &usage)));
        return ob;
 }
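+
+/*
+ * (Editor's sketch, hypothetical caller) Allocating a single bucket outside
+ * of an existing btree transaction:
+ *
+ *	struct open_bucket *ob =
+ *		bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+ *	if (IS_ERR(ob))
+ *		return PTR_ERR(ob);
+ *	// ... write to ob->bucket, consuming ob->sectors_free ...
+ *	bch2_open_bucket_put(c, ob);
+ */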
 
@@ -322,21 +633,21 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
                                          struct bch_devs_mask *devs)
 {
        struct dev_alloc_list ret = { .nr = 0 };
-       struct bch_dev *ca;
        unsigned i;
 
-       for_each_member_device_rcu(ca, c, i, devs)
+       for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
                ret.devs[ret.nr++] = i;
 
        bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
        return ret;
 }
 
-void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
-                              struct dev_stripe_state *stripe)
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe,
+                              struct bch_dev_usage *usage)
 {
        u64 *v = stripe->next_alloc + ca->dev_idx;
-       u64 free_space = dev_buckets_free(c, ca);
+       u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
@@ -352,353 +663,492 @@ void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
                *v = *v < scale ? 0 : *v - scale;
 }
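+
+/*
+ * (Editor's note) stripe->next_alloc accumulates roughly 1/free_space per
+ * allocation, so devices with more free space accumulate more slowly and -
+ * presumably via dev_stripe_cmp() in bch2_dev_alloc_list() - sort earlier,
+ * receiving proportionally more new data.
+ */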
 
-#define BUCKET_MAY_ALLOC_PARTIAL       (1 << 0)
-#define BUCKET_ALLOC_USE_DURABILITY    (1 << 1)
+void bch2_dev_stripe_increment(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe)
+{
+       struct bch_dev_usage usage;
+
+       bch2_dev_usage_read_fast(ca, &usage);
+       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+}
+
+static int add_new_bucket(struct bch_fs *c,
+                          struct open_buckets *ptrs,
+                          struct bch_devs_mask *devs_may_alloc,
+                          unsigned nr_replicas,
+                          unsigned *nr_effective,
+                          bool *have_cache,
+                          unsigned flags,
+                          struct open_bucket *ob)
+{
+       unsigned durability =
+               bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+       BUG_ON(*nr_effective >= nr_replicas);
+
+       __clear_bit(ob->dev, devs_may_alloc->d);
+       *nr_effective   += durability;
+       *have_cache     |= !durability;
+
+       ob_push(c, ptrs, ob);
+
+       if (*nr_effective >= nr_replicas)
+               return 1;
+       if (ob->ec)
+               return 1;
+       return 0;
+}
 
-static int bch2_bucket_alloc_set(struct bch_fs *c,
-                                struct open_buckets *ptrs,
-                                struct dev_stripe_state *stripe,
-                                struct bch_devs_mask *devs_may_alloc,
-                                unsigned nr_replicas,
-                                unsigned *nr_effective,
-                                bool *have_cache,
-                                enum alloc_reserve reserve,
-                                unsigned flags,
-                                struct closure *cl)
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+                     struct open_buckets *ptrs,
+                     struct dev_stripe_state *stripe,
+                     struct bch_devs_mask *devs_may_alloc,
+                     unsigned nr_replicas,
+                     unsigned *nr_effective,
+                     bool *have_cache,
+                     unsigned flags,
+                     enum bch_data_type data_type,
+                     enum bch_watermark watermark,
+                     struct closure *cl)
 {
+       struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted =
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
+       unsigned dev;
        struct bch_dev *ca;
-       bool alloc_failure = false;
-       unsigned i, durability;
+       int ret = -BCH_ERR_insufficient_devices;
+       unsigned i;
 
        BUG_ON(*nr_effective >= nr_replicas);
 
        for (i = 0; i < devs_sorted.nr; i++) {
+               struct bch_dev_usage usage;
                struct open_bucket *ob;
 
-               ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
+               dev = devs_sorted.devs[i];
+
+               rcu_read_lock();
+               ca = rcu_dereference(c->devs[dev]);
+               if (ca)
+                       percpu_ref_get(&ca->ref);
+               rcu_read_unlock();
+
                if (!ca)
                        continue;
 
-               if (!ca->mi.durability && *have_cache)
+               if (!ca->mi.durability && *have_cache) {
+                       percpu_ref_put(&ca->ref);
                        continue;
+               }
 
-               ob = bch2_bucket_alloc(c, ca, reserve,
-                               flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
-               if (IS_ERR(ob)) {
-                       enum bucket_alloc_ret ret = -PTR_ERR(ob);
-
-                       WARN_ON(reserve == RESERVE_MOVINGGC &&
-                               ret != OPEN_BUCKETS_EMPTY);
+               ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
+               if (!IS_ERR(ob))
+                       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+               percpu_ref_put(&ca->ref);
 
-                       if (cl)
-                               return -EAGAIN;
-                       if (ret == OPEN_BUCKETS_EMPTY)
-                               return -ENOSPC;
-                       alloc_failure = true;
+               if (IS_ERR(ob)) {
+                       ret = PTR_ERR(ob);
+                       if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
+                               break;
                        continue;
                }
 
-               durability = (flags & BUCKET_ALLOC_USE_DURABILITY)
-                       ? ca->mi.durability : 1;
-
-               __clear_bit(ca->dev_idx, devs_may_alloc->d);
-               *nr_effective   += durability;
-               *have_cache     |= !durability;
-
-               ob_push(c, ptrs, ob);
+               ob->data_type = data_type;
 
-               bch2_dev_stripe_increment(c, ca, stripe);
-
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+               if (add_new_bucket(c, ptrs, devs_may_alloc,
+                                  nr_replicas, nr_effective,
+                                  have_cache, flags, ob)) {
+                       ret = 0;
+                       break;
+               }
        }
 
-       return alloc_failure ? -ENOSPC : -EROFS;
+       return ret;
 }
 
 /* Allocate from stripes: */
 
-/*
- * XXX: use a higher watermark for allocating open buckets here:
- */
-static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
-{
-       struct bch_devs_mask devs;
-       struct open_bucket *ob;
-       unsigned i, nr_have = 0, nr_data =
-               min_t(unsigned, h->nr_active_devs,
-                     EC_STRIPE_MAX) - h->redundancy;
-       bool have_cache = true;
-       int ret = 0;
-
-       BUG_ON(h->blocks.nr > nr_data);
-       BUG_ON(h->parity.nr > h->redundancy);
-
-       devs = h->devs;
-
-       open_bucket_for_each(c, &h->parity, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
-       open_bucket_for_each(c, &h->blocks, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
-
-       percpu_down_read_preempt_disable(&c->mark_lock);
-       rcu_read_lock();
-
-       if (h->parity.nr < h->redundancy) {
-               nr_have = h->parity.nr;
-
-               ret = bch2_bucket_alloc_set(c, &h->parity,
-                                           &h->parity_stripe,
-                                           &devs,
-                                           h->redundancy,
-                                           &nr_have,
-                                           &have_cache,
-                                           RESERVE_NONE,
-                                           0,
-                                           NULL);
-               if (ret)
-                       goto err;
-       }
-
-       if (h->blocks.nr < nr_data) {
-               nr_have = h->blocks.nr;
-
-               ret = bch2_bucket_alloc_set(c, &h->blocks,
-                                           &h->block_stripe,
-                                           &devs,
-                                           nr_data,
-                                           &nr_have,
-                                           &have_cache,
-                                           RESERVE_NONE,
-                                           0,
-                                           NULL);
-               if (ret)
-                       goto err;
-       }
-
-       rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
-
-       return bch2_ec_stripe_new_alloc(c, h);
-err:
-       rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
-       return -1;
-}
-
 /*
  * if we can't allocate a new stripe because there are already too many
  * partially filled stripes, force allocating from an existing stripe even when
  * it's to a device we don't want:
  */
 
-static void bucket_alloc_from_stripe(struct bch_fs *c,
-                                    struct open_buckets *ptrs,
-                                    struct write_point *wp,
-                                    struct bch_devs_mask *devs_may_alloc,
-                                    u16 target,
-                                    unsigned erasure_code,
-                                    unsigned nr_replicas,
-                                    unsigned *nr_effective,
-                                    bool *have_cache)
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
+                        struct open_buckets *ptrs,
+                        struct write_point *wp,
+                        struct bch_devs_mask *devs_may_alloc,
+                        u16 target,
+                        unsigned nr_replicas,
+                        unsigned *nr_effective,
+                        bool *have_cache,
+                        enum bch_watermark watermark,
+                        unsigned flags,
+                        struct closure *cl)
 {
+       struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted;
        struct ec_stripe_head *h;
        struct open_bucket *ob;
-       struct bch_dev *ca;
        unsigned i, ec_idx;
-
-       if (!erasure_code)
-               return;
+       int ret = 0;
 
        if (nr_replicas < 2)
-               return;
+               return 0;
 
        if (ec_open_bucket(c, ptrs))
-               return;
+               return 0;
 
-       h = bch2_ec_stripe_head_get(c, target, erasure_code, nr_replicas - 1);
+       h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
+       if (IS_ERR(h))
+               return PTR_ERR(h);
        if (!h)
-               return;
-
-       if (!h->s && ec_stripe_alloc(c, h))
-               goto out_put_head;
+               return 0;
 
-       rcu_read_lock();
        devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
-       rcu_read_unlock();
 
        for (i = 0; i < devs_sorted.nr; i++)
-               open_bucket_for_each(c, &h->s->blocks, ob, ec_idx)
-                       if (ob->ptr.dev == devs_sorted.devs[i] &&
+               for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
+                       if (!h->s->blocks[ec_idx])
+                               continue;
+
+                       ob = c->open_buckets + h->s->blocks[ec_idx];
+                       if (ob->dev == devs_sorted.devs[i] &&
                            !test_and_set_bit(ec_idx, h->s->blocks_allocated))
                                goto got_bucket;
+               }
        goto out_put_head;
 got_bucket:
-       ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
        ob->ec_idx      = ec_idx;
        ob->ec          = h->s;
+       ec_stripe_new_get(h->s, STRIPE_REF_io);
 
-       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
-       *nr_effective   += ca->mi.durability;
-       *have_cache     |= !ca->mi.durability;
-
-       ob_push(c, ptrs, ob);
-       atomic_inc(&h->s->pin);
+       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                            nr_replicas, nr_effective,
+                            have_cache, flags, ob);
 out_put_head:
-       bch2_ec_stripe_head_put(h);
+       bch2_ec_stripe_head_put(c, h);
+       return ret;
 }
 
 /* Sector allocator */
 
-static void get_buckets_from_writepoint(struct bch_fs *c,
-                                       struct open_buckets *ptrs,
-                                       struct write_point *wp,
-                                       struct bch_devs_mask *devs_may_alloc,
-                                       unsigned nr_replicas,
-                                       unsigned *nr_effective,
-                                       bool *have_cache,
-                                       bool need_ec)
+static bool want_bucket(struct bch_fs *c,
+                       struct write_point *wp,
+                       struct bch_devs_mask *devs_may_alloc,
+                       bool *have_cache, bool ec,
+                       struct open_bucket *ob)
+{
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+       if (!test_bit(ob->dev, devs_may_alloc->d))
+               return false;
+
+       if (ob->data_type != wp->data_type)
+               return false;
+
+       if (!ca->mi.durability &&
+           (wp->data_type == BCH_DATA_btree || ec || *have_cache))
+               return false;
+
+       if (ec != (ob->ec != NULL))
+               return false;
+
+       return true;
+}
+
+static int bucket_alloc_set_writepoint(struct bch_fs *c,
+                                      struct open_buckets *ptrs,
+                                      struct write_point *wp,
+                                      struct bch_devs_mask *devs_may_alloc,
+                                      unsigned nr_replicas,
+                                      unsigned *nr_effective,
+                                      bool *have_cache,
+                                      bool ec, unsigned flags)
 {
        struct open_buckets ptrs_skip = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;
+       int ret = 0;
 
        open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
-               if (*nr_effective < nr_replicas &&
-                   test_bit(ob->ptr.dev, devs_may_alloc->d) &&
-                   (ca->mi.durability ||
-                    (wp->type == BCH_DATA_USER && !*have_cache)) &&
-                   (ob->ec || !need_ec)) {
-                       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
-                       *nr_effective   += ca->mi.durability;
-                       *have_cache     |= !ca->mi.durability;
-
-                       ob_push(c, ptrs, ob);
-               } else {
+               if (!ret && want_bucket(c, wp, devs_may_alloc,
+                                       have_cache, ec, ob))
+                       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                                      nr_replicas, nr_effective,
+                                      have_cache, flags, ob);
+               else
                        ob_push(c, &ptrs_skip, ob);
-               }
        }
        wp->ptrs = ptrs_skip;
+
+       return ret;
+}
+
+static int bucket_alloc_set_partial(struct bch_fs *c,
+                                   struct open_buckets *ptrs,
+                                   struct write_point *wp,
+                                   struct bch_devs_mask *devs_may_alloc,
+                                   unsigned nr_replicas,
+                                   unsigned *nr_effective,
+                                   bool *have_cache, bool ec,
+                                   enum bch_watermark watermark,
+                                   unsigned flags)
+{
+       int i, ret = 0;
+
+       if (!c->open_buckets_partial_nr)
+               return 0;
+
+       spin_lock(&c->freelist_lock);
+
+       if (!c->open_buckets_partial_nr)
+               goto unlock;
+
+       for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
+               struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
+
+               if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+                       struct bch_dev_usage usage;
+                       u64 avail;
+
+                       bch2_dev_usage_read_fast(ca, &usage);
+                       avail = dev_buckets_free(ca, usage, watermark);
+                       if (!avail)
+                               continue;
+
+                       array_remove_item(c->open_buckets_partial,
+                                         c->open_buckets_partial_nr,
+                                         i);
+                       ob->on_partial_list = false;
+
+                       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                                            nr_replicas, nr_effective,
+                                            have_cache, flags, ob);
+                       if (ret)
+                               break;
+               }
+       }
+unlock:
+       spin_unlock(&c->freelist_lock);
+       return ret;
 }
 
-static int open_bucket_add_buckets(struct bch_fs *c,
-                                  struct open_buckets *ptrs,
-                                  struct write_point *wp,
-                                  struct bch_devs_list *devs_have,
-                                  u16 target,
-                                  unsigned erasure_code,
-                                  unsigned nr_replicas,
-                                  unsigned *nr_effective,
-                                  bool *have_cache,
-                                  enum alloc_reserve reserve,
-                                  struct closure *_cl)
+static int __open_bucket_add_buckets(struct btree_trans *trans,
+                       struct open_buckets *ptrs,
+                       struct write_point *wp,
+                       struct bch_devs_list *devs_have,
+                       u16 target,
+                       bool erasure_code,
+                       unsigned nr_replicas,
+                       unsigned *nr_effective,
+                       bool *have_cache,
+                       enum bch_watermark watermark,
+                       unsigned flags,
+                       struct closure *_cl)
 {
+       struct bch_fs *c = trans->c;
        struct bch_devs_mask devs;
        struct open_bucket *ob;
        struct closure *cl = NULL;
-       unsigned i, flags = BUCKET_ALLOC_USE_DURABILITY;
+       unsigned i;
        int ret;
 
-       if (wp->type == BCH_DATA_USER)
-               flags |= BUCKET_MAY_ALLOC_PARTIAL;
-
-       rcu_read_lock();
-       devs = target_rw_devs(c, wp->type, target);
-       rcu_read_unlock();
+       devs = target_rw_devs(c, wp->data_type, target);
 
        /* Don't allocate from devices we already have pointers to: */
-       for (i = 0; i < devs_have->nr; i++)
-               __clear_bit(devs_have->devs[i], devs.d);
+       darray_for_each(*devs_have, i)
+               __clear_bit(*i, devs.d);
 
        open_bucket_for_each(c, ptrs, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
+               __clear_bit(ob->dev, devs.d);
 
-       if (erasure_code) {
-               get_buckets_from_writepoint(c, ptrs, wp, &devs,
-                                           nr_replicas, nr_effective,
-                                           have_cache, true);
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+       if (erasure_code && ec_open_bucket(c, ptrs))
+               return 0;
+
+       ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
+                                nr_replicas, nr_effective,
+                                have_cache, erasure_code, flags);
+       if (ret)
+               return ret;
+
+       ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
+                                nr_replicas, nr_effective,
+                                have_cache, erasure_code, watermark, flags);
+       if (ret)
+               return ret;
 
-               bucket_alloc_from_stripe(c, ptrs, wp, &devs,
-                                        target, erasure_code,
+       if (erasure_code) {
+               ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
+                                        target,
                                         nr_replicas, nr_effective,
-                                        have_cache);
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+                                        have_cache,
+                                        watermark, flags, _cl);
+       } else {
+retry_blocking:
+               /*
+                * Try nonblocking first, so that if one device is full we'll try from
+                * other devices:
+                */
+               ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
+                                       nr_replicas, nr_effective, have_cache,
+                                       flags, wp->data_type, watermark, cl);
+               if (ret &&
+                   !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+                   !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
+                   !cl && _cl) {
+                       cl = _cl;
+                       goto retry_blocking;
+               }
        }
 
-       get_buckets_from_writepoint(c, ptrs, wp, &devs,
-                                   nr_replicas, nr_effective,
-                                   have_cache, false);
-       if (*nr_effective >= nr_replicas)
-               return 0;
+       return ret;
+}
 
-       percpu_down_read_preempt_disable(&c->mark_lock);
-       rcu_read_lock();
+static int open_bucket_add_buckets(struct btree_trans *trans,
+                       struct open_buckets *ptrs,
+                       struct write_point *wp,
+                       struct bch_devs_list *devs_have,
+                       u16 target,
+                       unsigned erasure_code,
+                       unsigned nr_replicas,
+                       unsigned *nr_effective,
+                       bool *have_cache,
+                       enum bch_watermark watermark,
+                       unsigned flags,
+                       struct closure *cl)
+{
+       int ret;
 
-retry_blocking:
-       /*
-        * Try nonblocking first, so that if one device is full we'll try from
-        * other devices:
-        */
-       ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
+       if (erasure_code) {
+               ret = __open_bucket_add_buckets(trans, ptrs, wp,
+                               devs_have, target, erasure_code,
                                nr_replicas, nr_effective, have_cache,
-                               reserve, flags, cl);
-       if (ret && ret != -EROFS && !cl && _cl) {
-               cl = _cl;
-               goto retry_blocking;
+                               watermark, flags, cl);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                   bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+                   bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+                   bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+                       return ret;
+               if (*nr_effective >= nr_replicas)
+                       return 0;
        }
 
-       rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
-
-       return ret;
+       ret = __open_bucket_add_buckets(trans, ptrs, wp,
+                       devs_have, target, false,
+                       nr_replicas, nr_effective, have_cache,
+                       watermark, flags, cl);
+       return ret < 0 ? ret : 0;
 }
 
-void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
-                               struct open_buckets *obs,
-                               enum bch_data_type data_type)
+/**
+ * should_drop_bucket - check if this open_bucket should go away
+ * @ob:                open_bucket to check
+ * @c:         filesystem handle
+ * @ca:                if set, we're killing buckets for a particular device
+ * @ec:                if true, we're shutting down erasure coding and killing all ec
+ *             open_buckets
+ * Returns: true if we should kill this open_bucket
+ *
+ * We're killing open_buckets because we're shutting down a device, erasure
+ * coding, or the entire filesystem - check if this open_bucket matches:
+ */
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+                              struct bch_dev *ca, bool ec)
 {
-       struct open_buckets ptrs = { .nr = 0 };
-       struct open_bucket *ob, *ob2;
-       unsigned i, j;
-
-       open_bucket_for_each(c, obs, ob, i) {
-               bool drop = !ca || ob->ptr.dev == ca->dev_idx;
+       if (ec) {
+               return ob->ec != NULL;
+       } else if (ca) {
+               bool drop = ob->dev == ca->dev_idx;
+               struct open_bucket *ob2;
+               unsigned i;
 
                if (!drop && ob->ec) {
+                       unsigned nr_blocks;
+
                        mutex_lock(&ob->ec->lock);
-                       open_bucket_for_each(c, &ob->ec->blocks, ob2, j)
-                               drop |= ob2->ptr.dev == ca->dev_idx;
-                       open_bucket_for_each(c, &ob->ec->parity, ob2, j)
-                               drop |= ob2->ptr.dev == ca->dev_idx;
+                       nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+                       for (i = 0; i < nr_blocks; i++) {
+                               if (!ob->ec->blocks[i])
+                                       continue;
+
+                               ob2 = c->open_buckets + ob->ec->blocks[i];
+                               drop |= ob2->dev == ca->dev_idx;
+                       }
                        mutex_unlock(&ob->ec->lock);
                }
 
-               if (drop)
-                       bch2_open_bucket_put(c, ob);
-               else
-                       ob_push(c, &ptrs, ob);
+               return drop;
+       } else {
+               return true;
        }
-
-       *obs = ptrs;
 }
 
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
-                         struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+                                bool ec, struct write_point *wp)
 {
+       struct open_buckets ptrs = { .nr = 0 };
+       struct open_bucket *ob;
+       unsigned i;
+
        mutex_lock(&wp->lock);
-       bch2_open_buckets_stop_dev(c, ca, &wp->ptrs, wp->type);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               if (should_drop_bucket(ob, c, ca, ec))
+                       bch2_open_bucket_put(c, ob);
+               else
+                       ob_push(c, &ptrs, ob);
+       wp->ptrs = ptrs;
        mutex_unlock(&wp->lock);
 }
 
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+                           bool ec)
+{
+       unsigned i;
+
+       /* Next, close write points that point to this device... */
+       for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+               bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+       bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+       bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+       bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+       mutex_lock(&c->btree_reserve_cache_lock);
+       while (c->btree_reserve_cache_nr) {
+               struct btree_alloc *a =
+                       &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+               bch2_open_buckets_put(c, &a->ob);
+       }
+       mutex_unlock(&c->btree_reserve_cache_lock);
+
+       spin_lock(&c->freelist_lock);
+       i = 0;
+       while (i < c->open_buckets_partial_nr) {
+               struct open_bucket *ob =
+                       c->open_buckets + c->open_buckets_partial[i];
+
+               if (should_drop_bucket(ob, c, ca, ec)) {
+                       --c->open_buckets_partial_nr;
+                       swap(c->open_buckets_partial[i],
+                            c->open_buckets_partial[c->open_buckets_partial_nr]);
+                       ob->on_partial_list = false;
+                       spin_unlock(&c->freelist_lock);
+                       bch2_open_bucket_put(c, ob);
+                       spin_lock(&c->freelist_lock);
+               } else {
+                       i++;
+               }
+       }
+       spin_unlock(&c->freelist_lock);
+
+       bch2_ec_stop_dev(c, ca);
+}
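Two details of the partial-list loop above are worth noting. The lock is
dropped around bch2_open_bucket_put() because putting an open_bucket pushes it
back onto the freelist, which (going by __bch2_open_bucket_put()) retakes
c->freelist_lock itself; and removal is done by swapping with the last
element, which is O(1) but does not preserve order. A generic sketch of the
idiom, with illustrative names:

	/*
	 * Sketch: filter a dense array under a spinlock when the per-entry
	 * release function retakes the same lock (names are illustrative):
	 */
	spin_lock(&lock);
	i = 0;
	while (i < nr) {
		if (should_remove(&arr[i])) {
			swap(arr[i], arr[--nr]);	/* O(1), unordered */
			spin_unlock(&lock);
			release(&arr[nr]);		/* takes &lock itself */
			spin_lock(&lock);
			/* don't advance i: slot i now holds a new entry */
		} else {
			i++;
		}
	}
	spin_unlock(&lock);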
+
 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
                                                 unsigned long write_point)
 {
@@ -713,17 +1163,20 @@ static struct write_point *__writepoint_find(struct hlist_head *head,
 {
        struct write_point *wp;
 
+       rcu_read_lock();
        hlist_for_each_entry_rcu(wp, head, node)
                if (wp->write_point == write_point)
-                       return wp;
-
-       return NULL;
+                       goto out;
+       wp = NULL;
+out:
+       rcu_read_unlock();
+       return wp;
 }
 
 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
 {
        u64 stranded    = c->write_points_nr * c->bucket_size_max;
-       u64 free        = bch2_fs_sectors_free(c);
+       u64 free        = bch2_fs_usage_read_short(c).free;
 
        return stranded * factor > free;
 }
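For a sense of scale (illustrative numbers, not from the patch): with 32 write
points and a bucket_size_max of 1024 sectors, stranded = 32768 sectors, so at
factor 8 the check trips once free space falls below 262144 sectors - 128 MiB
at 512-byte sectors - and the write point pool is not allowed to grow further.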
@@ -741,10 +1194,12 @@ static bool try_increase_writepoints(struct bch_fs *c)
        return true;
 }
 
-static bool try_decrease_writepoints(struct bch_fs *c,
-                                    unsigned old_nr)
+static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp;
+       struct open_bucket *ob;
+       unsigned i;
 
        mutex_lock(&c->write_points_hash_lock);
        if (c->write_points_nr < old_nr) {
@@ -763,19 +1218,24 @@ static bool try_decrease_writepoints(struct bch_fs *c,
        hlist_del_rcu(&wp->node);
        mutex_unlock(&c->write_points_hash_lock);
 
-       bch2_writepoint_stop(c, NULL, wp);
+       bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               open_bucket_free_unused(c, ob);
+       wp->ptrs.nr = 0;
+       mutex_unlock(&wp->lock);
        return true;
 }
 
-static struct write_point *writepoint_find(struct bch_fs *c,
+static struct write_point *writepoint_find(struct btree_trans *trans,
                                           unsigned long write_point)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp, *oldest;
        struct hlist_head *head;
 
        if (!(write_point & 1UL)) {
                wp = (struct write_point *) write_point;
-               mutex_lock(&wp->lock);
+               bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                return wp;
        }
 
@@ -784,7 +1244,7 @@ restart_find:
        wp = __writepoint_find(head, write_point);
        if (wp) {
 lock_wp:
-               mutex_lock(&wp->lock);
+               bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                if (wp->write_point == write_point)
                        goto out;
                mutex_unlock(&wp->lock);
@@ -797,8 +1257,8 @@ restart_find_oldest:
                if (!oldest || time_before64(wp->last_used, oldest->last_used))
                        oldest = wp;
 
-       mutex_lock(&oldest->lock);
-       mutex_lock(&c->write_points_hash_lock);
+       bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+       bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
        if (oldest >= c->write_points + c->write_points_nr ||
            try_increase_writepoints(c)) {
                mutex_unlock(&c->write_points_hash_lock);
@@ -819,30 +1279,62 @@ restart_find_oldest:
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
 out:
-       wp->last_used = sched_clock();
+       wp->last_used = local_clock();
        return wp;
 }
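The `write_point & 1UL` test in writepoint_find() relies on
write_point_specifier being a tagged value: hashed identifiers have the low
bit set, while a direct pointer to a struct write_point is word-aligned and
therefore even. The constructors in alloc_foreground.h look roughly like this
(reproduced from memory; check the header):

	static inline struct write_point_specifier writepoint_hashed(unsigned long v)
	{
		return (struct write_point_specifier) { .v = v | 1 };
	}

	static inline struct write_point_specifier writepoint_ptr(struct write_point *wp)
	{
		return (struct write_point_specifier) { .v = (unsigned long) wp };
	}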
 
+static noinline void
+deallocate_extra_replicas(struct bch_fs *c,
+                         struct open_buckets *ptrs,
+                         struct open_buckets *ptrs_no_use,
+                         unsigned extra_replicas)
+{
+       struct open_buckets ptrs2 = { 0 };
+       struct open_bucket *ob;
+       unsigned i;
+
+       open_bucket_for_each(c, ptrs, ob, i) {
+               unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+               if (d && d <= extra_replicas) {
+                       extra_replicas -= d;
+                       ob_push(c, ptrs_no_use, ob);
+               } else {
+                       ob_push(c, &ptrs2, ob);
+               }
+       }
+
+       *ptrs = ptrs2;
+}
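An illustrative example: if the caller asked for nr_replicas = 2 but
allocation produced buckets of durability 2, 1 and 1 (nr_effective = 4,
extra_replicas = 2), the loop sheds exactly two units of durability onto
@ptrs_no_use - either the single durability-2 bucket or both durability-1
buckets, depending on their order in @ptrs. A bucket is only shed when its
whole durability fits within the surplus, and durability-0 cache buckets are
never shed, since the `d &&` guard skips them.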
+
 /*
  * Get us an open_bucket we can allocate from, return with it locked:
  */
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+                            unsigned target,
+                            unsigned erasure_code,
+                            struct write_point_specifier write_point,
+                            struct bch_devs_list *devs_have,
+                            unsigned nr_replicas,
+                            unsigned nr_replicas_required,
+                            enum bch_watermark watermark,
+                            unsigned flags,
+                            struct closure *cl,
+                            struct write_point **wp_ret)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp;
        struct open_bucket *ob;
        struct open_buckets ptrs;
        unsigned nr_effective, write_points_nr;
        bool have_cache;
-       int ret, i;
+       int ret;
+       int i;
+
+       if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
+               erasure_code = false;
+
+       BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
 
        BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
@@ -851,29 +1343,52 @@ retry:
        write_points_nr = c->write_points_nr;
        have_cache      = false;
 
-       wp = writepoint_find(c, write_point.v);
+       *wp_ret = wp = writepoint_find(trans, write_point.v);
 
        /* metadata may not allocate on cache devices: */
-       if (wp->type != BCH_DATA_USER)
+       if (wp->data_type != BCH_DATA_user)
                have_cache = true;
 
-       if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+       if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, cl);
-       } else {
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+                                             &have_cache, watermark,
+                                             flags, NULL);
+               if (!ret ||
+                   bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       goto alloc_done;
+
+               /* Don't retry from all devices if we're out of open buckets: */
+               if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
+                       ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, NULL);
-               if (!ret)
-                       goto alloc_done;
+                                             &have_cache, watermark,
+                                             flags, cl);
+                       if (!ret ||
+                           bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                           bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+                               goto alloc_done;
+               }
 
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+               /*
+                * Only try to allocate cache (durability = 0 devices) from the
+                * specified target:
+                */
+               have_cache = true;
+
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              0, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, cl);
+                                             &have_cache, watermark,
+                                             flags, cl);
+       } else {
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+                                             target, erasure_code,
+                                             nr_replicas, &nr_effective,
+                                             &have_cache, watermark,
+                                             flags, cl);
        }
 alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);
@@ -881,16 +1396,19 @@ alloc_done:
        if (erasure_code && !ec_open_bucket(c, &ptrs))
                pr_debug("failed to get ec bucket: ret %u", ret);
 
-       if (ret == -EROFS &&
+       if (ret == -BCH_ERR_insufficient_devices &&
            nr_effective >= nr_replicas_required)
                ret = 0;
 
        if (ret)
                goto err;
 
+       if (nr_effective > nr_replicas)
+               deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
+
        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
-               open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+               open_bucket_free_unused(c, ob);
 
        wp->ptrs = ptrs;
 
@@ -901,54 +1419,49 @@ alloc_done:
 
        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
 
-       verify_not_stale(c, &wp->ptrs);
-
-       return wp;
+       return 0;
 err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
                        ob_push(c, &ptrs, ob);
                else
-                       open_bucket_free_unused(c, ob,
-                                       wp->type == BCH_DATA_USER);
+                       open_bucket_free_unused(c, ob);
        wp->ptrs = ptrs;
 
        mutex_unlock(&wp->lock);
 
-       if (ret == -ENOSPC &&
-           try_decrease_writepoints(c, write_points_nr))
+       if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
+           try_decrease_writepoints(trans, write_points_nr))
                goto retry;
 
-       return ERR_PTR(ret);
-}
+       if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+           bch2_err_matches(ret, BCH_ERR_freelist_empty))
+               return cl
+                       ? -BCH_ERR_bucket_alloc_blocked
+                       : -BCH_ERR_ENOSPC_bucket_alloc;
 
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
-                                   struct bkey_i *k, unsigned sectors)
+       return ret;
+}
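The fallback ladder in bch2_alloc_sectors_start_trans() is easier to see
flattened (a summary, not code from the patch):

	1. try the specified target without a closure, i.e. without blocking
	2. if that failed with open_buckets_empty, retry the target, this time
	   willing to block on @cl; if it still reports open_buckets_empty,
	   the all-devices fallback is skipped, per the comment above
	3. otherwise retry with target 0 (all devices), but with have_cache
	   forced true, so durability-0 cache devices outside the target are
	   never picked
	4. on freelist_empty, try_decrease_writepoints() and start over; if
	   blocking was possible, report bucket_alloc_blocked rather than
	   ENOSPC_bucket_alloc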
 
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
 {
-       struct open_bucket *ob;
-       unsigned i;
-
-       BUG_ON(sectors > wp->sectors_free);
-       wp->sectors_free -= sectors;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
-       open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-               struct bch_extent_ptr tmp = ob->ptr;
-
-               tmp.cached = !ca->mi.durability &&
-                       wp->type == BCH_DATA_USER;
-
-               tmp.offset += ca->mi.bucket_size - ob->sectors_free;
-               bch2_bkey_append_ptr(k, tmp);
+       return (struct bch_extent_ptr) {
+               .type   = 1 << BCH_EXTENT_ENTRY_ptr,
+               .gen    = ob->gen,
+               .dev    = ob->dev,
+               .offset = bucket_to_sector(ca, ob->bucket) +
+                       ca->mi.bucket_size -
+                       ob->sectors_free,
+       };
+}
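Worked example with illustrative numbers: on a device with 1024-sector
buckets, an open_bucket on bucket 100 with 768 sectors still free yields
offset = 100 * 1024 + (1024 - 768) = 102656; i.e. the pointer addresses the
current fill position within the bucket, not the bucket's start.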
 
-               BUG_ON(sectors > ob->sectors_free);
-               ob->sectors_free -= sectors;
-       }
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
+                                   struct bkey_i *k, unsigned sectors,
+                                   bool cached)
+{
+       bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
 }
 
 /*
@@ -957,17 +1470,18 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
  */
 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
 {
-       struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
-       struct open_bucket *ob;
-       unsigned i;
-
-       open_bucket_for_each(c, &wp->ptrs, ob, i)
-               ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
-       wp->ptrs = keep;
+       bch2_alloc_sectors_done_inlined(c, wp);
+}
 
-       mutex_unlock(&wp->lock);
+static inline void writepoint_init(struct write_point *wp,
+                                  enum bch_data_type type)
+{
+       mutex_init(&wp->lock);
+       wp->data_type = type;
 
-       bch2_open_buckets_put(c, &ptrs);
+       INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+       INIT_LIST_HEAD(&wp->writes);
+       spin_lock_init(&wp->writes_lock);
 }
 
 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
@@ -990,16 +1504,121 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
                c->open_buckets_freelist = ob - c->open_buckets;
        }
 
-       writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
-       writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+       writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
+       writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
+       writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
 
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++) {
-               writepoint_init(wp, BCH_DATA_USER);
+               writepoint_init(wp, BCH_DATA_user);
 
-               wp->last_used   = sched_clock();
+               wp->last_used   = local_clock();
                wp->write_point = (unsigned long) wp;
                hlist_add_head_rcu(&wp->node,
                                   writepoint_hash(c, wp->write_point));
        }
 }
+
+static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
+{
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+       unsigned data_type = ob->data_type;
+       barrier(); /* READ_ONCE() doesn't work on bitfields */
+
+       prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+                  ob - c->open_buckets,
+                  atomic_read(&ob->pin),
+                  data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+                  ob->dev, ob->bucket, ob->gen,
+                  ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
+       if (ob->ec)
+               prt_printf(out, " ec idx %llu", ob->ec->idx);
+       if (ob->on_partial_list)
+               prt_str(out, " partial");
+       prt_newline(out);
+}
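Given the format string above, one line of this output would look something
like (values invented):

	34 ref 1 user 0:1523 gen 4 allocated 256/1024 partial

i.e. open_bucket 34, pin count 1, user data, device 0 bucket 1523, generation
4, 256 of 1024 sectors written, currently on the partial list.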
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct open_bucket *ob;
+
+       out->atomic++;
+
+       for (ob = c->open_buckets;
+            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+            ob++) {
+               spin_lock(&ob->lock);
+               if (ob->valid && !ob->on_partial_list)
+                       bch2_open_bucket_to_text(out, c, ob);
+               spin_unlock(&ob->lock);
+       }
+
+       --out->atomic;
+}
+
+void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       unsigned i;
+
+       out->atomic++;
+       spin_lock(&c->freelist_lock);
+
+       for (i = 0; i < c->open_buckets_partial_nr; i++)
+               bch2_open_bucket_to_text(out, c,
+                               c->open_buckets + c->open_buckets_partial[i]);
+
+       spin_unlock(&c->freelist_lock);
+       --out->atomic;
+}
+
+static const char * const bch2_write_point_states[] = {
+#define x(n)   #n,
+       WRITE_POINT_STATES()
+#undef x
+       NULL
+};
+
+static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
+                                    struct write_point *wp)
+{
+       struct open_bucket *ob;
+       unsigned i;
+
+       prt_printf(out, "%lu: ", wp->write_point);
+       prt_human_readable_u64(out, wp->sectors_allocated);
+
+       prt_printf(out, " last wrote: ");
+       bch2_pr_time_units(out, local_clock() - wp->last_used);
+
+       for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
+               prt_printf(out, " %s: ", bch2_write_point_states[i]);
+               bch2_pr_time_units(out, wp->time[i]);
+       }
+
+       prt_newline(out);
+
+       printbuf_indent_add(out, 2);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               bch2_open_bucket_to_text(out, c, ob);
+       printbuf_indent_sub(out, 2);
+}
+
+void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct write_point *wp;
+
+       prt_str(out, "Foreground write points\n");
+       for (wp = c->write_points;
+            wp < c->write_points + ARRAY_SIZE(c->write_points);
+            wp++)
+               bch2_write_point_to_text(out, c, wp);
+
+       prt_str(out, "Copygc write point\n");
+       bch2_write_point_to_text(out, c, &c->copygc_write_point);
+
+       prt_str(out, "Rebalance write point\n");
+       bch2_write_point_to_text(out, c, &c->rebalance_write_point);
+
+       prt_str(out, "Btree write point\n");
+       bch2_write_point_to_text(out, c, &c->btree_write_point);
+}