diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index dce77cc27cbe9c911bdc7ab638015cac270988c6..b0ff47998a9440912f940dc09e27b34e6341cb9e 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
+#include "btree_iter.h"
+#include "btree_update.h"
 #include "btree_gc.h"
 #include "buckets.h"
+#include "buckets_waiting_for_journal.h"
 #include "clock.h"
 #include "debug.h"
 #include "disk_groups.h"
 #include "ec.h"
-#include "io.h"
+#include "error.h"
+#include "io_write.h"
+#include "journal.h"
+#include "movinggc.h"
+#include "nocow_locking.h"
+#include "trace.h"
 
 #include <linux/math64.h>
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
-#include <trace/events/bcachefs.h>
+
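+/*
+ * Take a mutex without blocking while holding btree locks: if we can't
+ * trylock, drop the transaction's btree locks before sleeping on the mutex.
+ * Note that the transaction is left unlocked ("norelock").
+ */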
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
+                                          struct mutex *lock)
+{
+       if (!mutex_trylock(lock)) {
+               bch2_trans_unlock(trans);
+               mutex_lock(lock);
+       }
+}
+
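+/* Stringified watermark names, generated from the BCH_WATERMARKS() x-macro: */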
+const char * const bch2_watermarks[] = {
+#define x(t) #t,
+       BCH_WATERMARKS()
+#undef x
+       NULL
+};
 
 /*
  * Open buckets represent a bucket that's currently being allocated from.  They
  * serve two purposes:
  *
  *  - They track buckets that have been partially allocated, allowing for
  *    sub-bucket sized allocations - they're used by the sector allocator below
  *
  *  - They provide a reference to the buckets they own that mark and sweep GC
  *    can find, until the new allocation has a pointer to it inserted into the
  *    btree
  *
  * When allocating some space with the sector allocator, the allocation comes
  * with a reference to an open bucket - the caller is required to put that
  * reference _after_ doing the index update that makes its allocation reachable.
  */
 
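+/*
+ * Reset each device's allocation cursor, so the next bucket scan starts again
+ * from the beginning of the device:
+ */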
+void bch2_reset_alloc_cursors(struct bch_fs *c)
+{
+       rcu_read_lock();
+       for_each_member_device_rcu(c, ca, NULL)
+               ca->alloc_cursor = 0;
+       rcu_read_unlock();
+}
+
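+/*
+ * Open buckets are kept in a hash table keyed by (device, bucket), so the
+ * allocator can cheaply check whether a given bucket is already open:
+ */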
+static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
+{
+       open_bucket_idx_t idx = ob - c->open_buckets;
+       open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+       ob->hash = *slot;
+       *slot = idx;
+}
+
+static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
+{
+       open_bucket_idx_t idx = ob - c->open_buckets;
+       open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
+
+       while (*slot != idx) {
+               BUG_ON(!*slot);
+               slot = &c->open_buckets[*slot].hash;
+       }
+
+       *slot = ob->hash;
+       ob->hash = 0;
+}
+
 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 {
-       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
        if (ob->ec) {
-               bch2_ec_bucket_written(c, ob);
+               ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
                return;
        }
 
        percpu_down_read(&c->mark_lock);
        spin_lock(&ob->lock);
 
-       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);
        ob->valid = false;
-       ob->type = 0;
+       ob->data_type = 0;
 
        spin_unlock(&ob->lock);
        percpu_up_read(&c->mark_lock);
 
        spin_lock(&c->freelist_lock);
+       bch2_open_bucket_hash_remove(c, ob);
+
        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;
 
@@ -81,8 +137,7 @@ void bch2_open_bucket_write_error(struct bch_fs *c,
        unsigned i;
 
        open_bucket_for_each(c, obs, ob, i)
-               if (ob->ptr.dev == dev &&
-                   ob->ec)
+               if (ob->dev == dev && ob->ec)
                        bch2_ec_bucket_cancel(c, ob);
 }
 
@@ -95,192 +150,472 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
        ob = c->open_buckets + c->open_buckets_freelist;
        c->open_buckets_freelist = ob->freelist;
        atomic_set(&ob->pin, 1);
-       ob->type = 0;
+       ob->data_type = 0;
 
        c->open_buckets_nr_free--;
        return ob;
 }
 
-static void open_bucket_free_unused(struct bch_fs *c,
-                                   struct write_point *wp,
-                                   struct open_bucket *ob)
-{
-       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-       bool may_realloc = wp->type == BCH_DATA_user;
-
-       BUG_ON(ca->open_buckets_partial_nr >
-              ARRAY_SIZE(ca->open_buckets_partial));
-
-       if (ca->open_buckets_partial_nr <
-           ARRAY_SIZE(ca->open_buckets_partial) &&
-           may_realloc) {
-               spin_lock(&c->freelist_lock);
-               ob->on_partial_list = true;
-               ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
-                       ob - c->open_buckets;
-               spin_unlock(&c->freelist_lock);
-
-               closure_wake_up(&c->open_buckets_wait);
-               closure_wake_up(&c->freelist_wait);
-       } else {
-               bch2_open_bucket_put(c, ob);
-       }
-}
-
-static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
+static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
 {
-#ifdef CONFIG_BCACHEFS_DEBUG
-       struct open_bucket *ob;
-       unsigned i;
+       BUG_ON(c->open_buckets_partial_nr >=
+              ARRAY_SIZE(c->open_buckets_partial));
 
-       open_bucket_for_each(c, obs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+       spin_lock(&c->freelist_lock);
+       ob->on_partial_list = true;
+       c->open_buckets_partial[c->open_buckets_partial_nr++] =
+               ob - c->open_buckets;
+       spin_unlock(&c->freelist_lock);
 
-               BUG_ON(ptr_stale(ca, &ob->ptr));
-       }
-#endif
+       closure_wake_up(&c->open_buckets_wait);
+       closure_wake_up(&c->freelist_wait);
 }
 
 /* _only_ for allocating the journal on a new device: */
 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 {
-       struct bucket_array *buckets;
-       ssize_t b;
+       while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
+               u64 b = ca->new_fs_bucket_idx++;
 
-       rcu_read_lock();
-       buckets = bucket_array(ca);
-
-       for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
-               if (is_available_bucket(buckets->b[b].mark) &&
-                   (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)) &&
-                   !buckets->b[b].mark.owned_by_allocator)
-                       goto success;
-       b = -1;
-success:
-       rcu_read_unlock();
-       return b;
+               if (!is_superblock_bucket(ca, b) &&
+                   (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
+                       return b;
+       }
+
+       return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-       switch (reserve) {
-       case RESERVE_BTREE:
-       case RESERVE_BTREE_MOVINGGC:
+       switch (watermark) {
+       case BCH_WATERMARK_reclaim:
                return 0;
-       case RESERVE_MOVINGGC:
+       case BCH_WATERMARK_btree:
+       case BCH_WATERMARK_btree_copygc:
                return OPEN_BUCKETS_COUNT / 4;
+       case BCH_WATERMARK_copygc:
+               return OPEN_BUCKETS_COUNT / 3;
        default:
                return OPEN_BUCKETS_COUNT / 2;
        }
 }
 
-/**
- * bch_bucket_alloc - allocate a single bucket from a specific device
- *
- * Returns index of bucket on success, 0 on failure
- * */
-struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-                                     enum alloc_reserve reserve,
-                                     bool may_alloc_partial,
-                                     struct closure *cl)
+static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
+                                             u64 bucket,
+                                             enum bch_watermark watermark,
+                                             const struct bch_alloc_v4 *a,
+                                             struct bucket_alloc_state *s,
+                                             struct closure *cl)
 {
        struct open_bucket *ob;
-       long b = 0;
 
-       spin_lock(&c->freelist_lock);
+       if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
+               s->skipped_nouse++;
+               return NULL;
+       }
 
-       if (may_alloc_partial) {
-               int i;
+       if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
+               s->skipped_open++;
+               return NULL;
+       }
 
-               for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
-                       ob = c->open_buckets + ca->open_buckets_partial[i];
+       if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+                       c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
+               s->skipped_need_journal_commit++;
+               return NULL;
+       }
 
-                       if (reserve <= ob->alloc_reserve) {
-                               array_remove_item(ca->open_buckets_partial,
-                                                 ca->open_buckets_partial_nr,
-                                                 i);
-                               ob->on_partial_list = false;
-                               ob->alloc_reserve = reserve;
-                               spin_unlock(&c->freelist_lock);
-                               return ob;
-                       }
-               }
+       if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+               s->skipped_nocow++;
+               return NULL;
        }
 
-       if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+       spin_lock(&c->freelist_lock);
+
+       if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
                if (cl)
                        closure_wait(&c->open_buckets_wait, cl);
 
-               if (!c->blocked_allocate_open_bucket)
-                       c->blocked_allocate_open_bucket = local_clock();
+               track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket],
+                                  &c->blocked_allocate_open_bucket, true);
+               spin_unlock(&c->freelist_lock);
+               return ERR_PTR(-BCH_ERR_open_buckets_empty);
+       }
 
+       /* Recheck under lock: */
+       if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
                spin_unlock(&c->freelist_lock);
-               trace_open_bucket_alloc_fail(ca, reserve);
-               return ERR_PTR(-OPEN_BUCKETS_EMPTY);
+               s->skipped_open++;
+               return NULL;
        }
 
-       if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
-               goto out;
+       ob = bch2_open_bucket_alloc(c);
 
-       switch (reserve) {
-       case RESERVE_BTREE_MOVINGGC:
-       case RESERVE_MOVINGGC:
-               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
-                       goto out;
-               break;
-       default:
-               break;
-       }
+       spin_lock(&ob->lock);
+
+       ob->valid       = true;
+       ob->sectors_free = ca->mi.bucket_size;
+       ob->dev         = ca->dev_idx;
+       ob->gen         = a->gen;
+       ob->bucket      = bucket;
+       spin_unlock(&ob->lock);
 
-       if (cl)
-               closure_wait(&c->freelist_wait, cl);
+       ca->nr_open_buckets++;
+       bch2_open_bucket_hash_add(c, ob);
+
+       track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket],
+                          &c->blocked_allocate_open_bucket, false);
 
-       if (!c->blocked_allocate)
-               c->blocked_allocate = local_clock();
+       track_event_change(&c->times[BCH_TIME_blocked_allocate],
+                          &c->blocked_allocate, false);
 
        spin_unlock(&c->freelist_lock);
+       return ob;
+}
 
-       trace_bucket_alloc_fail(ca, reserve);
-       return ERR_PTR(-FREELIST_EMPTY);
-out:
-       verify_not_on_freelist(c, ca, b);
+static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
+                                           enum bch_watermark watermark, u64 free_entry,
+                                           struct bucket_alloc_state *s,
+                                           struct bkey_s_c freespace_k,
+                                           struct closure *cl)
+{
+       struct bch_fs *c = trans->c;
+       struct btree_iter iter = { NULL };
+       struct bkey_s_c k;
+       struct open_bucket *ob;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
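+       /* Freespace keys pack the bucket in the low 56 bits, genbits in the high 8: */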
+       u64 b = free_entry & ~(~0ULL << 56);
+       unsigned genbits = free_entry >> 56;
+       struct printbuf buf = PRINTBUF;
+       int ret;
 
-       ob = bch2_open_bucket_alloc(c);
+       if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
+               prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
+                      "  freespace key ",
+                       ca->mi.first_bucket, ca->mi.nbuckets);
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
+       }
 
-       spin_lock(&ob->lock);
+       k = bch2_bkey_get_iter(trans, &iter,
+                              BTREE_ID_alloc, POS(ca->dev_idx, b),
+                              BTREE_ITER_CACHED);
+       ret = bkey_err(k);
+       if (ret) {
+               ob = ERR_PTR(ret);
+               goto err;
+       }
 
-       ob->valid       = true;
-       ob->sectors_free = ca->mi.bucket_size;
-       ob->alloc_reserve = reserve;
-       ob->ptr         = (struct bch_extent_ptr) {
-               .type   = 1 << BCH_EXTENT_ENTRY_ptr,
-               .gen    = bucket(ca, b)->mark.gen,
-               .offset = bucket_to_sector(ca, b),
-               .dev    = ca->dev_idx,
-       };
+       a = bch2_alloc_to_v4(k, &a_convert);
 
-       spin_unlock(&ob->lock);
+       if (a->data_type != BCH_DATA_free) {
+               if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+                       ob = NULL;
+                       goto err;
+               }
 
-       if (c->blocked_allocate_open_bucket) {
-               bch2_time_stats_update(
-                       &c->times[BCH_TIME_blocked_allocate_open_bucket],
-                       c->blocked_allocate_open_bucket);
-               c->blocked_allocate_open_bucket = 0;
+               prt_printf(&buf, "non free bucket in freespace btree\n"
+                      "  freespace key ");
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               prt_printf(&buf, "\n  ");
+               bch2_bkey_val_to_text(&buf, c, k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
        }
 
-       if (c->blocked_allocate) {
-               bch2_time_stats_update(
-                       &c->times[BCH_TIME_blocked_allocate],
-                       c->blocked_allocate);
-               c->blocked_allocate = 0;
+       if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
+           c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
+               prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
+                      "  freespace key ",
+                      genbits, alloc_freespace_genbits(*a) >> 56);
+               bch2_bkey_val_to_text(&buf, c, freespace_k);
+               prt_printf(&buf, "\n  ");
+               bch2_bkey_val_to_text(&buf, c, k);
+               bch2_trans_inconsistent(trans, "%s", buf.buf);
+               ob = ERR_PTR(-EIO);
+               goto err;
        }
 
-       ca->nr_open_buckets++;
-       spin_unlock(&c->freelist_lock);
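+       /*
+        * Until fsck has verified backpointers, double check that the bucket
+        * really is empty - it may still have backpointers pointing into it:
+        */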
+       if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
+               struct bch_backpointer bp;
+               struct bpos bp_pos = POS_MIN;
+
+               ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+                                               &bp_pos, &bp,
+                                               BTREE_ITER_NOPRESERVE);
+               if (ret) {
+                       ob = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (!bkey_eq(bp_pos, POS_MAX)) {
+                       /*
+                        * Bucket may have data in it - we don't call
+                        * bch2_trans_inconsistent() because fsck hasn't
+                        * finished yet
+                        */
+                       ob = NULL;
+                       goto err;
+               }
+       }
+
+       ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
+       if (!ob)
+               set_btree_iter_dontneed(&iter);
+err:
+       if (iter.path)
+               set_btree_iter_dontneed(&iter);
+       bch2_trans_iter_exit(trans, &iter);
+       printbuf_exit(&buf);
+       return ob;
+}
+
+/*
+ * This path is used before the freespace btree is initialized:
+ *
+ * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
+ * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
+ */
+static noinline struct open_bucket *
+bch2_bucket_alloc_early(struct btree_trans *trans,
+                       struct bch_dev *ca,
+                       enum bch_watermark watermark,
+                       struct bucket_alloc_state *s,
+                       struct closure *cl)
+{
+       struct btree_iter iter, citer;
+       struct bkey_s_c k, ck;
+       struct open_bucket *ob = NULL;
+       u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+       u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+       u64 alloc_cursor = alloc_start;
+       int ret;
 
-       bch2_wake_allocator(ca);
+       /*
+        * Scan with an uncached iterator to avoid polluting the key cache. An
+        * uncached iter will return a cached key if one exists, but if not
+        * there is no other underlying protection for the associated key cache
+        * slot. To avoid racing bucket allocations, look up the cached key slot
+        * of any likely allocation candidate before attempting to proceed with
+        * the allocation. This provides proper exclusion on the associated
+        * bucket.
+        */
+again:
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
+                          BTREE_ITER_SLOTS, k, ret) {
+               struct bch_alloc_v4 a_convert;
+               const struct bch_alloc_v4 *a;
+
+               if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
+                       break;
+
+               if (ca->new_fs_bucket_idx &&
+                   is_superblock_bucket(ca, k.k->p.offset))
+                       continue;
+
+               a = bch2_alloc_to_v4(k, &a_convert);
+               if (a->data_type != BCH_DATA_free)
+                       continue;
+
+               /* now check the cached key to serialize concurrent allocs of the bucket */
+               ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+               ret = bkey_err(ck);
+               if (ret)
+                       break;
+
+               a = bch2_alloc_to_v4(ck, &a_convert);
+               if (a->data_type != BCH_DATA_free)
+                       goto next;
+
+               s->buckets_seen++;
+
+               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+next:
+               set_btree_iter_dontneed(&citer);
+               bch2_trans_iter_exit(trans, &citer);
+               if (ob)
+                       break;
+       }
+       bch2_trans_iter_exit(trans, &iter);
+
+       alloc_cursor = iter.pos.offset;
+       ca->alloc_cursor = alloc_cursor;
+
+       if (!ob && ret)
+               ob = ERR_PTR(ret);
+
+       if (!ob && alloc_start > first_bucket) {
+               alloc_cursor = alloc_start = first_bucket;
+               goto again;
+       }
+
+       return ob;
+}
+
+static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
+                                                  struct bch_dev *ca,
+                                                  enum bch_watermark watermark,
+                                                  struct bucket_alloc_state *s,
+                                                  struct closure *cl)
+{
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct open_bucket *ob = NULL;
+       u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
+       u64 alloc_cursor = alloc_start;
+       int ret;
+
+       BUG_ON(ca->new_fs_bucket_idx);
+again:
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
+                                    POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
+               if (k.k->p.inode != ca->dev_idx)
+                       break;
+
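+               /* A freespace key covers a range of buckets; try each one in turn: */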
+               for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
+                    alloc_cursor < k.k->p.offset;
+                    alloc_cursor++) {
+                       ret = btree_trans_too_many_iters(trans);
+                       if (ret) {
+                               ob = ERR_PTR(ret);
+                               break;
+                       }
+
+                       s->buckets_seen++;
+
+                       ob = try_alloc_bucket(trans, ca, watermark,
+                                             alloc_cursor, s, k, cl);
+                       if (ob) {
+                               set_btree_iter_dontneed(&iter);
+                               break;
+                       }
+               }
+
+               if (ob || ret)
+                       break;
+       }
+       bch2_trans_iter_exit(trans, &iter);
+
+       ca->alloc_cursor = alloc_cursor;
+
+       if (!ob && ret)
+               ob = ERR_PTR(ret);
+
+       if (!ob && alloc_start > ca->mi.first_bucket) {
+               alloc_cursor = alloc_start = ca->mi.first_bucket;
+               goto again;
+       }
+
+       return ob;
+}
+
+/**
+ * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
+ * @trans:     transaction object
+ * @ca:                device to allocate from
+ * @watermark: how important is this allocation?
+ * @cl:                if not NULL, closure to be used to wait if buckets not available
+ * @usage:     also returns the current device usage
+ *
+ * Returns:    an open_bucket on success, or an ERR_PTR() on failure.
+ */
+static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
+                                     struct bch_dev *ca,
+                                     enum bch_watermark watermark,
+                                     struct closure *cl,
+                                     struct bch_dev_usage *usage)
+{
+       struct bch_fs *c = trans->c;
+       struct open_bucket *ob = NULL;
+       bool freespace = READ_ONCE(ca->mi.freespace_initialized);
+       u64 avail;
+       struct bucket_alloc_state s = { 0 };
+       bool waiting = false;
+again:
+       bch2_dev_usage_read_fast(ca, usage);
+       avail = dev_buckets_free(ca, *usage, watermark);
+
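+       /* Kick off background work that can make more buckets allocatable: */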
+       if (usage->d[BCH_DATA_need_discard].buckets > avail)
+               bch2_do_discards(c);
+
+       if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+               bch2_do_gc_gens(c);
+
+       if (should_invalidate_buckets(ca, *usage))
+               bch2_do_invalidates(c);
+
+       if (!avail) {
+               if (cl && !waiting) {
+                       closure_wait(&c->freelist_wait, cl);
+                       waiting = true;
+                       goto again;
+               }
+
+               track_event_change(&c->times[BCH_TIME_blocked_allocate],
+                                  &c->blocked_allocate, true);
+
+               ob = ERR_PTR(-BCH_ERR_freelist_empty);
+               goto err;
+       }
+
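+       /* Buckets freed up while we were waiting - wake any other waiters: */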
+       if (waiting)
+               closure_wake_up(&c->freelist_wait);
+alloc:
+       ob = likely(freespace)
+               ? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+               : bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
+
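+       /* If most candidates were skipped awaiting a journal commit, flush the journal: */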
+       if (s.skipped_need_journal_commit * 2 > avail)
+               bch2_journal_flush_async(&c->journal, NULL);
+
+       if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
+               freespace = false;
+               goto alloc;
+       }
+err:
+       if (!ob)
+               ob = ERR_PTR(-BCH_ERR_no_buckets_found);
+
+       if (!IS_ERR(ob))
+               trace_and_count(c, bucket_alloc, ca,
+                               bch2_watermarks[watermark],
+                               ob->bucket,
+                               usage->d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               &s,
+                               cl == NULL,
+                               "");
+       else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
+               trace_and_count(c, bucket_alloc_fail, ca,
+                               bch2_watermarks[watermark],
+                               0,
+                               usage->d[BCH_DATA_free].buckets,
+                               avail,
+                               bch2_copygc_wait_amount(c),
+                               c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+                               &s,
+                               cl == NULL,
+                               bch2_err_str(PTR_ERR(ob)));
 
-       trace_bucket_alloc(ca, reserve);
+       return ob;
+}
+
+struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
+                                     enum bch_watermark watermark,
+                                     struct closure *cl)
+{
+       struct bch_dev_usage usage;
+       struct open_bucket *ob;
+
+       bch2_trans_do(c, NULL, NULL, 0,
+                     PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
+                                                       cl, &usage)));
        return ob;
 }
 
@@ -307,11 +642,12 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
        return ret;
 }
 
-void bch2_dev_stripe_increment(struct bch_dev *ca,
-                              struct dev_stripe_state *stripe)
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe,
+                              struct bch_dev_usage *usage)
 {
        u64 *v = stripe->next_alloc + ca->dev_idx;
-       u64 free_space = dev_buckets_available(ca);
+       u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
@@ -327,74 +663,104 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
                *v = *v < scale ? 0 : *v - scale;
 }
 
-#define BUCKET_MAY_ALLOC_PARTIAL       (1 << 0)
-#define BUCKET_ALLOC_USE_DURABILITY    (1 << 1)
+void bch2_dev_stripe_increment(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe)
+{
+       struct bch_dev_usage usage;
+
+       bch2_dev_usage_read_fast(ca, &usage);
+       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+}
 
-static void add_new_bucket(struct bch_fs *c,
+static int add_new_bucket(struct bch_fs *c,
                           struct open_buckets *ptrs,
                           struct bch_devs_mask *devs_may_alloc,
+                          unsigned nr_replicas,
                           unsigned *nr_effective,
                           bool *have_cache,
                           unsigned flags,
                           struct open_bucket *ob)
 {
        unsigned durability =
-               bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;
+               bch_dev_bkey_exists(c, ob->dev)->mi.durability;
 
-       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
-       *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
-               ? durability : 1;
+       BUG_ON(*nr_effective >= nr_replicas);
+
+       __clear_bit(ob->dev, devs_may_alloc->d);
+       *nr_effective   += durability;
        *have_cache     |= !durability;
 
        ob_push(c, ptrs, ob);
+
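+       /* Return nonzero when done: enough replicas, or we got an erasure coded bucket: */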
+       if (*nr_effective >= nr_replicas)
+               return 1;
+       if (ob->ec)
+               return 1;
+       return 0;
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *c,
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                      struct open_buckets *ptrs,
                      struct dev_stripe_state *stripe,
                      struct bch_devs_mask *devs_may_alloc,
                      unsigned nr_replicas,
                      unsigned *nr_effective,
                      bool *have_cache,
-                     enum alloc_reserve reserve,
                      unsigned flags,
+                     enum bch_data_type data_type,
+                     enum bch_watermark watermark,
                      struct closure *cl)
 {
+       struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted =
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
+       unsigned dev;
        struct bch_dev *ca;
-       int ret = -INSUFFICIENT_DEVICES;
+       int ret = -BCH_ERR_insufficient_devices;
        unsigned i;
 
        BUG_ON(*nr_effective >= nr_replicas);
 
        for (i = 0; i < devs_sorted.nr; i++) {
+               struct bch_dev_usage usage;
                struct open_bucket *ob;
 
-               ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
+               dev = devs_sorted.devs[i];
+
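+               /* Take a ref so the device can't be removed while we allocate from it: */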
+               rcu_read_lock();
+               ca = rcu_dereference(c->devs[dev]);
+               if (ca)
+                       percpu_ref_get(&ca->ref);
+               rcu_read_unlock();
+
                if (!ca)
                        continue;
 
-               if (!ca->mi.durability && *have_cache)
+               if (!ca->mi.durability && *have_cache) {
+                       percpu_ref_put(&ca->ref);
                        continue;
+               }
+
+               ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
+               if (!IS_ERR(ob))
+                       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+               percpu_ref_put(&ca->ref);
 
-               ob = bch2_bucket_alloc(c, ca, reserve,
-                               flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
                if (IS_ERR(ob)) {
                        ret = PTR_ERR(ob);
-
-                       if (cl)
-                               return ret;
+                       if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
+                               break;
                        continue;
                }
 
-               add_new_bucket(c, ptrs, devs_may_alloc,
-                              nr_effective, have_cache, flags, ob);
+               ob->data_type = data_type;
 
-               bch2_dev_stripe_increment(ca, stripe);
-
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+               if (add_new_bucket(c, ptrs, devs_may_alloc,
+                                  nr_replicas, nr_effective,
+                                  have_cache, flags, ob)) {
+                       ret = 0;
+                       break;
+               }
        }
 
        return ret;
@@ -408,26 +774,24 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
  * it's to a device we don't want:
  */
 
-static int bucket_alloc_from_stripe(struct bch_fs *c,
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
                         struct open_buckets *ptrs,
                         struct write_point *wp,
                         struct bch_devs_mask *devs_may_alloc,
                         u16 target,
-                        unsigned erasure_code,
                         unsigned nr_replicas,
                         unsigned *nr_effective,
                         bool *have_cache,
+                        enum bch_watermark watermark,
                         unsigned flags,
                         struct closure *cl)
 {
+       struct bch_fs *c = trans->c;
        struct dev_alloc_list devs_sorted;
        struct ec_stripe_head *h;
        struct open_bucket *ob;
-       struct bch_dev *ca;
        unsigned i, ec_idx;
-
-       if (!erasure_code)
-               return 0;
+       int ret = 0;
 
        if (nr_replicas < 2)
                return 0;
@@ -435,11 +799,9 @@ static int bucket_alloc_from_stripe(struct bch_fs *c,
        if (ec_open_bucket(c, ptrs))
                return 0;
 
-       h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
-                                   wp == &c->copygc_write_point,
-                                   cl);
+       h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
        if (IS_ERR(h))
-               return -PTR_ERR(h);
+               return PTR_ERR(h);
        if (!h)
                return 0;
 
@@ -451,178 +813,342 @@ static int bucket_alloc_from_stripe(struct bch_fs *c,
                                continue;
 
                        ob = c->open_buckets + h->s->blocks[ec_idx];
-                       if (ob->ptr.dev == devs_sorted.devs[i] &&
+                       if (ob->dev == devs_sorted.devs[i] &&
                            !test_and_set_bit(ec_idx, h->s->blocks_allocated))
                                goto got_bucket;
                }
        goto out_put_head;
 got_bucket:
-       ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
        ob->ec_idx      = ec_idx;
        ob->ec          = h->s;
+       ec_stripe_new_get(h->s, STRIPE_REF_io);
 
-       add_new_bucket(c, ptrs, devs_may_alloc,
-                      nr_effective, have_cache, flags, ob);
-       atomic_inc(&h->s->pin);
+       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                            nr_replicas, nr_effective,
+                            have_cache, flags, ob);
 out_put_head:
        bch2_ec_stripe_head_put(c, h);
-       return 0;
+       return ret;
 }
 
 /* Sector allocator */
 
-static void get_buckets_from_writepoint(struct bch_fs *c,
-                                       struct open_buckets *ptrs,
-                                       struct write_point *wp,
-                                       struct bch_devs_mask *devs_may_alloc,
-                                       unsigned nr_replicas,
-                                       unsigned *nr_effective,
-                                       bool *have_cache,
-                                       unsigned flags,
-                                       bool need_ec)
+static bool want_bucket(struct bch_fs *c,
+                       struct write_point *wp,
+                       struct bch_devs_mask *devs_may_alloc,
+                       bool *have_cache, bool ec,
+                       struct open_bucket *ob)
+{
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+
+       if (!test_bit(ob->dev, devs_may_alloc->d))
+               return false;
+
+       if (ob->data_type != wp->data_type)
+               return false;
+
+       if (!ca->mi.durability &&
+           (wp->data_type == BCH_DATA_btree || ec || *have_cache))
+               return false;
+
+       if (ec != (ob->ec != NULL))
+               return false;
+
+       return true;
+}
+
+static int bucket_alloc_set_writepoint(struct bch_fs *c,
+                                      struct open_buckets *ptrs,
+                                      struct write_point *wp,
+                                      struct bch_devs_mask *devs_may_alloc,
+                                      unsigned nr_replicas,
+                                      unsigned *nr_effective,
+                                      bool *have_cache,
+                                      bool ec, unsigned flags)
 {
        struct open_buckets ptrs_skip = { .nr = 0 };
        struct open_bucket *ob;
        unsigned i;
+       int ret = 0;
 
        open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-
-               if (*nr_effective < nr_replicas &&
-                   test_bit(ob->ptr.dev, devs_may_alloc->d) &&
-                   (ca->mi.durability ||
-                    (wp->type == BCH_DATA_user && !*have_cache)) &&
-                   (ob->ec || !need_ec)) {
-                       add_new_bucket(c, ptrs, devs_may_alloc,
-                                      nr_effective, have_cache,
-                                      flags, ob);
-               } else {
+               if (!ret && want_bucket(c, wp, devs_may_alloc,
+                                       have_cache, ec, ob))
+                       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                                      nr_replicas, nr_effective,
+                                      have_cache, flags, ob);
+               else
                        ob_push(c, &ptrs_skip, ob);
-               }
        }
        wp->ptrs = ptrs_skip;
+
+       return ret;
+}
+
+static int bucket_alloc_set_partial(struct bch_fs *c,
+                                   struct open_buckets *ptrs,
+                                   struct write_point *wp,
+                                   struct bch_devs_mask *devs_may_alloc,
+                                   unsigned nr_replicas,
+                                   unsigned *nr_effective,
+                                   bool *have_cache, bool ec,
+                                   enum bch_watermark watermark,
+                                   unsigned flags)
+{
+       int i, ret = 0;
+
+       if (!c->open_buckets_partial_nr)
+               return 0;
+
+       spin_lock(&c->freelist_lock);
+
+       if (!c->open_buckets_partial_nr)
+               goto unlock;
+
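+       /* Scan newest first, skipping buckets whose device has nothing free: */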
+       for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
+               struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
+
+               if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
+                       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+                       struct bch_dev_usage usage;
+                       u64 avail;
+
+                       bch2_dev_usage_read_fast(ca, &usage);
+                       avail = dev_buckets_free(ca, usage, watermark);
+                       if (!avail)
+                               continue;
+
+                       array_remove_item(c->open_buckets_partial,
+                                         c->open_buckets_partial_nr,
+                                         i);
+                       ob->on_partial_list = false;
+
+                       ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                                            nr_replicas, nr_effective,
+                                            have_cache, flags, ob);
+                       if (ret)
+                               break;
+               }
+       }
+unlock:
+       spin_unlock(&c->freelist_lock);
+       return ret;
 }
 
-static int open_bucket_add_buckets(struct bch_fs *c,
+static int __open_bucket_add_buckets(struct btree_trans *trans,
                        struct open_buckets *ptrs,
                        struct write_point *wp,
                        struct bch_devs_list *devs_have,
                        u16 target,
-                       unsigned erasure_code,
+                       bool erasure_code,
                        unsigned nr_replicas,
                        unsigned *nr_effective,
                        bool *have_cache,
-                       enum alloc_reserve reserve,
+                       enum bch_watermark watermark,
                        unsigned flags,
                        struct closure *_cl)
 {
+       struct bch_fs *c = trans->c;
        struct bch_devs_mask devs;
        struct open_bucket *ob;
        struct closure *cl = NULL;
-       int ret;
        unsigned i;
+       int ret;
 
-       rcu_read_lock();
-       devs = target_rw_devs(c, wp->type, target);
-       rcu_read_unlock();
+       devs = target_rw_devs(c, wp->data_type, target);
 
        /* Don't allocate from devices we already have pointers to: */
-       for (i = 0; i < devs_have->nr; i++)
-               __clear_bit(devs_have->devs[i], devs.d);
+       darray_for_each(*devs_have, i)
+               __clear_bit(*i, devs.d);
 
        open_bucket_for_each(c, ptrs, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
+               __clear_bit(ob->dev, devs.d);
 
-       if (erasure_code) {
-               if (!ec_open_bucket(c, ptrs)) {
-                       get_buckets_from_writepoint(c, ptrs, wp, &devs,
-                                                   nr_replicas, nr_effective,
-                                                   have_cache, flags, true);
-                       if (*nr_effective >= nr_replicas)
-                               return 0;
-               }
+       if (erasure_code && ec_open_bucket(c, ptrs))
+               return 0;
 
-               if (!ec_open_bucket(c, ptrs)) {
-                       ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
-                                                target, erasure_code,
-                                                nr_replicas, nr_effective,
-                                                have_cache, flags, _cl);
-                       if (ret == -FREELIST_EMPTY ||
-                           ret == -OPEN_BUCKETS_EMPTY)
-                               return ret;
-                       if (*nr_effective >= nr_replicas)
-                               return 0;
+       ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
+                                nr_replicas, nr_effective,
+                                have_cache, erasure_code, flags);
+       if (ret)
+               return ret;
+
+       ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
+                                nr_replicas, nr_effective,
+                                have_cache, erasure_code, watermark, flags);
+       if (ret)
+               return ret;
+
+       if (erasure_code) {
+               ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
+                                        target,
+                                        nr_replicas, nr_effective,
+                                        have_cache,
+                                        watermark, flags, _cl);
+       } else {
+retry_blocking:
+               /*
+                * Try nonblocking first, so that if one device is full we'll try from
+                * other devices:
+                */
+               ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
+                                       nr_replicas, nr_effective, have_cache,
+                                       flags, wp->data_type, watermark, cl);
+               if (ret &&
+                   !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
+                   !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
+                   !cl && _cl) {
+                       cl = _cl;
+                       goto retry_blocking;
                }
        }
 
-       get_buckets_from_writepoint(c, ptrs, wp, &devs,
-                                   nr_replicas, nr_effective,
-                                   have_cache, flags, false);
-       if (*nr_effective >= nr_replicas)
-               return 0;
+       return ret;
+}
 
-       percpu_down_read(&c->mark_lock);
-       rcu_read_lock();
+static int open_bucket_add_buckets(struct btree_trans *trans,
+                       struct open_buckets *ptrs,
+                       struct write_point *wp,
+                       struct bch_devs_list *devs_have,
+                       u16 target,
+                       unsigned erasure_code,
+                       unsigned nr_replicas,
+                       unsigned *nr_effective,
+                       bool *have_cache,
+                       enum bch_watermark watermark,
+                       unsigned flags,
+                       struct closure *cl)
+{
+       int ret;
 
-retry_blocking:
-       /*
-        * Try nonblocking first, so that if one device is full we'll try from
-        * other devices:
-        */
-       ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
+       if (erasure_code) {
+               ret = __open_bucket_add_buckets(trans, ptrs, wp,
+                               devs_have, target, erasure_code,
                                nr_replicas, nr_effective, have_cache,
-                               reserve, flags, cl);
-       if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
-               cl = _cl;
-               goto retry_blocking;
+                               watermark, flags, cl);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
+                   bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
+                   bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
+                   bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+                       return ret;
+               if (*nr_effective >= nr_replicas)
+                       return 0;
        }
 
-       rcu_read_unlock();
-       percpu_up_read(&c->mark_lock);
-
-       return ret;
+       ret = __open_bucket_add_buckets(trans, ptrs, wp,
+                       devs_have, target, false,
+                       nr_replicas, nr_effective, have_cache,
+                       watermark, flags, cl);
+       return ret < 0 ? ret : 0;
 }
 
-void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
-                               struct open_buckets *obs)
+/**
+ * should_drop_bucket - check if this open_bucket should go away
+ * @ob:                open_bucket to predicate on
+ * @c:         filesystem handle
+ * @ca:                if set, we're killing buckets for a particular device
+ * @ec:                if true, we're shutting down erasure coding and killing all ec
+ *             open_buckets
+ * Returns: true if we should kill this open_bucket
+ *
+ * We're killing open_buckets because we're shutting down a device, erasure
+ * coding, or the entire filesystem - check if this open_bucket matches:
+ */
+static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
+                              struct bch_dev *ca, bool ec)
 {
-       struct open_buckets ptrs = { .nr = 0 };
-       struct open_bucket *ob, *ob2;
-       unsigned i, j;
-
-       open_bucket_for_each(c, obs, ob, i) {
-               bool drop = !ca || ob->ptr.dev == ca->dev_idx;
+       if (ec) {
+               return ob->ec != NULL;
+       } else if (ca) {
+               bool drop = ob->dev == ca->dev_idx;
+               struct open_bucket *ob2;
+               unsigned i;
 
                if (!drop && ob->ec) {
+                       unsigned nr_blocks;
+
                        mutex_lock(&ob->ec->lock);
-                       for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
-                               if (!ob->ec->blocks[j])
+                       nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+                       for (i = 0; i < nr_blocks; i++) {
+                               if (!ob->ec->blocks[i])
                                        continue;
 
-                               ob2 = c->open_buckets + ob->ec->blocks[j];
-                               drop |= ob2->ptr.dev == ca->dev_idx;
+                               ob2 = c->open_buckets + ob->ec->blocks[i];
+                               drop |= ob2->dev == ca->dev_idx;
                        }
                        mutex_unlock(&ob->ec->lock);
                }
 
-               if (drop)
-                       bch2_open_bucket_put(c, ob);
-               else
-                       ob_push(c, &ptrs, ob);
+               return drop;
+       } else {
+               return true;
        }
-
-       *obs = ptrs;
 }
 
-void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
-                         struct write_point *wp)
+static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
+                                bool ec, struct write_point *wp)
 {
+       struct open_buckets ptrs = { .nr = 0 };
+       struct open_bucket *ob;
+       unsigned i;
+
        mutex_lock(&wp->lock);
-       bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               if (should_drop_bucket(ob, c, ca, ec))
+                       bch2_open_bucket_put(c, ob);
+               else
+                       ob_push(c, &ptrs, ob);
+       wp->ptrs = ptrs;
        mutex_unlock(&wp->lock);
 }
 
+void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
+                           bool ec)
+{
+       unsigned i;
+
+       /* Close write points that may point to this device: */
+       for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
+               bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);
+
+       bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
+       bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
+       bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);
+
+       mutex_lock(&c->btree_reserve_cache_lock);
+       while (c->btree_reserve_cache_nr) {
+               struct btree_alloc *a =
+                       &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
+
+               bch2_open_buckets_put(c, &a->ob);
+       }
+       mutex_unlock(&c->btree_reserve_cache_lock);
+
+       spin_lock(&c->freelist_lock);
+       i = 0;
+       while (i < c->open_buckets_partial_nr) {
+               struct open_bucket *ob =
+                       c->open_buckets + c->open_buckets_partial[i];
+
+               if (should_drop_bucket(ob, c, ca, ec)) {
+                       --c->open_buckets_partial_nr;
+                       swap(c->open_buckets_partial[i],
+                            c->open_buckets_partial[c->open_buckets_partial_nr]);
+                       ob->on_partial_list = false;
+                       spin_unlock(&c->freelist_lock);
+                       bch2_open_bucket_put(c, ob);
+                       spin_lock(&c->freelist_lock);
+               } else {
+                       i++;
+               }
+       }
+       spin_unlock(&c->freelist_lock);
+
+       bch2_ec_stop_dev(c, ca);
+}
+
 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
                                                 unsigned long write_point)
 {
@@ -668,10 +1194,12 @@ static bool try_increase_writepoints(struct bch_fs *c)
        return true;
 }
 
-static bool try_decrease_writepoints(struct bch_fs *c,
-                                    unsigned old_nr)
+static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp;
+       struct open_bucket *ob;
+       unsigned i;
 
        mutex_lock(&c->write_points_hash_lock);
        if (c->write_points_nr < old_nr) {
@@ -690,19 +1218,24 @@ static bool try_decrease_writepoints(struct bch_fs *c,
        hlist_del_rcu(&wp->node);
        mutex_unlock(&c->write_points_hash_lock);
 
-       bch2_writepoint_stop(c, NULL, wp);
+       bch2_trans_mutex_lock_norelock(trans, &wp->lock);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               open_bucket_free_unused(c, ob);
+       wp->ptrs.nr = 0;
+       mutex_unlock(&wp->lock);
        return true;
 }
 
-static struct write_point *writepoint_find(struct bch_fs *c,
+static struct write_point *writepoint_find(struct btree_trans *trans,
                                           unsigned long write_point)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp, *oldest;
        struct hlist_head *head;
 
        if (!(write_point & 1UL)) {
                wp = (struct write_point *) write_point;
-               mutex_lock(&wp->lock);
+               bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                return wp;
        }
 
@@ -711,7 +1244,7 @@ restart_find:
        wp = __writepoint_find(head, write_point);
        if (wp) {
 lock_wp:
-               mutex_lock(&wp->lock);
+               bch2_trans_mutex_lock_norelock(trans, &wp->lock);
                if (wp->write_point == write_point)
                        goto out;
                mutex_unlock(&wp->lock);
@@ -724,8 +1257,8 @@ restart_find_oldest:
                if (!oldest || time_before64(wp->last_used, oldest->last_used))
                        oldest = wp;
 
-       mutex_lock(&oldest->lock);
-       mutex_lock(&c->write_points_hash_lock);
+       bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+       bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
        if (oldest >= c->write_points + c->write_points_nr ||
            try_increase_writepoints(c)) {
                mutex_unlock(&c->write_points_hash_lock);
@@ -746,35 +1279,62 @@ restart_find_oldest:
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
 out:
-       wp->last_used = sched_clock();
+       wp->last_used = local_clock();
        return wp;
 }
 
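+/*
+ * We allocated more replicas than requested (a bucket's durability can be >
+ * 1): hand back buckets whose entire durability we can afford to drop.
+ */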
+static noinline void
+deallocate_extra_replicas(struct bch_fs *c,
+                         struct open_buckets *ptrs,
+                         struct open_buckets *ptrs_no_use,
+                         unsigned extra_replicas)
+{
+       struct open_buckets ptrs2 = { 0 };
+       struct open_bucket *ob;
+       unsigned i;
+
+       open_bucket_for_each(c, ptrs, ob, i) {
+               unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+
+               if (d && d <= extra_replicas) {
+                       extra_replicas -= d;
+                       ob_push(c, ptrs_no_use, ob);
+               } else {
+                       ob_push(c, &ptrs2, ob);
+               }
+       }
+
+       *ptrs = ptrs2;
+}
+
 /*
  * Get us an open_bucket we can allocate from, return with it locked:
  */
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+                            unsigned target,
+                            unsigned erasure_code,
+                            struct write_point_specifier write_point,
+                            struct bch_devs_list *devs_have,
+                            unsigned nr_replicas,
+                            unsigned nr_replicas_required,
+                            enum bch_watermark watermark,
+                            unsigned flags,
+                            struct closure *cl,
+                            struct write_point **wp_ret)
 {
+       struct bch_fs *c = trans->c;
        struct write_point *wp;
        struct open_bucket *ob;
        struct open_buckets ptrs;
        unsigned nr_effective, write_points_nr;
-       unsigned ob_flags = 0;
        bool have_cache;
        int ret;
        int i;
 
-       if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
-               ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
+       if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
+               erasure_code = false;
+
+       BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
 
        BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
@@ -783,35 +1343,52 @@ retry:
        write_points_nr = c->write_points_nr;
        have_cache      = false;
 
-       wp = writepoint_find(c, write_point.v);
-
-       if (wp->type == BCH_DATA_user)
-               ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
+       *wp_ret = wp = writepoint_find(trans, write_point.v);
 
        /* metadata may not allocate on cache devices: */
-       if (wp->type != BCH_DATA_user)
+       if (wp->data_type != BCH_DATA_user)
                have_cache = true;
 
-       if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+       if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve,
-                                             ob_flags, cl);
-       } else {
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+                                             &have_cache, watermark,
+                                             flags, NULL);
+               if (!ret ||
+                   bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       goto alloc_done;
+
+               /* Don't retry from all devices if we're out of open buckets: */
+               if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
+                       int ret2 = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve,
-                                             ob_flags, NULL);
-               if (!ret)
-                       goto alloc_done;
+                                             &have_cache, watermark,
+                                             flags, cl);
+                       if (!ret2 ||
+                           bch2_err_matches(ret2, BCH_ERR_transaction_restart) ||
+                           bch2_err_matches(ret2, BCH_ERR_open_buckets_empty)) {
+                               ret = ret2;
+                               goto alloc_done;
+                       }
+               }
+
+               /*
+                * Only try to allocate cache (durability = 0 devices) from the
+                * specified target:
+                */
+               have_cache = true;
 
-               ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
                                              0, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve,
-                                             ob_flags, cl);
+                                             &have_cache, watermark,
+                                             flags, cl);
+       } else {
+               ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
+                                             target, erasure_code,
+                                             nr_replicas, &nr_effective,
+                                             &have_cache, watermark,
+                                             flags, cl);
        }
 alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);
@@ -819,16 +1396,19 @@ alloc_done:
        if (erasure_code && !ec_open_bucket(c, &ptrs))
        pr_debug("failed to get ec bucket: ret %i", ret);
 
-       if (ret == -INSUFFICIENT_DEVICES &&
+       if (ret == -BCH_ERR_insufficient_devices &&
            nr_effective >= nr_replicas_required)
                ret = 0;
 
        if (ret)
                goto err;
 
+       if (nr_effective > nr_replicas)
+               deallocate_extra_replicas(c, &ptrs, &wp->ptrs, nr_effective - nr_replicas);
+
        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
-               open_bucket_free_unused(c, wp, ob);
+               open_bucket_free_unused(c, ob);
 
        wp->ptrs = ptrs;
 
@@ -839,61 +1419,49 @@ alloc_done:
 
        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
 
-       verify_not_stale(c, &wp->ptrs);
-
-       return wp;
+       return 0;
 err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
                        ob_push(c, &ptrs, ob);
                else
-                       open_bucket_free_unused(c, wp, ob);
+                       open_bucket_free_unused(c, ob);
        wp->ptrs = ptrs;
 
        mutex_unlock(&wp->lock);
 
-       if (ret == -FREELIST_EMPTY &&
-           try_decrease_writepoints(c, write_points_nr))
+       if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
+           try_decrease_writepoints(trans, write_points_nr))
                goto retry;
 
-       switch (ret) {
-       case -OPEN_BUCKETS_EMPTY:
-       case -FREELIST_EMPTY:
-               return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
-       case -INSUFFICIENT_DEVICES:
-               return ERR_PTR(-EROFS);
-       default:
-               BUG();
-       }
-}
+       if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+           bch2_err_matches(ret, BCH_ERR_freelist_empty))
+               return cl
+                       ? -BCH_ERR_bucket_alloc_blocked
+                       : -BCH_ERR_ENOSPC_bucket_alloc;
 
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
-void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
-                                   struct bkey_i *k, unsigned sectors)
+       return ret;
+}
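/*
 * Illustrative sketch, not part of the commit: one plausible caller-side
 * sequence for the three-step allocation API above (start / append / done).
 * example_write(), the empty devs_have list, and the watermark/flag choices
 * are assumptions made for the example; only the function names and
 * signatures come from this file and its header. Real callers also run this
 * inside a transaction retry loop, since start_trans can return
 * BCH_ERR_transaction_restart errors.
 */
static int example_write(struct btree_trans *trans, struct bkey_i *k,
                         unsigned sectors, struct closure *cl)
{
        struct bch_devs_list devs_have = { .nr = 0 };   /* no devices to avoid */
        struct write_point *wp;
        int ret;

        ret = bch2_alloc_sectors_start_trans(trans,
                        0,                      /* target: any device */
                        false,                  /* erasure_code */
                        writepoint_hashed(0),   /* hypothetical write point id */
                        &devs_have,
                        2,                      /* nr_replicas wanted */
                        1,                      /* nr_replicas_required */
                        BCH_WATERMARK_normal,
                        0,                      /* flags */
                        cl, &wp);
        if (ret)
                return ret;

        /* append pointers for @sectors to @k, as uncached data;
         * the caller must not exceed wp->sectors_free: */
        bch2_alloc_sectors_append_ptrs(trans->c, wp, k, sectors, false);

        /* drop the write point lock, releasing buckets we filled: */
        bch2_alloc_sectors_done(trans->c, wp);
        return 0;
}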
 
+struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
 {
-       struct open_bucket *ob;
-       unsigned i;
-
-       BUG_ON(sectors > wp->sectors_free);
-       wp->sectors_free -= sectors;
-
-       open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
-               struct bch_extent_ptr tmp = ob->ptr;
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
 
-               tmp.cached = !ca->mi.durability &&
-                       wp->type == BCH_DATA_user;
-
-               tmp.offset += ca->mi.bucket_size - ob->sectors_free;
-               bch2_bkey_append_ptr(k, tmp);
+       return (struct bch_extent_ptr) {
+               .type   = 1 << BCH_EXTENT_ENTRY_ptr,
+               .gen    = ob->gen,
+               .dev    = ob->dev,
+               .offset = bucket_to_sector(ca, ob->bucket) +
+                       ca->mi.bucket_size -
+                       ob->sectors_free,
+       };
+}
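/*
 * Worked example (hypothetical numbers): for a 512-sector bucket at bucket
 * number 10, bucket_to_sector(ca, 10) = 5120; with 384 sectors still free,
 * the pointer above lands at offset 5120 + 512 - 384 = 5248, so successive
 * allocations walk the bucket front to back as sectors_free shrinks.
 */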
 
-               BUG_ON(sectors > ob->sectors_free);
-               ob->sectors_free -= sectors;
-       }
+void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
+                                   struct bkey_i *k, unsigned sectors,
+                                   bool cached)
+{
+       bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
 }
 
 /*
@@ -902,24 +1470,18 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
  */
 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
 {
-       struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
-       struct open_bucket *ob;
-       unsigned i;
-
-       open_bucket_for_each(c, &wp->ptrs, ob, i)
-               ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
-       wp->ptrs = keep;
-
-       mutex_unlock(&wp->lock);
-
-       bch2_open_buckets_put(c, &ptrs);
+       bch2_alloc_sectors_done_inlined(c, wp);
 }
 
 static inline void writepoint_init(struct write_point *wp,
                                   enum bch_data_type type)
 {
        mutex_init(&wp->lock);
-       wp->type = type;
+       wp->data_type = type;
+
+       INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+       INIT_LIST_HEAD(&wp->writes);
+       spin_lock_init(&wp->writes_lock);
 }
 
 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
@@ -950,9 +1512,113 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
             wp < c->write_points + c->write_points_nr; wp++) {
                writepoint_init(wp, BCH_DATA_user);
 
-               wp->last_used   = sched_clock();
+               wp->last_used   = local_clock();
                wp->write_point = (unsigned long) wp;
                hlist_add_head_rcu(&wp->node,
                                   writepoint_hash(c, wp->write_point));
        }
 }
+
+static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
+{
+       struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+       unsigned data_type = ob->data_type;
+       barrier(); /* READ_ONCE() doesn't work on bitfields */
+
+       prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
+                  ob - c->open_buckets,
+                  atomic_read(&ob->pin),
+                  data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
+                  ob->dev, ob->bucket, ob->gen,
+                  ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
+       if (ob->ec)
+               prt_printf(out, " ec idx %llu", ob->ec->idx);
+       if (ob->on_partial_list)
+               prt_str(out, " partial");
+       prt_newline(out);
+}
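/*
 * With hypothetical values, the format above renders one line per bucket
 * along the lines of:
 *
 *   34 ref 1 user 0:1024 gen 3 allocated 128/512 ec idx 7 partial
 *
 * i.e. open-bucket index, pin count, data type, device:bucket, generation,
 * sectors written out of the bucket size, plus the optional EC-stripe and
 * partial-list annotations.
 */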
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct open_bucket *ob;
+
+       out->atomic++;
+
+       for (ob = c->open_buckets;
+            ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+            ob++) {
+               spin_lock(&ob->lock);
+               if (ob->valid && !ob->on_partial_list)
+                       bch2_open_bucket_to_text(out, c, ob);
+               spin_unlock(&ob->lock);
+       }
+
+       --out->atomic;
+}
+
+void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       unsigned i;
+
+       out->atomic++;
+       spin_lock(&c->freelist_lock);
+
+       for (i = 0; i < c->open_buckets_partial_nr; i++)
+               bch2_open_bucket_to_text(out, c,
+                               c->open_buckets + c->open_buckets_partial[i]);
+
+       spin_unlock(&c->freelist_lock);
+       --out->atomic;
+}
+
+static const char * const bch2_write_point_states[] = {
+#define x(n)   #n,
+       WRITE_POINT_STATES()
+#undef x
+       NULL
+};
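/*
 * Illustrative sketch, not part of the commit: the x-macro pattern used
 * above, shown standalone. A single list definition expands into both the
 * enum and its name table, so the two cannot drift out of sync;
 * WRITE_POINT_STATES() and WRITE_POINT_STATE_NR (used below) are generated
 * the same way. EXAMPLE_STATES() and its identifiers are invented here.
 */
#define EXAMPLE_STATES()        \
        x(stopped)              \
        x(waiting_io)           \
        x(running)

enum example_state {
#define x(n)    EXAMPLE_STATE_##n,
        EXAMPLE_STATES()
#undef x
        EXAMPLE_STATE_NR
};

static const char * const example_state_names[] = {
#define x(n)    #n,
        EXAMPLE_STATES()
#undef x
        NULL
};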
+
+static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
+                                    struct write_point *wp)
+{
+       struct open_bucket *ob;
+       unsigned i;
+
+       prt_printf(out, "%lu: ", wp->write_point);
+       prt_human_readable_u64(out, wp->sectors_allocated);
+
+       prt_printf(out, " last wrote: ");
+       bch2_pr_time_units(out, local_clock() - wp->last_used);
+
+       for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
+               prt_printf(out, " %s: ", bch2_write_point_states[i]);
+               bch2_pr_time_units(out, wp->time[i]);
+       }
+
+       prt_newline(out);
+
+       printbuf_indent_add(out, 2);
+       open_bucket_for_each(c, &wp->ptrs, ob, i)
+               bch2_open_bucket_to_text(out, c, ob);
+       printbuf_indent_sub(out, 2);
+}
+
+void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct write_point *wp;
+
+       prt_str(out, "Foreground write points\n");
+       for (wp = c->write_points;
+            wp < c->write_points + ARRAY_SIZE(c->write_points);
+            wp++)
+               bch2_write_point_to_text(out, c, wp);
+
+       prt_str(out, "Copygc write point\n");
+       bch2_write_point_to_text(out, c, &c->copygc_write_point);
+
+       prt_str(out, "Rebalance write point\n");
+       bch2_write_point_to_text(out, c, &c->rebalance_write_point);
+
+       prt_str(out, "Btree write point\n");
+       bch2_write_point_to_text(out, c, &c->btree_write_point);
+}
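/*
 * Note: these to_text() helpers back the filesystem's debug interfaces; in
 * recent trees fs/bcachefs/sysfs.c exposes the same dumps under
 * /sys/fs/bcachefs/<uuid>/internal/. The exact attribute names (e.g.
 * open_buckets, write_points) are an assumption here and may vary between
 * versions.
 */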