Update bcachefs sources to 84505cfd37 bcachefs: Go RW before check_alloc_info()
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index ce365fec7f4129ed45089d06d461c1f7ff873677..2010a9af0eb2de8a36eb74633c79dea18f17ac25 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -14,6 +14,7 @@
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
 #include "btree_iter.h"
 #include "btree_update.h"
 #include "btree_gc.h"
@@ -193,27 +194,30 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
                                              u64 bucket,
                                              enum alloc_reserve reserve,
-                                             struct bch_alloc_v4 *a,
-                                             u64 *skipped_open,
-                                             u64 *skipped_need_journal_commit,
-                                             u64 *skipped_nouse,
+                                             const struct bch_alloc_v4 *a,
+                                             struct bucket_alloc_state *s,
                                              struct closure *cl)
 {
        struct open_bucket *ob;
 
        if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
-               (*skipped_nouse)++;
+               s->skipped_nouse++;
                return NULL;
        }
 
        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
-               (*skipped_open)++;
+               s->skipped_open++;
                return NULL;
        }
 
        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
-               (*skipped_need_journal_commit)++;
+               s->skipped_need_journal_commit++;
+               return NULL;
+       }
+
+       if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
+               s->skipped_nocow++;
                return NULL;
        }
 
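
The hunk above folds the separate u64 out-parameters into a single struct bucket_alloc_state. Its definition is not part of this diff (it lives in the alloc headers); a sketch reconstructed from the fields the new code touches:

	struct bucket_alloc_state {
		u64	cur_bucket;			/* scan cursor, persisted across calls */
		u64	buckets_seen;			/* candidates examined */
		u64	skipped_open;			/* bucket already open */
		u64	skipped_need_journal_commit;	/* waiting on a journal flush */
		u64	skipped_nocow;			/* held by a nocow lock (new in this diff) */
		u64	skipped_nouse;			/* marked in ca->buckets_nouse */
	};
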
@@ -233,7 +237,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
        /* Recheck under lock: */
        if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
                spin_unlock(&c->freelist_lock);
-               (*skipped_open)++;
+               s->skipped_open++;
                return NULL;
        }
 
@@ -273,9 +277,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
                                            enum alloc_reserve reserve, u64 free_entry,
-                                           u64 *skipped_open,
-                                           u64 *skipped_need_journal_commit,
-                                           u64 *skipped_nouse,
+                                           struct bucket_alloc_state *s,
                                            struct bkey_s_c freespace_k,
                                            struct closure *cl)
 {
@@ -283,7 +285,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        struct open_bucket *ob;
-       struct bch_alloc_v4 a;
+       struct bch_alloc_v4 a_convert;
+       const struct bch_alloc_v4 *a;
        u64 b = free_entry & ~(~0ULL << 56);
        unsigned genbits = free_entry >> 56;
        struct printbuf buf = PRINTBUF;
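
The two decode lines above split a freespace-btree entry: the bucket number occupies the low 56 bits and the generation bits the top 8 (alloc_freespace_genbits(), used further down, apparently returns the genbits pre-shifted into the top byte, hence the >> 56 in the comparison). A worked example of the packing this inverts, as a sketch rather than the verbatim helper:

	/* pack: high 8 bits = genbits, low 56 bits = bucket */
	u64 free_entry = ((u64) genbits << 56) | (bucket & ~(~0ULL << 56));

	/* unpack, as in try_alloc_bucket() above: */
	u64 b          = free_entry & ~(~0ULL << 56);	/* bucket number */
	unsigned gbits = free_entry >> 56;		/* generation bits */
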
@@ -307,37 +310,62 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
                goto err;
        }
 
-       bch2_alloc_to_v4(k, &a);
+       a = bch2_alloc_to_v4(k, &a_convert);
 
-       if (genbits != (alloc_freespace_genbits(a) >> 56)) {
-               prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
-                      "  freespace key ",
-                      genbits, alloc_freespace_genbits(a) >> 56);
+       if (a->data_type != BCH_DATA_free) {
+               if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+                       ob = NULL;
+                       goto err;
+               }
+
+               prt_printf(&buf, "non free bucket in freespace btree\n"
+                      "  freespace key ");
                bch2_bkey_val_to_text(&buf, c, freespace_k);
                prt_printf(&buf, "\n  ");
                bch2_bkey_val_to_text(&buf, c, k);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
                ob = ERR_PTR(-EIO);
                goto err;
-
        }
 
-       if (a.data_type != BCH_DATA_free) {
-               prt_printf(&buf, "non free bucket in freespace btree\n"
-                      "  freespace key ");
+       if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
+           test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+               prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
+                      "  freespace key ",
+                      genbits, alloc_freespace_genbits(*a) >> 56);
                bch2_bkey_val_to_text(&buf, c, freespace_k);
                prt_printf(&buf, "\n  ");
                bch2_bkey_val_to_text(&buf, c, k);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
                ob = ERR_PTR(-EIO);
                goto err;
+
        }
 
-       ob = __try_alloc_bucket(c, ca, b, reserve, &a,
-                               skipped_open,
-                               skipped_need_journal_commit,
-                               skipped_nouse,
-                               cl);
+       if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+               struct bch_backpointer bp;
+               u64 bp_offset = 0;
+
+               ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
+                                               &bp_offset, &bp,
+                                               BTREE_ITER_NOPRESERVE);
+               if (ret) {
+                       ob = ERR_PTR(ret);
+                       goto err;
+               }
+
+               if (bp_offset != U64_MAX) {
+                       /*
+                        * Bucket may have data in it - we don't call
+                        * bch2_trans_inconsistent() because fsck hasn't
+                        * finished yet
+                        */
+                       ob = NULL;
+                       goto err;
+               }
+       }
+
+       ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
        if (!ob)
                iter.path->preserve = false;
 err:
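
Note the change from bch2_alloc_to_v4(k, &a) filling a local struct to a const-pointer return: when the key is already a v4 alloc key the helper can point straight into it and skip the conversion copy. A sketch of that pattern, assuming the conversion helpers from alloc_background (not part of this diff):

	const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k,
						    struct bch_alloc_v4 *convert)
	{
		if (likely(k.k->type == KEY_TYPE_alloc_v4))
			/* already v4: no copy, point into the key itself */
			return bkey_s_c_to_alloc_v4(k).v;

		/* older format: convert into the caller-supplied buffer */
		__bch2_alloc_to_v4(k, convert);
		return convert;
	}
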
@@ -383,11 +411,7 @@ static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
                        struct bch_dev *ca,
                        enum alloc_reserve reserve,
-                       u64 *cur_bucket,
-                       u64 *buckets_seen,
-                       u64 *skipped_open,
-                       u64 *skipped_need_journal_commit,
-                       u64 *skipped_nouse,
+                       struct bucket_alloc_state *s,
                        struct closure *cl)
 {
        struct btree_iter iter;
@@ -395,38 +419,35 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
        struct open_bucket *ob = NULL;
        int ret;
 
-       *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
-       *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
+       s->cur_bucket = max_t(u64, s->cur_bucket, ca->mi.first_bucket);
+       s->cur_bucket = max_t(u64, s->cur_bucket, ca->new_fs_bucket_idx);
 
-       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
+       for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
                           BTREE_ITER_SLOTS, k, ret) {
-               struct bch_alloc_v4 a;
+               struct bch_alloc_v4 a_convert;
+               const struct bch_alloc_v4 *a;
 
-               if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+               if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
                        break;
 
                if (ca->new_fs_bucket_idx &&
                    is_superblock_bucket(ca, k.k->p.offset))
                        continue;
 
-               bch2_alloc_to_v4(k, &a);
+               a = bch2_alloc_to_v4(k, &a_convert);
 
-               if (a.data_type != BCH_DATA_free)
+               if (a->data_type != BCH_DATA_free)
                        continue;
 
-               (*buckets_seen)++;
+               s->buckets_seen++;
 
-               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
-                                       skipped_open,
-                                       skipped_need_journal_commit,
-                                       skipped_nouse,
-                                       cl);
+               ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);
 
-       *cur_bucket = iter.pos.offset;
+       s->cur_bucket = iter.pos.offset;
 
        return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
 }
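
The return statement above uses GCC's x ?: y extension twice; expanded, it reads:

	if (ob)
		return ob;
	return ERR_PTR(ret ? ret : -BCH_ERR_no_buckets_found);
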
@@ -434,11 +455,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
                                                   struct bch_dev *ca,
                                                   enum alloc_reserve reserve,
-                                                  u64 *cur_bucket,
-                                                  u64 *buckets_seen,
-                                                  u64 *skipped_open,
-                                                  u64 *skipped_need_journal_commit,
-                                                  u64 *skipped_nouse,
+                                                  struct bucket_alloc_state *s,
                                                   struct closure *cl)
 {
        struct btree_iter iter;
@@ -454,25 +471,21 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
         * at previously
         */
        for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
-                                    POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
+                                    POS(ca->dev_idx, s->cur_bucket), 0, k, ret) {
                if (k.k->p.inode != ca->dev_idx)
                        break;
 
-               for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
-                    *cur_bucket < k.k->p.offset;
-                    (*cur_bucket)++) {
+               for (s->cur_bucket = max(s->cur_bucket, bkey_start_offset(k.k));
+                    s->cur_bucket < k.k->p.offset;
+                    s->cur_bucket++) {
                        ret = btree_trans_too_many_iters(trans);
                        if (ret)
                                break;
 
-                       (*buckets_seen)++;
+                       s->buckets_seen++;
 
                        ob = try_alloc_bucket(trans, ca, reserve,
-                                             *cur_bucket,
-                                             skipped_open,
-                                             skipped_need_journal_commit,
-                                             skipped_nouse,
-                                             k, cl);
+                                             s->cur_bucket, s, k, cl);
                        if (ob)
                                break;
                }
@@ -489,36 +502,32 @@ static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
  * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
  *
  * Returns an open_bucket on success, or an ERR_PTR() on failure
- * */
+ */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
                                      struct bch_dev *ca,
                                      enum alloc_reserve reserve,
                                      bool may_alloc_partial,
-                                     struct closure *cl)
+                                     struct closure *cl,
+                                     struct bch_dev_usage *usage)
 {
        struct bch_fs *c = trans->c;
        struct open_bucket *ob = NULL;
-       struct bch_dev_usage usage;
-       bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
-       u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
+       bool freespace = READ_ONCE(ca->mi.freespace_initialized);
+       u64 start = freespace ? 0 : ca->bucket_alloc_trans_early_cursor;
        u64 avail;
-       u64 cur_bucket = start;
-       u64 buckets_seen = 0;
-       u64 skipped_open = 0;
-       u64 skipped_need_journal_commit = 0;
-       u64 skipped_nouse = 0;
+       struct bucket_alloc_state s = { .cur_bucket = start };
        bool waiting = false;
 again:
-       usage = bch2_dev_usage_read(ca);
-       avail = dev_buckets_free(ca, usage, reserve);
+       bch2_dev_usage_read_fast(ca, usage);
+       avail = dev_buckets_free(ca, *usage, reserve);
 
-       if (usage.d[BCH_DATA_need_discard].buckets > avail)
+       if (usage->d[BCH_DATA_need_discard].buckets > avail)
                bch2_do_discards(c);
 
-       if (usage.d[BCH_DATA_need_gc_gens].buckets > avail)
+       if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
                bch2_do_gc_gens(c);
 
-       if (should_invalidate_buckets(ca, usage))
+       if (should_invalidate_buckets(ca, *usage))
                bch2_do_invalidates(c);
 
        if (!avail) {
@@ -543,33 +552,26 @@ again:
                if (ob)
                        return ob;
        }
+alloc:
+       ob = likely(freespace)
+               ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
+               : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
 
-       ob = likely(ca->mi.freespace_initialized)
-               ? bch2_bucket_alloc_freelist(trans, ca, reserve,
-                                       &cur_bucket,
-                                       &buckets_seen,
-                                       &skipped_open,
-                                       &skipped_need_journal_commit,
-                                       &skipped_nouse,
-                                       cl)
-               : bch2_bucket_alloc_early(trans, ca, reserve,
-                                       &cur_bucket,
-                                       &buckets_seen,
-                                       &skipped_open,
-                                       &skipped_need_journal_commit,
-                                       &skipped_nouse,
-                                       cl);
-
-       if (skipped_need_journal_commit * 2 > avail)
+       if (s.skipped_need_journal_commit * 2 > avail)
                bch2_journal_flush_async(&c->journal, NULL);
 
-       if (!ob && !freespace_initialized && start) {
-               start = cur_bucket = 0;
-               goto again;
+       if (!ob && !freespace && start) {
+               start = s.cur_bucket = 0;
+               goto alloc;
        }
 
-       if (!freespace_initialized)
-               ca->bucket_alloc_trans_early_cursor = cur_bucket;
+       if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
+               freespace = false;
+               goto alloc;
+       }
+
+       if (!freespace)
+               ca->bucket_alloc_trans_early_cursor = s.cur_bucket;
 err:
        if (!ob)
                ob = ERR_PTR(-BCH_ERR_no_buckets_found);
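
Taken together, the rewritten tail of bch2_bucket_alloc_trans() tries up to three passes before giving up; restated as an outline of the code above:

	/*
	 * 1. Preferred path: the freespace btree when freespace_initialized,
	 *    otherwise a linear scan of the alloc btree starting at the
	 *    saved cursor.
	 * 2. If the linear scan started from a nonzero cursor and found
	 *    nothing, restart it once from bucket 0.
	 * 3. New in this change: if the freespace btree found nothing and
	 *    fsck has not yet validated alloc info (BCH_FS_CHECK_ALLOC_DONE
	 *    unset), fall back to the linear scan, since the freespace
	 *    btree may still be incomplete.
	 */
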
@@ -577,17 +579,14 @@ err:
        if (!IS_ERR(ob))
                trace_and_count(c, bucket_alloc, ca, bch2_alloc_reserves[reserve],
                                may_alloc_partial, ob->bucket);
-       else
+       else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
                trace_and_count(c, bucket_alloc_fail,
                                ca, bch2_alloc_reserves[reserve],
-                               usage.d[BCH_DATA_free].buckets,
+                               usage->d[BCH_DATA_free].buckets,
                                avail,
                                bch2_copygc_wait_amount(c),
                                c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-                               buckets_seen,
-                               skipped_open,
-                               skipped_need_journal_commit,
-                               skipped_nouse,
+                               &s,
                                cl == NULL,
                                bch2_err_str(PTR_ERR(ob)));
 
@@ -599,11 +598,12 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                                      bool may_alloc_partial,
                                      struct closure *cl)
 {
+       struct bch_dev_usage usage;
        struct open_bucket *ob;
 
        bch2_trans_do(c, NULL, NULL, 0,
                      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
-                                                                  may_alloc_partial, cl)));
+                                                       may_alloc_partial, cl, &usage)));
        return ob;
 }
 
@@ -630,8 +630,9 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
        return ret;
 }
 
-void bch2_dev_stripe_increment(struct bch_dev *ca,
-                              struct dev_stripe_state *stripe)
+static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe,
+                              struct bch_dev_usage *usage)
 {
        u64 *v = stripe->next_alloc + ca->dev_idx;
        u64 free_space = dev_buckets_available(ca, RESERVE_none);
@@ -650,6 +651,15 @@ void bch2_dev_stripe_increment(struct bch_dev *ca,
                *v = *v < scale ? 0 : *v - scale;
 }
 
+void bch2_dev_stripe_increment(struct bch_dev *ca,
+                              struct dev_stripe_state *stripe)
+{
+       struct bch_dev_usage usage;
+
+       bch2_dev_usage_read_fast(ca, &usage);
+       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
+}
+
 #define BUCKET_MAY_ALLOC_PARTIAL       (1 << 0)
 #define BUCKET_ALLOC_USE_DURABILITY    (1 << 1)
 
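
The split into bch2_dev_stripe_increment_inlined(), which takes a caller-supplied bch_dev_usage, plus the old-signature wrapper above lets bch2_bucket_alloc_set_trans() (below) feed the usage snapshot that bch2_bucket_alloc_trans() just filled in back into the stripe accounting, presumably avoiding a second percpu usage read on the allocation hot path; external callers keep the one-argument API.
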
@@ -688,12 +698,13 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
        unsigned dev;
        struct bch_dev *ca;
-       int ret = 0;
+       int ret = -BCH_ERR_insufficient_devices;
        unsigned i;
 
        BUG_ON(*nr_effective >= nr_replicas);
 
        for (i = 0; i < devs_sorted.nr; i++) {
+               struct bch_dev_usage usage;
                struct open_bucket *ob;
 
                dev = devs_sorted.devs[i];
@@ -713,13 +724,13 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                }
 
                ob = bch2_bucket_alloc_trans(trans, ca, reserve,
-                               flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
+                               flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
                if (!IS_ERR(ob))
-                       bch2_dev_stripe_increment(ca, stripe);
+                       bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
                percpu_ref_put(&ca->ref);
 
-               ret = PTR_ERR_OR_ZERO(ob);
-               if (ret) {
+               if (IS_ERR(ob)) {
+                       ret = PTR_ERR(ob);
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
                                break;
                        continue;
@@ -728,15 +739,12 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                add_new_bucket(c, ptrs, devs_may_alloc,
                               nr_effective, have_cache, flags, ob);
 
-               if (*nr_effective >= nr_replicas)
+               if (*nr_effective >= nr_replicas) {
+                       ret = 0;
                        break;
+               }
        }
 
-       if (*nr_effective >= nr_replicas)
-               ret = 0;
-       else if (!ret)
-               ret = -BCH_ERR_insufficient_devices;
-
        return ret;
 }
 
@@ -1113,23 +1121,24 @@ restart_find_oldest:
        hlist_add_head_rcu(&wp->node, head);
        mutex_unlock(&c->write_points_hash_lock);
 out:
-       wp->last_used = sched_clock();
+       wp->last_used = local_clock();
        return wp;
 }
 
 /*
  * Get us an open_bucket we can allocate from, return with it locked:
  */
-struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *trans,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
+int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
+                                  unsigned target,
+                                  unsigned erasure_code,
+                                  struct write_point_specifier write_point,
+                                  struct bch_devs_list *devs_have,
+                                  unsigned nr_replicas,
+                                  unsigned nr_replicas_required,
+                                  enum alloc_reserve reserve,
+                                  unsigned flags,
+                                  struct closure *cl,
+                                  struct write_point **wp_ret)
 {
        struct bch_fs *c = trans->c;
        struct write_point *wp;
@@ -1151,7 +1160,7 @@ retry:
        write_points_nr = c->write_points_nr;
        have_cache      = false;
 
-       wp = writepoint_find(trans, write_point.v);
+       *wp_ret = wp = writepoint_find(trans, write_point.v);
 
        if (wp->data_type == BCH_DATA_user)
                ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
@@ -1208,7 +1217,7 @@ alloc_done:
 
        BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
 
-       return wp;
+       return 0;
 err:
        open_bucket_for_each(c, &wp->ptrs, ob, i)
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
@@ -1226,39 +1235,10 @@ err:
        if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
            bch2_err_matches(ret, BCH_ERR_freelist_empty))
                return cl
-                       ? ERR_PTR(-EAGAIN)
-                       : ERR_PTR(-BCH_ERR_ENOSPC_bucket_alloc);
-
-       if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
-               return ERR_PTR(-EROFS);
-
-       return ERR_PTR(ret);
-}
-
-struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
-                               unsigned target,
-                               unsigned erasure_code,
-                               struct write_point_specifier write_point,
-                               struct bch_devs_list *devs_have,
-                               unsigned nr_replicas,
-                               unsigned nr_replicas_required,
-                               enum alloc_reserve reserve,
-                               unsigned flags,
-                               struct closure *cl)
-{
-       struct write_point *wp;
-
-       bch2_trans_do(c, NULL, NULL, 0,
-                     PTR_ERR_OR_ZERO(wp = bch2_alloc_sectors_start_trans(&trans, target,
-                                                       erasure_code,
-                                                       write_point,
-                                                       devs_have,
-                                                       nr_replicas,
-                                                       nr_replicas_required,
-                                                       reserve,
-                                                       flags, cl)));
-       return wp;
+                       ? -BCH_ERR_bucket_alloc_blocked
+                       : -BCH_ERR_ENOSPC_bucket_alloc;
 
+       return ret;
 }
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
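
With the int return and the write_point out-parameter, the calling convention for bch2_alloc_sectors_start_trans() changes; a sketch of the new usage (local variable names are illustrative):

	struct write_point *wp;
	int ret;

	ret = bch2_alloc_sectors_start_trans(trans, target, erasure_code,
					     write_point, devs_have,
					     nr_replicas, nr_replicas_required,
					     reserve, flags, cl, &wp);
	if (ret)	/* may be -BCH_ERR_bucket_alloc_blocked when cl != NULL */
		return ret;
	/* ... append ptrs, then bch2_alloc_sectors_done(c, wp) ... */

Note also that blocking is now reported as -BCH_ERR_bucket_alloc_blocked rather than -EAGAIN, and the -EROFS mapping for insufficient_devices is dropped here, so callers presumably handle that error themselves now.
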
@@ -1275,34 +1255,11 @@ struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
        };
 }
 
-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
                                    struct bkey_i *k, unsigned sectors,
                                    bool cached)
-
 {
-       struct open_bucket *ob;
-       unsigned i;
-
-       BUG_ON(sectors > wp->sectors_free);
-       wp->sectors_free -= sectors;
-
-       open_bucket_for_each(c, &wp->ptrs, ob, i) {
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
-               struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
-
-               ptr.cached = cached ||
-                       (!ca->mi.durability &&
-                        wp->data_type == BCH_DATA_user);
-
-               bch2_bkey_append_ptr(k, ptr);
-
-               BUG_ON(sectors > ob->sectors_free);
-               ob->sectors_free -= sectors;
-       }
+       bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
 }
 
 /*
@@ -1311,17 +1268,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
  */
 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
 {
-       struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
-       struct open_bucket *ob;
-       unsigned i;
-
-       open_bucket_for_each(c, &wp->ptrs, ob, i)
-               ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
-       wp->ptrs = keep;
-
-       mutex_unlock(&wp->lock);
-
-       bch2_open_buckets_put(c, &ptrs);
+       bch2_alloc_sectors_done_inlined(c, wp);
 }
 
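
Both bch2_alloc_sectors_append_ptrs() and bch2_alloc_sectors_done() become thin wrappers around *_inlined() variants, presumably moved into alloc_foreground.h so internal hot paths can inline them while the exported symbols keep their old signatures. Going by the body removed above, the done-variant would look like:

	static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c,
							   struct write_point *wp)
	{
		struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
		struct open_bucket *ob;
		unsigned i;

		/* keep buckets that still have space free; release the rest */
		open_bucket_for_each(c, &wp->ptrs, ob, i)
			ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
		wp->ptrs = keep;

		mutex_unlock(&wp->lock);

		bch2_open_buckets_put(c, &ptrs);
	}
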
 static inline void writepoint_init(struct write_point *wp,
@@ -1329,6 +1276,10 @@ static inline void writepoint_init(struct write_point *wp,
 {
        mutex_init(&wp->lock);
        wp->data_type = type;
+
+       INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
+       INIT_LIST_HEAD(&wp->writes);
+       spin_lock_init(&wp->writes_lock);
 }
 
 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
@@ -1359,7 +1310,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
             wp < c->write_points + c->write_points_nr; wp++) {
                writepoint_init(wp, BCH_DATA_user);
 
-               wp->last_used   = sched_clock();
+               wp->last_used   = local_clock();
                wp->write_point = (unsigned long) wp;
                hlist_add_head_rcu(&wp->node,
                                   writepoint_hash(c, wp->write_point));