Update bcachefs sources to 50d6a25d9c bcachefs: Erasure coding fixes
diff --git a/libbcachefs/alloc_foreground.c b/libbcachefs/alloc_foreground.c
index f2f9015dbb00ca144fe0eb2cf31d2f4e53f2b909..2bb107b8b0b9f2aa6bbd66915cd26f8bc12b0d48 100644
--- a/libbcachefs/alloc_foreground.c
+++ b/libbcachefs/alloc_foreground.c
@@ -1,56 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * Primary bucket allocation code
- *
  * Copyright 2012 Google, Inc.
  *
- * Allocation in bcache is done in terms of buckets:
- *
- * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
- * btree pointers - they must match for the pointer to be considered valid.
- *
- * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
- * bucket simply by incrementing its gen.
- *
- * The gens (along with the priorities; it's really the gens are important but
- * the code is named as if it's the priorities) are written in an arbitrary list
- * of buckets on disk, with a pointer to them in the journal header.
- *
- * When we invalidate a bucket, we have to write its new gen to disk and wait
- * for that write to complete before we use it - otherwise after a crash we
- * could have pointers that appeared to be good but pointed to data that had
- * been overwritten.
- *
- * Since the gens and priorities are all stored contiguously on disk, we can
- * batch this up: We fill up the free_inc list with freshly invalidated buckets,
- * call prio_write(), and when prio_write() finishes we pull buckets off the
- * free_inc list and optionally discard them.
- *
- * free_inc isn't the only freelist - if it was, we'd often have to sleep while
- * priorities and gens were being written before we could allocate. c->free is a
- * smaller freelist, and buckets on that list are always ready to be used.
- *
- * If we've got discards enabled, that happens when a bucket moves from the
- * free_inc list to the free list.
- *
- * It's important to ensure that gens don't wrap around - with respect to
- * either the oldest gen in the btree or the gen on disk. This is quite
- * difficult to do in practice, but we explicitly guard against it anyways - if
- * a bucket is in danger of wrapping around we simply skip invalidating it that
- * time around, and we garbage collect or rewrite the priorities sooner than we
- * would have otherwise.
+ * Foreground allocator code: allocate buckets from freelist, and allocate in
+ * sector granularity from writepoints.
  *
  * bch2_bucket_alloc() allocates a single bucket from a specific device.
  *
  * bch2_bucket_alloc_set() allocates one or more buckets from different devices
  * in a given filesystem.
- *
- * invalidate_buckets() drives all the processes described above. It's called
- * from bch2_bucket_alloc() and a few other places that need to make sure free
- * buckets are ready.
- *
- * invalidate_buckets_(lru|fifo)() find buckets that are available to be
- * invalidated, and then invalidate them and stick them on the free_inc list -
- * in either lru or fifo order.
  */
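(Editor's note: to make the two entry points described in the comment above concrete, here is a minimal caller-side sketch. The two allocator calls and the types they take are the ones shown later in this diff; everything else — the wrapper function, the choice of reserve and flags, and the rw_devs[] lookup — is an assumption made for illustration, not code from the tree.)

    /*
     * Illustrative sketch only: grab nr_replicas buckets for user data from
     * any rw device, without blocking.  RESERVE_NONE, the flags and the
     * rw_devs[] lookup are assumptions for the example.
     */
    static int example_alloc_replicas(struct bch_fs *c, struct write_point *wp,
                                      unsigned nr_replicas)
    {
            struct open_buckets ptrs = { .nr = 0 };
            struct bch_devs_mask devs = c->rw_devs[BCH_DATA_user];
            unsigned nr_effective = 0;
            bool have_cache = false;
            int ret;

            ret = bch2_bucket_alloc_set(c, &ptrs, &wp->stripe, &devs,
                                        nr_replicas, &nr_effective, &have_cache,
                                        RESERVE_NONE, BUCKET_ALLOC_USE_DURABILITY,
                                        NULL /* don't block */);
            if (ret)
                    return ret;

            /* ... write to the buckets in ptrs, then drop the references: */
            bch2_open_buckets_put(c, &ptrs);
            return 0;
    }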
 
 #include "bcachefs.h"
 #include <linux/rcupdate.h>
 #include <trace/events/bcachefs.h>
 
-enum bucket_alloc_ret {
-       ALLOC_SUCCESS,
-       OPEN_BUCKETS_EMPTY,
-       FREELIST_EMPTY,         /* Allocator thread not keeping up */
-};
-
 /*
  * Open buckets represent a bucket that's currently being allocated from.  They
  * serve two purposes:
@@ -100,21 +52,22 @@ void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
                return;
        }
 
-       percpu_down_read_preempt_disable(&c->mark_lock);
+       percpu_down_read(&c->mark_lock);
        spin_lock(&ob->lock);
 
-       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
-                              false, gc_pos_alloc(c, ob), 0);
+       bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);
        ob->valid = false;
        ob->type = 0;
 
        spin_unlock(&ob->lock);
-       percpu_up_read_preempt_enable(&c->mark_lock);
+       percpu_up_read(&c->mark_lock);
 
        spin_lock(&c->freelist_lock);
        ob->freelist = c->open_buckets_freelist;
        c->open_buckets_freelist = ob - c->open_buckets;
+
        c->open_buckets_nr_free++;
+       ca->nr_open_buckets--;
        spin_unlock(&c->freelist_lock);
 
        closure_wake_up(&c->open_buckets_wait);
@@ -149,12 +102,13 @@ static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
 }
 
 static void open_bucket_free_unused(struct bch_fs *c,
-                                   struct open_bucket *ob,
-                                   bool may_realloc)
+                                   struct write_point *wp,
+                                   struct open_bucket *ob)
 {
        struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
+       bool may_realloc = wp->type == BCH_DATA_user;
 
-       BUG_ON(ca->open_buckets_partial_nr >=
+       BUG_ON(ca->open_buckets_partial_nr >
               ARRAY_SIZE(ca->open_buckets_partial));
 
        if (ca->open_buckets_partial_nr <
@@ -196,8 +150,9 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
        rcu_read_lock();
        buckets = bucket_array(ca);
 
-       for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
-               if (is_available_bucket(buckets->b[b].mark))
+       for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
+               if (is_available_bucket(buckets->b[b].mark) &&
+                   !buckets->b[b].mark.owned_by_allocator)
                        goto success;
        b = -1;
 success:
@@ -208,12 +163,13 @@ success:
 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 {
        switch (reserve) {
-       case RESERVE_ALLOC:
-               return 0;
        case RESERVE_BTREE:
-               return BTREE_NODE_OPEN_BUCKET_RESERVE;
+       case RESERVE_BTREE_MOVINGGC:
+               return 0;
+       case RESERVE_MOVINGGC:
+               return OPEN_BUCKETS_COUNT / 4;
        default:
-               return BTREE_NODE_OPEN_BUCKET_RESERVE * 2;
+               return OPEN_BUCKETS_COUNT / 2;
        }
 }
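(Editor's note: for a sense of scale, the new reserve sizing works out as follows. OPEN_BUCKETS_COUNT is defined elsewhere in the tree; the value 1024 used below is an assumption for illustration.)

    /*
     * Worked example, assuming OPEN_BUCKETS_COUNT == 1024:
     *
     *   RESERVE_BTREE, RESERVE_BTREE_MOVINGGC: reserve   0 -> may drain all 1024
     *   RESERVE_MOVINGGC:                      reserve 256 -> fails once <= 256 free
     *   all other reserves:                    reserve 512 -> fails once <= 512 free
     *
     * "fails" here means bch2_bucket_alloc() below sees
     * c->open_buckets_nr_free <= open_buckets_reserved(reserve) and returns
     * -OPEN_BUCKETS_EMPTY rather than dipping into the reserved open buckets.
     */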
 
@@ -227,45 +183,48 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
                                      bool may_alloc_partial,
                                      struct closure *cl)
 {
-       struct bucket_array *buckets;
        struct open_bucket *ob;
-       long bucket = 0;
+       long b = 0;
 
        spin_lock(&c->freelist_lock);
 
-       if (may_alloc_partial &&
-           ca->open_buckets_partial_nr) {
-               ob = c->open_buckets +
-                       ca->open_buckets_partial[--ca->open_buckets_partial_nr];
-               ob->on_partial_list = false;
-               spin_unlock(&c->freelist_lock);
-               return ob;
+       if (may_alloc_partial) {
+               int i;
+
+               for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
+                       ob = c->open_buckets + ca->open_buckets_partial[i];
+
+                       if (reserve <= ob->alloc_reserve) {
+                               array_remove_item(ca->open_buckets_partial,
+                                                 ca->open_buckets_partial_nr,
+                                                 i);
+                               ob->on_partial_list = false;
+                               ob->alloc_reserve = reserve;
+                               spin_unlock(&c->freelist_lock);
+                               return ob;
+                       }
+               }
        }
 
        if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
                if (cl)
                        closure_wait(&c->open_buckets_wait, cl);
+
+               if (!c->blocked_allocate_open_bucket)
+                       c->blocked_allocate_open_bucket = local_clock();
+
                spin_unlock(&c->freelist_lock);
                trace_open_bucket_alloc_fail(ca, reserve);
                return ERR_PTR(-OPEN_BUCKETS_EMPTY);
        }
 
-       if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
+       if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
                goto out;
 
        switch (reserve) {
-       case RESERVE_ALLOC:
-               if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                       goto out;
-               break;
-       case RESERVE_BTREE:
-               if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
-                   ca->free[RESERVE_BTREE].size &&
-                   fifo_pop(&ca->free[RESERVE_BTREE], bucket))
-                       goto out;
-               break;
+       case RESERVE_BTREE_MOVINGGC:
        case RESERVE_MOVINGGC:
-               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
+               if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
                        goto out;
                break;
        default:
@@ -275,31 +234,47 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
        if (cl)
                closure_wait(&c->freelist_wait, cl);
 
+       if (!c->blocked_allocate)
+               c->blocked_allocate = local_clock();
+
        spin_unlock(&c->freelist_lock);
 
        trace_bucket_alloc_fail(ca, reserve);
        return ERR_PTR(-FREELIST_EMPTY);
 out:
-       verify_not_on_freelist(c, ca, bucket);
+       verify_not_on_freelist(c, ca, b);
 
        ob = bch2_open_bucket_alloc(c);
 
        spin_lock(&ob->lock);
-       buckets = bucket_array(ca);
 
        ob->valid       = true;
        ob->sectors_free = ca->mi.bucket_size;
+       ob->alloc_reserve = reserve;
        ob->ptr         = (struct bch_extent_ptr) {
                .type   = 1 << BCH_EXTENT_ENTRY_ptr,
-               .gen    = buckets->b[bucket].mark.gen,
-               .offset = bucket_to_sector(ca, bucket),
+               .gen    = bucket(ca, b)->mark.gen,
+               .offset = bucket_to_sector(ca, b),
                .dev    = ca->dev_idx,
        };
 
-       bucket_io_clock_reset(c, ca, bucket, READ);
-       bucket_io_clock_reset(c, ca, bucket, WRITE);
        spin_unlock(&ob->lock);
 
+       if (c->blocked_allocate_open_bucket) {
+               bch2_time_stats_update(
+                       &c->times[BCH_TIME_blocked_allocate_open_bucket],
+                       c->blocked_allocate_open_bucket);
+               c->blocked_allocate_open_bucket = 0;
+       }
+
+       if (c->blocked_allocate) {
+               bch2_time_stats_update(
+                       &c->times[BCH_TIME_blocked_allocate],
+                       c->blocked_allocate);
+               c->blocked_allocate = 0;
+       }
+
+       ca->nr_open_buckets++;
        spin_unlock(&c->freelist_lock);
 
        bch2_wake_allocator(ca);
@@ -322,21 +297,20 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
                                          struct bch_devs_mask *devs)
 {
        struct dev_alloc_list ret = { .nr = 0 };
-       struct bch_dev *ca;
        unsigned i;
 
-       for_each_member_device_rcu(ca, c, i, devs)
+       for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
                ret.devs[ret.nr++] = i;
 
        bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
        return ret;
 }
 
-void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
+void bch2_dev_stripe_increment(struct bch_dev *ca,
                               struct dev_stripe_state *stripe)
 {
        u64 *v = stripe->next_alloc + ca->dev_idx;
-       u64 free_space = dev_buckets_free(c, ca);
+       u64 free_space = dev_buckets_available(ca);
        u64 free_space_inv = free_space
                ? div64_u64(1ULL << 48, free_space)
                : 1ULL << 48;
@@ -355,22 +329,41 @@ void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
 #define BUCKET_MAY_ALLOC_PARTIAL       (1 << 0)
 #define BUCKET_ALLOC_USE_DURABILITY    (1 << 1)
 
-static int bch2_bucket_alloc_set(struct bch_fs *c,
-                                struct open_buckets *ptrs,
-                                struct dev_stripe_state *stripe,
-                                struct bch_devs_mask *devs_may_alloc,
-                                unsigned nr_replicas,
-                                unsigned *nr_effective,
-                                bool *have_cache,
-                                enum alloc_reserve reserve,
-                                unsigned flags,
-                                struct closure *cl)
+static void add_new_bucket(struct bch_fs *c,
+                          struct open_buckets *ptrs,
+                          struct bch_devs_mask *devs_may_alloc,
+                          unsigned *nr_effective,
+                          bool *have_cache,
+                          unsigned flags,
+                          struct open_bucket *ob)
+{
+       unsigned durability =
+               bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;
+
+       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
+       *nr_effective   += (flags & BUCKET_ALLOC_USE_DURABILITY)
+               ? durability : 1;
+       *have_cache     |= !durability;
+
+       ob_push(c, ptrs, ob);
+}
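(Editor's note: a short worked example of the bookkeeping add_new_bucket() performs; the device durabilities below are hypothetical.)

    /*
     * Illustrative only:
     *
     *   BUCKET_ALLOC_USE_DURABILITY set, durability-2 device:
     *           *nr_effective += 2
     *   BUCKET_ALLOC_USE_DURABILITY set, durability-0 (cache) device:
     *           *nr_effective += 0, *have_cache = true
     *   BUCKET_ALLOC_USE_DURABILITY clear, any device:
     *           *nr_effective += 1 (a durability-0 device still sets *have_cache)
     *
     * In every case the device is cleared from devs_may_alloc, so it won't be
     * chosen again for the same allocation.
     */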
+
+int bch2_bucket_alloc_set(struct bch_fs *c,
+                     struct open_buckets *ptrs,
+                     struct dev_stripe_state *stripe,
+                     struct bch_devs_mask *devs_may_alloc,
+                     unsigned nr_replicas,
+                     unsigned *nr_effective,
+                     bool *have_cache,
+                     enum alloc_reserve reserve,
+                     unsigned flags,
+                     struct closure *cl)
 {
        struct dev_alloc_list devs_sorted =
                bch2_dev_alloc_list(c, stripe, devs_may_alloc);
        struct bch_dev *ca;
-       bool alloc_failure = false;
-       unsigned i, durability;
+       int ret = -INSUFFICIENT_DEVICES;
+       unsigned i;
 
        BUG_ON(*nr_effective >= nr_replicas);
 
@@ -387,122 +380,44 @@ static int bch2_bucket_alloc_set(struct bch_fs *c,
                ob = bch2_bucket_alloc(c, ca, reserve,
                                flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
                if (IS_ERR(ob)) {
-                       enum bucket_alloc_ret ret = -PTR_ERR(ob);
-
-                       WARN_ON(reserve == RESERVE_MOVINGGC &&
-                               ret != OPEN_BUCKETS_EMPTY);
+                       ret = PTR_ERR(ob);
 
                        if (cl)
-                               return -EAGAIN;
-                       if (ret == OPEN_BUCKETS_EMPTY)
-                               return -ENOSPC;
-                       alloc_failure = true;
+                               return ret;
                        continue;
                }
 
-               durability = (flags & BUCKET_ALLOC_USE_DURABILITY)
-                       ? ca->mi.durability : 1;
+               add_new_bucket(c, ptrs, devs_may_alloc,
+                              nr_effective, have_cache, flags, ob);
 
-               __clear_bit(ca->dev_idx, devs_may_alloc->d);
-               *nr_effective   += durability;
-               *have_cache     |= !durability;
-
-               ob_push(c, ptrs, ob);
-
-               bch2_dev_stripe_increment(c, ca, stripe);
+               bch2_dev_stripe_increment(ca, stripe);
 
                if (*nr_effective >= nr_replicas)
                        return 0;
        }
 
-       return alloc_failure ? -ENOSPC : -EROFS;
+       return ret;
 }
 
 /* Allocate from stripes: */
 
-/*
- * XXX: use a higher watermark for allocating open buckets here:
- */
-static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
-{
-       struct bch_devs_mask devs;
-       struct open_bucket *ob;
-       unsigned i, nr_have = 0, nr_data =
-               min_t(unsigned, h->nr_active_devs,
-                     EC_STRIPE_MAX) - h->redundancy;
-       bool have_cache = true;
-       int ret = 0;
-
-       BUG_ON(h->blocks.nr > nr_data);
-       BUG_ON(h->parity.nr > h->redundancy);
-
-       devs = h->devs;
-
-       open_bucket_for_each(c, &h->parity, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
-       open_bucket_for_each(c, &h->blocks, ob, i)
-               __clear_bit(ob->ptr.dev, devs.d);
-
-       percpu_down_read_preempt_disable(&c->mark_lock);
-       rcu_read_lock();
-
-       if (h->parity.nr < h->redundancy) {
-               nr_have = h->parity.nr;
-
-               ret = bch2_bucket_alloc_set(c, &h->parity,
-                                           &h->parity_stripe,
-                                           &devs,
-                                           h->redundancy,
-                                           &nr_have,
-                                           &have_cache,
-                                           RESERVE_NONE,
-                                           0,
-                                           NULL);
-               if (ret)
-                       goto err;
-       }
-
-       if (h->blocks.nr < nr_data) {
-               nr_have = h->blocks.nr;
-
-               ret = bch2_bucket_alloc_set(c, &h->blocks,
-                                           &h->block_stripe,
-                                           &devs,
-                                           nr_data,
-                                           &nr_have,
-                                           &have_cache,
-                                           RESERVE_NONE,
-                                           0,
-                                           NULL);
-               if (ret)
-                       goto err;
-       }
-
-       rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
-
-       return bch2_ec_stripe_new_alloc(c, h);
-err:
-       rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
-       return -1;
-}
-
 /*
  * if we can't allocate a new stripe because there are already too many
  * partially filled stripes, force allocating from an existing stripe even when
  * it's to a device we don't want:
  */
 
-static void bucket_alloc_from_stripe(struct bch_fs *c,
-                                    struct open_buckets *ptrs,
-                                    struct write_point *wp,
-                                    struct bch_devs_mask *devs_may_alloc,
-                                    u16 target,
-                                    unsigned erasure_code,
-                                    unsigned nr_replicas,
-                                    unsigned *nr_effective,
-                                    bool *have_cache)
+static int bucket_alloc_from_stripe(struct bch_fs *c,
+                        struct open_buckets *ptrs,
+                        struct write_point *wp,
+                        struct bch_devs_mask *devs_may_alloc,
+                        u16 target,
+                        unsigned erasure_code,
+                        unsigned nr_replicas,
+                        unsigned *nr_effective,
+                        bool *have_cache,
+                        unsigned flags,
+                        struct closure *cl)
 {
        struct dev_alloc_list devs_sorted;
        struct ec_stripe_head *h;
@@ -511,30 +426,34 @@ static void bucket_alloc_from_stripe(struct bch_fs *c,
        unsigned i, ec_idx;
 
        if (!erasure_code)
-               return;
+               return 0;
 
        if (nr_replicas < 2)
-               return;
+               return 0;
 
        if (ec_open_bucket(c, ptrs))
-               return;
+               return 0;
 
-       h = bch2_ec_stripe_head_get(c, target, erasure_code, nr_replicas - 1);
+       h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
+                                   wp == &c->copygc_write_point,
+                                   cl);
+       if (IS_ERR(h))
+               return -PTR_ERR(h);
        if (!h)
-               return;
-
-       if (!h->s && ec_stripe_alloc(c, h))
-               goto out_put_head;
+               return 0;
 
-       rcu_read_lock();
        devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
-       rcu_read_unlock();
 
        for (i = 0; i < devs_sorted.nr; i++)
-               open_bucket_for_each(c, &h->s->blocks, ob, ec_idx)
+               for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
+                       if (!h->s->blocks[ec_idx])
+                               continue;
+
+                       ob = c->open_buckets + h->s->blocks[ec_idx];
                        if (ob->ptr.dev == devs_sorted.devs[i] &&
                            !test_and_set_bit(ec_idx, h->s->blocks_allocated))
                                goto got_bucket;
+               }
        goto out_put_head;
 got_bucket:
        ca = bch_dev_bkey_exists(c, ob->ptr.dev);
@@ -542,14 +461,12 @@ got_bucket:
        ob->ec_idx      = ec_idx;
        ob->ec          = h->s;
 
-       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
-       *nr_effective   += ca->mi.durability;
-       *have_cache     |= !ca->mi.durability;
-
-       ob_push(c, ptrs, ob);
+       add_new_bucket(c, ptrs, devs_may_alloc,
+                      nr_effective, have_cache, flags, ob);
        atomic_inc(&h->s->pin);
 out_put_head:
-       bch2_ec_stripe_head_put(h);
+       bch2_ec_stripe_head_put(c, h);
+       return 0;
 }
 
 /* Sector allocator */
@@ -561,6 +478,7 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
                                        unsigned nr_replicas,
                                        unsigned *nr_effective,
                                        bool *have_cache,
+                                       unsigned flags,
                                        bool need_ec)
 {
        struct open_buckets ptrs_skip = { .nr = 0 };
@@ -573,13 +491,11 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
                if (*nr_effective < nr_replicas &&
                    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
                    (ca->mi.durability ||
-                    (wp->type == BCH_DATA_USER && !*have_cache)) &&
+                    (wp->type == BCH_DATA_user && !*have_cache)) &&
                    (ob->ec || !need_ec)) {
-                       __clear_bit(ob->ptr.dev, devs_may_alloc->d);
-                       *nr_effective   += ca->mi.durability;
-                       *have_cache     |= !ca->mi.durability;
-
-                       ob_push(c, ptrs, ob);
+                       add_new_bucket(c, ptrs, devs_may_alloc,
+                                      nr_effective, have_cache,
+                                      flags, ob);
                } else {
                        ob_push(c, &ptrs_skip, ob);
                }
@@ -588,25 +504,23 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
 }
 
 static int open_bucket_add_buckets(struct bch_fs *c,
-                                  struct open_buckets *ptrs,
-                                  struct write_point *wp,
-                                  struct bch_devs_list *devs_have,
-                                  u16 target,
-                                  unsigned erasure_code,
-                                  unsigned nr_replicas,
-                                  unsigned *nr_effective,
-                                  bool *have_cache,
-                                  enum alloc_reserve reserve,
-                                  struct closure *_cl)
+                       struct open_buckets *ptrs,
+                       struct write_point *wp,
+                       struct bch_devs_list *devs_have,
+                       u16 target,
+                       unsigned erasure_code,
+                       unsigned nr_replicas,
+                       unsigned *nr_effective,
+                       bool *have_cache,
+                       enum alloc_reserve reserve,
+                       unsigned flags,
+                       struct closure *_cl)
 {
        struct bch_devs_mask devs;
        struct open_bucket *ob;
        struct closure *cl = NULL;
-       unsigned i, flags = BUCKET_ALLOC_USE_DURABILITY;
        int ret;
-
-       if (wp->type == BCH_DATA_USER)
-               flags |= BUCKET_MAY_ALLOC_PARTIAL;
+       unsigned i;
 
        rcu_read_lock();
        devs = target_rw_devs(c, wp->type, target);
@@ -620,27 +534,34 @@ static int open_bucket_add_buckets(struct bch_fs *c,
                __clear_bit(ob->ptr.dev, devs.d);
 
        if (erasure_code) {
-               get_buckets_from_writepoint(c, ptrs, wp, &devs,
-                                           nr_replicas, nr_effective,
-                                           have_cache, true);
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+               if (!ec_open_bucket(c, ptrs)) {
+                       get_buckets_from_writepoint(c, ptrs, wp, &devs,
+                                                   nr_replicas, nr_effective,
+                                                   have_cache, flags, true);
+                       if (*nr_effective >= nr_replicas)
+                               return 0;
+               }
 
-               bucket_alloc_from_stripe(c, ptrs, wp, &devs,
-                                        target, erasure_code,
-                                        nr_replicas, nr_effective,
-                                        have_cache);
-               if (*nr_effective >= nr_replicas)
-                       return 0;
+               if (!ec_open_bucket(c, ptrs)) {
+                       ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
+                                                target, erasure_code,
+                                                nr_replicas, nr_effective,
+                                                have_cache, flags, _cl);
+                       if (ret == -FREELIST_EMPTY ||
+                           ret == -OPEN_BUCKETS_EMPTY)
+                               return ret;
+                       if (*nr_effective >= nr_replicas)
+                               return 0;
+               }
        }
 
        get_buckets_from_writepoint(c, ptrs, wp, &devs,
                                    nr_replicas, nr_effective,
-                                   have_cache, false);
+                                   have_cache, flags, false);
        if (*nr_effective >= nr_replicas)
                return 0;
 
-       percpu_down_read_preempt_disable(&c->mark_lock);
+       percpu_down_read(&c->mark_lock);
        rcu_read_lock();
 
 retry_blocking:
@@ -651,20 +572,19 @@ retry_blocking:
        ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
                                nr_replicas, nr_effective, have_cache,
                                reserve, flags, cl);
-       if (ret && ret != -EROFS && !cl && _cl) {
+       if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
                cl = _cl;
                goto retry_blocking;
        }
 
        rcu_read_unlock();
-       percpu_up_read_preempt_enable(&c->mark_lock);
+       percpu_up_read(&c->mark_lock);
 
        return ret;
 }
 
 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
-                               struct open_buckets *obs,
-                               enum bch_data_type data_type)
+                               struct open_buckets *obs)
 {
        struct open_buckets ptrs = { .nr = 0 };
        struct open_bucket *ob, *ob2;
@@ -675,10 +595,13 @@ void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
 
                if (!drop && ob->ec) {
                        mutex_lock(&ob->ec->lock);
-                       open_bucket_for_each(c, &ob->ec->blocks, ob2, j)
-                               drop |= ob2->ptr.dev == ca->dev_idx;
-                       open_bucket_for_each(c, &ob->ec->parity, ob2, j)
+                       for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
+                               if (!ob->ec->blocks[j])
+                                       continue;
+
+                               ob2 = c->open_buckets + ob->ec->blocks[j];
                                drop |= ob2->ptr.dev == ca->dev_idx;
+                       }
                        mutex_unlock(&ob->ec->lock);
                }
 
@@ -695,7 +618,7 @@ void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
                          struct write_point *wp)
 {
        mutex_lock(&wp->lock);
-       bch2_open_buckets_stop_dev(c, ca, &wp->ptrs, wp->type);
+       bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
        mutex_unlock(&wp->lock);
 }
 
@@ -713,17 +636,20 @@ static struct write_point *__writepoint_find(struct hlist_head *head,
 {
        struct write_point *wp;
 
+       rcu_read_lock();
        hlist_for_each_entry_rcu(wp, head, node)
                if (wp->write_point == write_point)
-                       return wp;
-
-       return NULL;
+                       goto out;
+       wp = NULL;
+out:
+       rcu_read_unlock();
+       return wp;
 }
 
 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
 {
        u64 stranded    = c->write_points_nr * c->bucket_size_max;
-       u64 free        = bch2_fs_sectors_free(c);
+       u64 free        = bch2_fs_usage_read_short(c).free;
 
        return stranded * factor > free;
 }
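(Editor's note: with some hypothetical numbers the check above behaves as sketched below; the figures, and the assumption that both quantities are in 512-byte sectors, are for illustration only.)

    /*
     * Hypothetical example:
     *
     *   c->write_points_nr  = 32
     *   c->bucket_size_max  = 2048        (1 MiB buckets)
     *   stranded            = 32 * 2048   = 65536 sectors (32 MiB)
     *
     * With factor == 2, too_many_writepoints() returns true once free space
     * drops below 65536 * 2 = 131072 sectors (64 MiB), i.e. the existing pool
     * of write points is then considered too large for the remaining space.
     */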
@@ -841,8 +767,13 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
        struct open_bucket *ob;
        struct open_buckets ptrs;
        unsigned nr_effective, write_points_nr;
+       unsigned ob_flags = 0;
        bool have_cache;
-       int ret, i;
+       int ret;
+       int i;
+
+       if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
+               ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
 
        BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
@@ -853,27 +784,33 @@ retry:
 
        wp = writepoint_find(c, write_point.v);
 
+       if (wp->type == BCH_DATA_user)
+               ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
+
        /* metadata may not allocate on cache devices: */
-       if (wp->type != BCH_DATA_USER)
+       if (wp->type != BCH_DATA_user)
                have_cache = true;
 
        if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, cl);
+                                             &have_cache, reserve,
+                                             ob_flags, cl);
        } else {
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              target, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, NULL);
+                                             &have_cache, reserve,
+                                             ob_flags, NULL);
                if (!ret)
                        goto alloc_done;
 
                ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
                                              0, erasure_code,
                                              nr_replicas, &nr_effective,
-                                             &have_cache, reserve, cl);
+                                             &have_cache, reserve,
+                                             ob_flags, cl);
        }
 alloc_done:
        BUG_ON(!ret && nr_effective < nr_replicas);
@@ -881,7 +818,7 @@ alloc_done:
        if (erasure_code && !ec_open_bucket(c, &ptrs))
                pr_debug("failed to get ec bucket: ret %u", ret);
 
-       if (ret == -EROFS &&
+       if (ret == -INSUFFICIENT_DEVICES &&
            nr_effective >= nr_replicas_required)
                ret = 0;
 
@@ -890,7 +827,7 @@ alloc_done:
 
        /* Free buckets we didn't use: */
        open_bucket_for_each(c, &wp->ptrs, ob, i)
-               open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);
+               open_bucket_free_unused(c, wp, ob);
 
        wp->ptrs = ptrs;
 
@@ -909,17 +846,24 @@ err:
                if (ptrs.nr < ARRAY_SIZE(ptrs.v))
                        ob_push(c, &ptrs, ob);
                else
-                       open_bucket_free_unused(c, ob,
-                                       wp->type == BCH_DATA_USER);
+                       open_bucket_free_unused(c, wp, ob);
        wp->ptrs = ptrs;
 
        mutex_unlock(&wp->lock);
 
-       if (ret == -ENOSPC &&
+       if (ret == -FREELIST_EMPTY &&
            try_decrease_writepoints(c, write_points_nr))
                goto retry;
 
-       return ERR_PTR(ret);
+       switch (ret) {
+       case -OPEN_BUCKETS_EMPTY:
+       case -FREELIST_EMPTY:
+               return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
+       case -INSUFFICIENT_DEVICES:
+               return ERR_PTR(-EROFS);
+       default:
+               BUG();
+       }
 }
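(Editor's note: the new switch above maps the allocator's internal error codes onto generic errnos for callers. A minimal sketch of how a caller might interpret them follows; the wrapper itself is hypothetical and not code from the tree.)

    /* Illustrative only: interpret the result of bch2_alloc_sectors_start(). */
    static int example_check_alloc(struct write_point *wp)
    {
            if (!IS_ERR(wp))
                    return 0;       /* got a write point with open buckets */

            switch (PTR_ERR(wp)) {
            case -EAGAIN:
                    /*
                     * Buckets or open buckets were exhausted and the caller's
                     * closure was put on a waitlist: closure_sync() and retry.
                     */
                    return -EAGAIN;
            case -EROFS:
                    /* too few usable devices to meet nr_replicas_required */
                    return -EROFS;
            default:
                    /* -ENOSPC: no closure passed, nothing could be allocated */
                    return -ENOSPC;
            }
    }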
 
 /*
@@ -941,7 +885,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
                struct bch_extent_ptr tmp = ob->ptr;
 
                tmp.cached = !ca->mi.durability &&
-                       wp->type == BCH_DATA_USER;
+                       wp->type == BCH_DATA_user;
 
                tmp.offset += ca->mi.bucket_size - ob->sectors_free;
                bch2_bkey_append_ptr(k, tmp);
@@ -970,6 +914,13 @@ void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
        bch2_open_buckets_put(c, &ptrs);
 }
 
+static inline void writepoint_init(struct write_point *wp,
+                                  enum bch_data_type type)
+{
+       mutex_init(&wp->lock);
+       wp->type = type;
+}
+
 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 {
        struct open_bucket *ob;
@@ -990,12 +941,13 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
                c->open_buckets_freelist = ob - c->open_buckets;
        }
 
-       writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
-       writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
+       writepoint_init(&c->btree_write_point,          BCH_DATA_btree);
+       writepoint_init(&c->rebalance_write_point,      BCH_DATA_user);
+       writepoint_init(&c->copygc_write_point,         BCH_DATA_user);
 
        for (wp = c->write_points;
             wp < c->write_points + c->write_points_nr; wp++) {
-               writepoint_init(wp, BCH_DATA_USER);
+               writepoint_init(wp, BCH_DATA_user);
 
                wp->last_used   = sched_clock();
                wp->write_point = (unsigned long) wp;