/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
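
/*
 * Editor's illustration of the gen rule described in the header comment above
 * (not part of the original file, guarded out of compilation): a pointer is
 * only considered valid while its gen matches the bucket's current gen, so
 * bumping the bucket's gen invalidates every existing pointer into it. The
 * helper below is a hypothetical stand-in; the real check is ptr_stale(),
 * which also accounts for gen wraparound.
 */
#if 0
static inline bool example_ptr_still_valid(u8 bucket_gen, u8 ptr_gen)
{
	/* reusing a bucket just increments bucket_gen, making this false: */
	return bucket_gen == ptr_gen;
}
#endif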
enum bucket_alloc_ret {
	ALLOC_SUCCESS,
	OPEN_BUCKETS_EMPTY,
	FREELIST_EMPTY,		/* Allocator thread not keeping up */
};
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read_preempt_disable(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);

	spin_unlock(&ob->lock);
	percpu_up_read_preempt_enable(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
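
/*
 * Editor's sketch (not part of the original file) of the reference protocol
 * described in the comment above: the open bucket reference obtained from the
 * allocator is only dropped once the index update that makes the allocation
 * reachable has completed. example_index_update() is a hypothetical stand-in
 * for the real btree update path.
 */
#if 0
static void example_write_path(struct bch_fs *c, struct open_bucket *ob,
			       struct bkey_i *k)
{
	/* ... write data into the bucket ob points to ... */

	example_index_update(c, k);	/* makes the allocation reachable */

	/* only now is it safe to drop the reference that GC relies on: */
	bch2_open_bucket_put(c, ob);
}
#endif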
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct open_bucket *ob,
				    bool may_realloc)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			return b;

	return -1;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
		return BTREE_NODE_OPEN_BUCKET_RESERVE;
	default:
		return BTREE_NODE_OPEN_BUCKET_RESERVE * 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket = 0;
	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ob;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}
	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr = (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);
	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
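
/*
 * Editor's usage sketch (illustrative only, not from the original file):
 * callers are expected to handle the ERR_PTR() failures documented above,
 * and to drop the open bucket reference once they're done with it.
 */
#if 0
static int example_alloc_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				    struct closure *cl)
{
	struct open_bucket *ob =
		bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);

	if (IS_ERR(ob))
		return PTR_ERR(ob); /* -OPEN_BUCKETS_EMPTY or -FREELIST_EMPTY */

	/* ... use ob, then drop the reference after the index update: */
	bch2_open_bucket_put(c, ob);
	return 0;
}
#endif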
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
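
/*
 * Editor's note with illustrative numbers (not from the original source): a
 * device with 1024 free buckets gets free_space_inv = 2^48 / 1024 = 2^38
 * added to its next_alloc key per allocation, while a device with 4096 free
 * buckets only gets 2^36; devices with less free space therefore accumulate
 * next_alloc faster, sort later in bch2_dev_alloc_list(), and receive
 * proportionally fewer new allocations.
 */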
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)
static int bch2_bucket_alloc_set(struct bch_fs *c,
				 struct open_buckets *ptrs,
				 struct dev_stripe_state *stripe,
				 struct bch_devs_mask *devs_may_alloc,
				 unsigned nr_replicas,
				 unsigned *nr_effective,
				 bool *have_cache,
				 enum alloc_reserve reserve,
				 unsigned flags,
				 struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	bool alloc_failure = false;
	unsigned i, durability;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			enum bucket_alloc_ret ret = -PTR_ERR(ob);

			WARN_ON(reserve == RESERVE_MOVINGGC &&
				ret != OPEN_BUCKETS_EMPTY);

			if (ret == OPEN_BUCKETS_EMPTY)
				return -ENOSPC;
			alloc_failure = true;
			continue;
		}

		durability = (flags & BUCKET_ALLOC_USE_DURABILITY)
			? ca->mi.durability : 1;

		__clear_bit(ca->dev_idx, devs_may_alloc->d);
		*nr_effective	+= durability;
		*have_cache	|= !durability;

		ob_push(c, ptrs, ob);

		bch2_dev_stripe_increment(c, ca, stripe);

		if (*nr_effective >= nr_replicas)
			return 0;
	}

	return alloc_failure ? -ENOSPC : -EROFS;
}
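
/*
 * Editor's note, illustrative only: a device with durability 0 (e.g. a cache
 * device) contributes nothing to *nr_effective when
 * BUCKET_ALLOC_USE_DURABILITY is set - it only sets *have_cache - so a
 * request for 2 replicas spread over one durability-0 device and two
 * durability-1 devices still needs buckets from both durability-1 devices
 * before the loop above returns 0.
 */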
/* Allocate from stripes: */

/*
 * XXX: use a higher watermark for allocating open buckets here:
 */
static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	unsigned i, nr_have = 0, nr_data =
		min_t(unsigned, h->nr_active_devs,
		      EC_STRIPE_MAX) - h->redundancy;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(h->blocks.nr > nr_data);
	BUG_ON(h->parity.nr > h->redundancy);

	open_bucket_for_each(c, &h->parity, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);
	open_bucket_for_each(c, &h->blocks, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	percpu_down_read_preempt_disable(&c->mark_lock);

	if (h->parity.nr < h->redundancy) {
		nr_have = h->parity.nr;

		ret = bch2_bucket_alloc_set(c, &h->parity,
	}

	if (h->blocks.nr < nr_data) {
		nr_have = h->blocks.nr;

		ret = bch2_bucket_alloc_set(c, &h->blocks,
	}

	percpu_up_read_preempt_enable(&c->mark_lock);

	return bch2_ec_stripe_new_alloc(c, h);
err:
	percpu_up_read_preempt_enable(&c->mark_lock);
	return ret;
}
/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static void bucket_alloc_from_stripe(struct bch_fs *c,
				     struct open_buckets *ptrs,
				     struct write_point *wp,
				     struct bch_devs_mask *devs_may_alloc,
				     unsigned target,
				     unsigned erasure_code,
				     unsigned nr_replicas,
				     unsigned *nr_effective,
				     bool *have_cache)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (ec_open_bucket(c, ptrs))
		return;

	h = bch2_ec_stripe_head_get(c, target, erasure_code, nr_replicas - 1);
	if (!h)
		return;

	if (!h->s && ec_stripe_alloc(c, h))
		goto out_put_head;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		open_bucket_for_each(c, &h->s->blocks, ob, ec_idx)
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= ca->mi.durability;
	*have_cache	|= !ca->mi.durability;

	ob_push(c, ptrs, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(h);
}
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_USER && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			__clear_bit(ob->ptr.dev, devs_may_alloc->d);
			*nr_effective += ca->mi.durability;
			*have_cache |= !ca->mi.durability;

			ob_push(c, ptrs, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i, flags = BUCKET_ALLOC_USE_DURABILITY;
	int ret;

	if (wp->type == BCH_DATA_USER)
		flags |= BUCKET_MAY_ALLOC_PARTIAL;

	devs = target_rw_devs(c, wp->type, target);

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		get_buckets_from_writepoint(c, ptrs, wp, &devs,
					    nr_replicas, nr_effective,
					    have_cache, true);
		if (*nr_effective >= nr_replicas)
			return 0;

		bucket_alloc_from_stripe(c, ptrs, wp, &devs,
					 target, erasure_code,
					 nr_replicas, nr_effective,
					 have_cache);
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read_preempt_disable(&c->mark_lock);

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, flags, cl);
	if (ret && ret != -EROFS && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	percpu_up_read_preempt_enable(&c->mark_lock);

	return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs,
				enum bch_data_type data_type)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			open_bucket_for_each(c, &ob->ec->blocks, ob2, j)
				drop |= ob2->ptr.dev == ca->dev_idx;
			open_bucket_for_each(c, &ob->ec->parity, ob2, j)
				drop |= ob2->ptr.dev == ca->dev_idx;
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs, wp->type);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
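
/*
 * Editor's note, illustrative numbers only: with 32 write points and a 1024
 * sector bucket_size_max, stranded = 32 * 1024 = 32768 sectors; with
 * factor = 8 this reports "too many" once free space drops below
 * 8 * 32768 = 262144 sectors, i.e. once the space that could be stranded in
 * partially written write point buckets becomes a meaningful fraction of
 * what's left.
 */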
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned i;
	bool have_cache;
	int ret;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr	= c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	/* metadata may not allocate on cache devices: */
	if (wp->type != BCH_DATA_USER)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -EROFS &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob,
					wp->type == BCH_DATA_USER);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -ENOSPC &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	return ERR_PTR(ret);
}
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_USER;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Finish the allocation for this write: put the open buckets we've used up
 * completely, and keep the rest on the write point for the next allocation.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
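
/*
 * Editor's sketch of the expected calling sequence for the sector allocator
 * (illustrative only, not from the original file - key construction and error
 * handling are elided, and the write point specifier is built by hand):
 */
#if 0
static int example_alloc_for_write(struct bch_fs *c, struct bkey_i *k,
				   unsigned sectors, struct closure *cl)
{
	struct write_point_specifier spec = {
		.v = (unsigned long) &c->rebalance_write_point,
	};
	struct bch_devs_list devs_have = { .nr = 0 };
	struct write_point *wp;

	/* returns with wp->lock held on success: */
	wp = bch2_alloc_sectors_start(c, 0, 0, spec, &devs_have,
				      1, 1, RESERVE_NONE, 0, cl);
	if (IS_ERR(wp))
		return PTR_ERR(wp);

	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
	bch2_alloc_sectors_done(c, wp);		/* drops wp->lock */

	/* the data write and then the index update follow, per the header comment */
	return 0;
}
#endif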
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}
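
/*
 * Editor's note: the open bucket freelist initialized above is threaded
 * through the c->open_buckets array by index rather than by pointer -
 * ob->freelist holds the array index of the next free open_bucket, and index
 * 0 is reserved as the NULL sentinel, which is why the loop starts at
 * c->open_buckets + 1 and why bch2_open_bucket_alloc() can BUG_ON() a zero
 * freelist head.
 */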