// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
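/*
 * Rough sketch of how a writer drives the foreground allocator (the entry
 * points are the real ones defined below; the error handling shown is
 * illustrative only):
 *
 *	struct closure cl;
 *	struct write_point *wp;
 *
 *	closure_init_stack(&cl);
 *
 *	wp = bch2_alloc_sectors_start(c, target, erasure_code, write_point,
 *				      devs_have, nr_replicas,
 *				      nr_replicas_required, reserve, flags,
 *				      &cl);
 *	if (IS_ERR(wp))
 *		wait on @cl and retry - we've already been added to the
 *		relevant waitlist, so sleeping doesn't race with buckets
 *		being freed
 *
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 *
 * The caller then submits the write and does the btree update; the
 * open_bucket references it holds are put only after that update, per the
 * rules described for open buckets below.
 */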
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "io.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
enum bucket_alloc_ret {
	ALLOC_SUCCESS,
	OPEN_BUCKETS_EMPTY,
	FREELIST_EMPTY,		/* Allocator thread not keeping up */
};
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
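/*
 * Concretely (a hypothetical caller, not code in this file), the required
 * ordering is:
 *
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
 *	take our own refs to wp->ptrs;
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	write the data out;
 *	insert @k into the btree;
 *
 *	bch2_open_buckets_put(c, &our_refs);
 *
 * Putting the reference before the index update would let the buckets be
 * invalidated and reused while pointers to them are still in flight.
 */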
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct open_bucket *ob,
				    bool may_realloc)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_OPEN_BUCKET_RESERVE;
	default:
		return BTREE_NODE_OPEN_BUCKET_RESERVE * 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the open_bucket on success, or an ERR_PTR() on failure
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ob;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}
	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);
	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr		= (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);
	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
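/*
 * Sketch of the error protocol above (hypothetical caller code): the two
 * failure modes are distinguishable from the ERR_PTR, and passing a closure
 * is what makes waiting race-free, since we're added to the waitlist before
 * the error is returned:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, &cl);
 *	if (IS_ERR(ob)) {
 *		ret = -PTR_ERR(ob);
 *		if (ret == OPEN_BUCKETS_EMPTY || ret == FREELIST_EMPTY)
 *			closure_sync(&cl);	- sleep, then retry
 *		else
 *			bail out
 *	}
 */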
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
void bch2_dev_stripe_increment(struct bch_fs *c, struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
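/*
 * Worked example of the weighting above (numbers invented for illustration):
 * with a fixed point scale of 2^48, a device with 1024 free buckets has its
 * next_alloc key bumped by 2^48 / 1024 = 2^38 per allocation, while a device
 * with 4096 free buckets is bumped by only 2^36. Since bch2_dev_alloc_list()
 * sorts devices by ascending next_alloc, the device with more free space is
 * picked roughly four times as often - allocations are spread in proportion
 * to free space. The final loop rescales every key towards zero so the
 * counters never pin at U64_MAX.
 */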
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)
static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);
}
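/*
 * bch2_bucket_alloc_set(): allocate one bucket from each of the devices in
 * @devs_may_alloc, in stripe order, until @nr_effective reaches @nr_replicas.
 *
 * Return convention (as implemented below): 0 on success; -EAGAIN if a
 * blocking allocation (@cl != NULL) needs to wait; -ENOSPC if we ran out of
 * open buckets or every device failed; -EROFS if no device was even eligible.
 */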
static int bch2_bucket_alloc_set(struct bch_fs *c,
				 struct open_buckets *ptrs,
				 struct dev_stripe_state *stripe,
				 struct bch_devs_mask *devs_may_alloc,
				 unsigned nr_replicas,
				 unsigned *nr_effective,
				 bool *have_cache,
				 enum alloc_reserve reserve,
				 unsigned flags,
				 struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	bool alloc_failure = false;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			enum bucket_alloc_ret ret = -PTR_ERR(ob);

			WARN_ON(reserve == RESERVE_MOVINGGC &&
				ret != OPEN_BUCKETS_EMPTY);

			if (cl)
				return -EAGAIN;
			if (ret == OPEN_BUCKETS_EMPTY)
				return -ENOSPC;
			alloc_failure = true;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		bch2_dev_stripe_increment(c, ca, stripe);

		if (*nr_effective >= nr_replicas)
			return 0;
	}

	return alloc_failure ? -ENOSPC : -EROFS;
}
/* Allocate from stripes: */

/*
 * XXX: use a higher watermark for allocating open buckets here:
 */
static int ec_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	unsigned i, nr_have = 0, nr_data =
		min_t(unsigned, h->nr_active_devs,
		      EC_STRIPE_MAX) - h->redundancy;
	bool have_cache = true;
	int ret = 0;

	BUG_ON(h->blocks.nr > nr_data);
	BUG_ON(h->parity.nr > h->redundancy);

	devs = h->devs;

	open_bucket_for_each(c, &h->parity, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);
	open_bucket_for_each(c, &h->blocks, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();
	if (h->parity.nr < h->redundancy) {
		nr_have = h->parity.nr;

		ret = bch2_bucket_alloc_set(c, &h->parity, &h->parity_stripe,
					    &devs, h->redundancy, &nr_have,
					    &have_cache, RESERVE_NONE, 0, NULL);
		if (ret)
			goto err;
	}

	if (h->blocks.nr < nr_data) {
		nr_have = h->blocks.nr;

		ret = bch2_bucket_alloc_set(c, &h->blocks, &h->block_stripe,
					    &devs, nr_data, &nr_have,
					    &have_cache, RESERVE_NONE, 0, NULL);
		if (ret)
			goto err;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return bch2_ec_stripe_new_alloc(c, h);
err:
	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);
	return -1;
}
/*
 * If we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static void bucket_alloc_from_stripe(struct bch_fs *c,
				     struct open_buckets *ptrs,
				     struct write_point *wp,
				     struct bch_devs_mask *devs_may_alloc,
				     u16 target,
				     unsigned erasure_code,
				     unsigned nr_replicas,
				     unsigned *nr_effective,
				     bool *have_cache,
				     unsigned flags)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return;

	if (nr_replicas < 2)
		return;

	if (ec_open_bucket(c, ptrs))
		return;

	h = bch2_ec_stripe_head_get(c, target, erasure_code, nr_replicas - 1);
	if (!h)
		return;

	if (!h->s && ec_stripe_alloc(c, h))
		goto out_put_head;

	rcu_read_lock();
	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
	rcu_read_unlock();

	for (i = 0; i < devs_sorted.nr; i++)
		open_bucket_for_each(c, &h->s->blocks, ob, ec_idx)
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	ob->ec_idx = ec_idx;
	ob->ec = h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(h);
}
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_USER && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   u16 target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   unsigned flags,
				   struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		get_buckets_from_writepoint(c, ptrs, wp, &devs,
					    nr_replicas, nr_effective,
					    have_cache, flags, true);
		if (*nr_effective >= nr_replicas)
			return 0;

		bucket_alloc_from_stripe(c, ptrs, wp, &devs,
					 target, erasure_code,
					 nr_replicas, nr_effective,
					 have_cache, flags);
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				nr_replicas, nr_effective, have_cache,
				reserve, flags, cl);
	if (ret && ret != -EROFS && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			open_bucket_for_each(c, &ob->ec->blocks, ob2, j)
				drop |= ob2->ptr.dev == ca->dev_idx;
			open_bucket_for_each(c, &ob->ec->parity, ob2, j)
				drop |= ob2->ptr.dev == ca->dev_idx;
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
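/*
 * The two callers below use different factors, which gives hysteresis:
 * try_increase_writepoints() stops growing once 32 * stranded exceeds free
 * space, while try_decrease_writepoints() only starts reaping once even
 * 8 * stranded no longer fits. The gap between the two thresholds keeps the
 * write point count from oscillating as free space fluctuates.
 */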
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us a write point with open buckets we can allocate from, and return
 * with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	int ret, i;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	if (wp->type == BCH_DATA_USER)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->type != BCH_DATA_USER)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -EROFS &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob, wp->type == BCH_DATA_USER);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob,
					wp->type == BCH_DATA_USER);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -ENOSPC &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	return ERR_PTR(ret);
}
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_USER;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Finish the allocation: unlock the write point, and put our references to
 * any open buckets we've completely used up
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}