// SPDX-License-Identifier: GPL-2.0
/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens are important but
 * the code is named as if it's the priorities) are written in an arbitrary list
 * of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
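/*
 * Illustrative sketch (an assumption, not code from this file): a pointer is
 * only valid while its gen matches the bucket's current gen, which is what
 * ptr_stale() checks - a reader holding a possibly stale pointer would do
 * something like:
 *
 *	if (ptr_stale(ca, &ptr))
 *		goto retry;	<- bucket was reused; pointer no longer valid
 */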
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
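/*
 * Hedged sketch of that ownership rule (caller side; variable names are
 * assumptions): the open_bucket reference is what keeps GC aware of the
 * bucket, so the put must come after the index update:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);
 *	... write into the bucket, insert the btree key pointing at it ...
 *	bch2_open_bucket_put(c, ob);
 */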
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
	bool may_realloc = wp->type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);
	for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark) &&
		    !buckets->b[b].mark.owned_by_allocator)
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
	case RESERVE_BTREE_MOVINGGC:
		return 0;
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct open_bucket *ob;
	long b = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial) {
		int i;

		for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
			ob = c->open_buckets + ca->open_buckets_partial[i];

			if (reserve <= ob->alloc_reserve) {
				array_remove_item(ca->open_buckets_partial,
						  ca->open_buckets_partial_nr,
						  i);
				ob->on_partial_list = false;
				ob->alloc_reserve = reserve;
				spin_unlock(&c->freelist_lock);
				return ob;
			}
		}
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
		goto out;

	switch (reserve) {
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, b);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->ptr		= (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= bucket(ca, b)->mark.gen,
		.offset	= bucket_to_sector(ca, b),
		.dev	= ca->dev_idx,
	};

	spin_unlock(&ob->lock);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
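/*
 * Hedged usage sketch (an assumption, not code from this file): callers pass
 * a closure if they are prepared to wait, and retry once the allocator has
 * refilled the freelists:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, true, cl);
 *	if (IS_ERR(ob)) {
 *		if (PTR_ERR(ob) == -FREELIST_EMPTY ||
 *		    PTR_ERR(ob) == -OPEN_BUCKETS_EMPTY)
 *			... wait on cl, then retry ...
 *	}
 */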
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
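/*
 * Hedged worked example (made-up numbers, only to illustrate the weighting):
 * with the 2^48 fixed-point numerator, a device with 1000 free buckets gets
 * free_space_inv ~= 2.8e11 added to its next_alloc, while a device with 2000
 * free buckets gets ~1.4e11 - the emptier device accumulates "debt" twice as
 * fast, sorts later in bch2_dev_alloc_list(), and is picked less often. The
 * trailing rescale keeps the counters from creeping towards overflow.
 */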
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)
static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);
}
enum bucket_alloc_ret
bch2_bucket_alloc_set(struct bch_fs *c,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      enum alloc_reserve reserve,
		      unsigned flags,
		      struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	enum bucket_alloc_ret ret = INSUFFICIENT_DEVICES;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			ret = -PTR_ERR(ob);
			if (cl)
				return ret;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);
		bch2_dev_stripe_increment(ca, stripe);

		if (*nr_effective >= nr_replicas)
			return ALLOC_SUCCESS;
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static enum bucket_alloc_ret
bucket_alloc_from_stripe(struct bch_fs *c,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned erasure_code,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 unsigned flags,
			 struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return -PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
static enum bucket_alloc_ret
open_bucket_add_buckets(struct bch_fs *c,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	enum bucket_alloc_ret ret;
	unsigned i;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						       target, erasure_code,
						       nr_replicas, nr_effective,
						       have_cache, flags, _cl);
			if (ret == FREELIST_EMPTY ||
			    ret == OPEN_BUCKETS_EMPTY)
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, flags, cl);
	if (ret && ret != INSUFFICIENT_DEVICES && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->ptr.dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;
	return NULL;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
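/*
 * Hedged numeric example (made-up numbers): with 32 write points and a 1MB
 * maximum bucket size, up to ~32MB can be stranded in partially filled
 * buckets; at factor 32 (used by try_increase_writepoints() below) this
 * refuses to add write points once free space drops below ~1GB, while
 * try_decrease_writepoints() (factor 8) only starts tearing them down once
 * free space drops below ~256MB.
 */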
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;
	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	enum bucket_alloc_ret ret;
	int i;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	if (wp->type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->type != BCH_DATA_user)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == INSUFFICIENT_DEVICES &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == FREELIST_EMPTY &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	switch (ret) {
	case OPEN_BUCKETS_EMPTY:
	case FREELIST_EMPTY:
		return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
	case INSUFFICIENT_DEVICES:
		return ERR_PTR(-EROFS);
	default:
		BUG();
	}
}
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_user;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Release the write point: keep the open buckets that still have free space
 * on the write point, and put our references to the ones that are now full.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
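/*
 * Hedged usage sketch of the sector allocator as a whole (caller-side names
 * are assumptions, error handling abbreviated):
 *
 *	wp = bch2_alloc_sectors_start(c, target, erasure_code, write_point,
 *				      devs_have, nr_replicas,
 *				      nr_replicas_required, reserve, flags, cl);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);	(-EAGAIN means wait on cl and retry)
 *
 *	sectors = min(request_sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 */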
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->type = type;
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}