// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>

/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */

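/*
 * Rough sketch of that protocol (illustrative only - see the write path for
 * the real sequence; @obs here stands for the caller's own open_buckets that
 * it took references into):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 *	...submit the write and do the index update that makes @k reachable...
 *	bch2_open_buckets_put(c, &obs);
 */
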
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}

static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
	bool may_realloc = wp->type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}

static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)) &&
		    !buckets->b[b].mark.owned_by_allocator)
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
	case RESERVE_BTREE_MOVINGGC:
		return 0;
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}

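/*
 * Rough intuition for the thresholds above (assuming, purely for
 * illustration, an OPEN_BUCKETS_COUNT of 1024): ordinary allocations start
 * failing with -OPEN_BUCKETS_EMPTY once 512 or fewer open buckets remain,
 * copygc once 256 or fewer remain, and btree/journal allocations may use the
 * very last ones - so internal housekeeping can always make forward progress
 * even when user writes are blocked.
 */
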
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns an open_bucket on success, or an ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct open_bucket *ob;
	long b = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial) {
		int i;

		for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
			ob = c->open_buckets + ca->open_buckets_partial[i];

			if (reserve <= ob->alloc_reserve) {
				array_remove_item(ca->open_buckets_partial,
						  ca->open_buckets_partial_nr,
						  i);
				ob->on_partial_list = false;
				ob->alloc_reserve = reserve;
				spin_unlock(&c->freelist_lock);
				return ob;
			}
		}
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
		goto out;

	switch (reserve) {
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, b);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->ptr		= (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= bucket(ca, b)->mark.gen,
		.offset	= bucket_to_sector(ca, b),
		.dev	= ca->dev_idx,
	};

	spin_unlock(&ob->lock);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	ca->nr_open_buckets++;
	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}

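/*
 * Illustrative call (not taken from a real caller): a user data allocation
 * that may reuse a partially filled bucket and will wait on @cl when the
 * freelist runs dry:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, true, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);	// e.g. -FREELIST_EMPTY
 */
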
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}

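/*
 * Worked example of the stripe weighting above (numbers purely illustrative):
 * a device with 1000 free buckets gets next_alloc bumped by 2^48/1000, while
 * a device with 2000 free buckets only gets 2^48/2000 - so the emptier device
 * sorts later in bch2_dev_alloc_list() and is picked less often.  Rescaling
 * all counters downward by the same amount afterwards keeps the values from
 * growing without bound while preserving their relative order.
 */
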
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);
}

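/*
 * Durability accounting, by example (device values assumed): with
 * BUCKET_ALLOC_USE_DURABILITY set, a bucket on a device with durability 2
 * counts as two effective replicas, while a bucket on a durability 0 (cache)
 * device counts as none - it only sets *have_cache, so later passes stop
 * adding more cache-only pointers.
 */
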
int bch2_bucket_alloc_set(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct dev_stripe_state *stripe,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
			  enum alloc_reserve reserve,
			  unsigned flags,
			  struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	int ret = -INSUFFICIENT_DEVICES;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);

			if (cl)
				return ret;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		bch2_dev_stripe_increment(ca, stripe);

		if (*nr_effective >= nr_replicas)
			return 0;
	}

	return ret;
}

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    u16 target,
				    unsigned erasure_code,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache,
				    unsigned flags,
				    struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}

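/*
 * Note on the loop above: at most one bucket is added per call - we walk
 * devices in stripe (least recently used) order and claim the first
 * still-unallocated block of the stripe that lives on an acceptable device,
 * simply putting the stripe head back if nothing matches.
 */
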
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}

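/*
 * In other words: pointers already held by the write point are reused when
 * they still satisfy the current request (acceptable device, durability,
 * ec-ness); everything else is collected in ptrs_skip and handed back to the
 * write point untouched, so a later allocation can still use it.
 */
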
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   u16 target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   unsigned flags,
				   struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	int ret;
	unsigned i;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						       target, erasure_code,
						       nr_replicas, nr_effective,
						       have_cache, flags, _cl);
			if (ret == -FREELIST_EMPTY ||
			    ret == -OPEN_BUCKETS_EMPTY)
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, flags, cl);
	if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}

void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->ptr.dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}

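/*
 * Note: an open bucket that belongs to an erasure coded stripe is dropped if
 * *any* bucket of that stripe lives on the device being stopped - otherwise
 * the half-built stripe would keep a reference to the departing device.
 */
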
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}

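/*
 * Example of the heuristic above (sizes assumed for illustration): with 32
 * write points and a maximum bucket size of 512 KiB, up to ~16 MiB of space
 * can sit "stranded" in partially filled buckets.  try_increase_writepoints()
 * calls this with factor 32, i.e. it refuses to add write points once that
 * stranded space would exceed 1/32nd of the filesystem's free space.
 */
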
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}

static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}

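/*
 * Summary of the lookup above: a write_point cookie with the low bit clear is
 * really a pointer to a caller-owned write point and is used directly; hashed
 * cookies are looked up in write_points_hash, and on a miss we steal the
 * least recently used slot (or grow the array via try_increase_writepoints())
 * and rehash it under the new cookie.
 */
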
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	int ret;
	unsigned i;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	if (wp->type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->type != BCH_DATA_user)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -INSUFFICIENT_DEVICES &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -FREELIST_EMPTY &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	switch (ret) {
	case -OPEN_BUCKETS_EMPTY:
	case -FREELIST_EMPTY:
		return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
	case -INSUFFICIENT_DEVICES:
		return ERR_PTR(-EROFS);
	default:
		BUG();
	}
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_user;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

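/*
 * Offset arithmetic above, by example (numbers illustrative): for a 1024
 * sector bucket starting at sector 8192 with 768 sectors still free, the new
 * pointer's offset is 8192 + (1024 - 768) = 8448, i.e. the first sector of
 * the bucket that hasn't been handed out yet.  Pointers to durability 0
 * devices are flagged cached so they aren't counted as real replicas.
 */
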
/*
 * Release the write point: open buckets that still have space stay on the
 * write point, buckets we've used up are put.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->type = type;
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}