// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
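/*
 * Illustrative sketch only (not taken from a real caller): gathering one
 * bucket per device with bch2_bucket_alloc_set().  The devs mask, write point
 * and closure are assumed to come from the surrounding context, and the
 * locking requirements (mark_lock, RCU) are elided:
 *
 *	struct open_buckets ptrs = { .nr = 0 };
 *	unsigned nr_effective = 0;
 *	bool have_cache = false;
 *	int ret;
 *
 *	ret = bch2_bucket_alloc_set(c, &ptrs, &wp->stripe, &devs,
 *				    nr_replicas, &nr_effective, &have_cache,
 *				    RESERVE_NONE, 0, cl);
 *	if (!ret)
 *		ptrs now holds open buckets on nr_replicas distinct devices
 */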
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "io.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
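/*
 * A rough caller-side sketch of that ordering (illustrative only; it assumes
 * the bch2_open_bucket_get()/bch2_open_buckets_put() helpers declared in
 * alloc_foreground.h, and the real write path does considerably more):
 *
 *	struct open_buckets obs = { .nr = 0 };
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
 *	bch2_open_bucket_get(c, wp, &obs);	take references for the caller
 *	bch2_alloc_sectors_done(c, wp);		unlocks the write point
 *
 *	... submit the write, then do the index update that makes k reachable ...
 *
 *	bch2_open_buckets_put(c, &obs);		only now drop the references
 */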
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);
	ob->valid = false;
	ob->type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->type = 0;

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
	bool may_realloc = wp->type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark) &&
		    !buckets->b[b].mark.owned_by_allocator)
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
	case RESERVE_BTREE_MOVINGGC:
		return 0;
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
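/*
 * Illustrative error-handling sketch (not from a real caller): the return
 * value must be checked with IS_ERR(); if @cl is non-NULL the caller is put
 * on a waitlist and woken when buckets become available, so it can retry:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);	e.g. -FREELIST_EMPTY, -OPEN_BUCKETS_EMPTY
 *
 *	... use the bucket ...
 *
 *	bch2_open_bucket_put(c, ob);
 */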
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct open_bucket *ob;
	long b = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial) {
		int i;

		for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
			ob = c->open_buckets + ca->open_buckets_partial[i];

			if (reserve <= ob->alloc_reserve) {
				array_remove_item(ca->open_buckets_partial,
						  ca->open_buckets_partial_nr,
						  i);
				ob->on_partial_list = false;
				ob->alloc_reserve = reserve;
				spin_unlock(&c->freelist_lock);
				return ob;
			}
		}
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
		goto out;

	switch (reserve) {
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, b);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->ptr		= (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= bucket(ca, b)->mark.gen,
		.offset	= bucket_to_sector(ca, b),
		.dev	= ca->dev_idx,
	};

	spin_unlock(&ob->lock);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	ca->nr_open_buckets++;
	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
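/*
 * Worked example (numbers purely illustrative): with the 2^48 scaling above,
 * a device with 100 free buckets has its next_alloc bumped by roughly
 * 2^48/100 per allocation, while a device with 400 free buckets is bumped by
 * roughly 2^48/400.  Since bch2_dev_alloc_list() sorts devices by ascending
 * next_alloc, the emptier device accumulates "debt" four times faster and is
 * chosen correspondingly less often, so devices fill at roughly proportional
 * rates.
 */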
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);
}
int bch2_bucket_alloc_set(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct dev_stripe_state *stripe,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
			  enum alloc_reserve reserve,
			  unsigned flags,
			  struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	int ret = -INSUFFICIENT_DEVICES;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (cl)
				return ret;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		bch2_dev_stripe_increment(ca, stripe);

		if (*nr_effective >= nr_replicas)
			return 0;
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    u16 target,
				    unsigned erasure_code,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache,
				    unsigned flags,
				    struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return 0;
	if (nr_replicas < 2)
		return 0;
	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   u16 target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   unsigned flags,
				   struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						       target, erasure_code,
						       nr_replicas, nr_effective,
						       have_cache, flags, _cl);
			if (ret == -FREELIST_EMPTY ||
			    ret == -OPEN_BUCKETS_EMPTY)
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, flags, cl);
	if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->ptr.dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
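/*
 * Error-handling sketch for callers (illustrative only; the real callers live
 * in the write path, not in this file):
 *
 *	wp = bch2_alloc_sectors_start(c, target, erasure_code, write_point,
 *				      devs_have, nr_replicas,
 *				      nr_replicas_required, reserve, flags, cl);
 *	if (IS_ERR(wp)) {
 *		if (PTR_ERR(wp) == -EAGAIN)
 *			a closure was passed: wait on it, then retry
 *		return PTR_ERR(wp);	otherwise -ENOSPC or -EROFS
 *	}
 *
 *	wp is returned locked; it stays locked until bch2_alloc_sectors_done().
 */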
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	unsigned i;
	int ret;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	if (wp->type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->type != BCH_DATA_user)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %i", ret);

	if (ret == -INSUFFICIENT_DEVICES &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -FREELIST_EMPTY &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	switch (ret) {
	case -OPEN_BUCKETS_EMPTY:
	case -FREELIST_EMPTY:
		return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
	case -INSUFFICIENT_DEVICES:
		return ERR_PTR(-EROFS);
	default:
		BUG();
	}
}
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @wp
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_user;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Finish the allocation: release @wp's lock, keeping the open buckets that
 * still have space free and putting the ones that are now full
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->type = type;
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}