// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"
#include "io.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
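
/*
 * Release the allocator's reference to an open bucket: erasure coded buckets
 * are handed back to the stripe code; otherwise the bucket's owned-by-allocator
 * mark is cleared and the open bucket slot is returned to the freelist, waking
 * anyone waiting for a slot:
 */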
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr), false);

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
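
/*
 * After a write error on @dev, cancel any erasure coded buckets in @obs that
 * point at that device:
 */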
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->ptr.dev == dev &&
		    ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
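
/*
 * Stash a bucket we didn't end up using on the device's partial list, so a
 * later allocation can reuse the remaining space; if the list is full or this
 * write point's buckets may not be reallocated, just put the bucket:
 */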
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
	bool may_realloc = wp->type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = buckets->first_bucket; b < buckets->nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark) &&
		    !buckets->b[b].mark.owned_by_allocator)
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}
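
/*
 * Number of open bucket slots held back for higher priority allocations;
 * lower priority reserves see correspondingly fewer free slots:
 */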
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
	case RESERVE_BTREE_MOVINGGC:
		return 0;
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns an open_bucket on success, or an ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct open_bucket *ob;
	long b = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial) {
		int i;

		for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
			ob = c->open_buckets + ca->open_buckets_partial[i];

			if (reserve <= ob->alloc_reserve) {
				array_remove_item(ca->open_buckets_partial,
						  ca->open_buckets_partial_nr,
						  i);
				ob->on_partial_list = false;
				ob->alloc_reserve = reserve;
				spin_unlock(&c->freelist_lock);
				return ob;
			}
		}
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
		goto out;

	switch (reserve) {
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, b);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->ptr = (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= bucket(ca, b)->mark.gen,
		.offset	= bucket_to_sector(ca, b),
		.dev	= ca->dev_idx,
	};

	spin_unlock(&ob->lock);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	ca->nr_open_buckets++;
	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
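
/*
 * Device stripe heuristic: after allocating from @ca, bump its next_alloc key
 * by an amount inversely proportional to its free space, so devices with more
 * free space sort first in bch2_dev_alloc_list(); all keys are periodically
 * scaled down so they don't saturate:
 */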
void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->ptr.dev)->mi.durability;

	__clear_bit(ob->ptr.dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);
}
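
/*
 * Allocate one bucket at a time from the devices in @devs_may_alloc, in stripe
 * order, until we have @nr_replicas effective replicas; zero durability
 * (cache) devices are skipped once we already have a cached copy:
 */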
enum bucket_alloc_ret
bch2_bucket_alloc_set(struct bch_fs *c,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      enum alloc_reserve reserve,
		      unsigned flags,
		      struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	enum bucket_alloc_ret ret = INSUFFICIENT_DEVICES;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			ret = -PTR_ERR(ob);
			if (cl)
				return ret;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		bch2_dev_stripe_increment(ca, stripe);

		if (*nr_effective >= nr_replicas)
			return ALLOC_SUCCESS;
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static enum bucket_alloc_ret
bucket_alloc_from_stripe(struct bch_fs *c,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned erasure_code,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 unsigned flags,
			 struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return 0;
	if (nr_replicas < 2)
		return 0;
	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return -PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->ptr.dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	ob->ec_idx = ec_idx;
	ob->ec = h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}

/* Sector allocator */
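
/*
 * Reuse buckets the write point is already holding that fit this allocation
 * (right device, right durability, erasure coded if @need_ec); buckets that
 * don't match stay on the write point's list:
 */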
static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
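
/*
 * Fill @ptrs with buckets for a write: first reuse what the write point
 * already has, then (for erasure coded writes) try an existing stripe, then
 * fall back to allocating fresh buckets - nonblocking first, and blocking on
 * @_cl only if we still don't have enough:
 */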
static enum bucket_alloc_ret
open_bucket_add_buckets(struct bch_fs *c,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	enum bucket_alloc_ret ret;
	unsigned i;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						 target, erasure_code,
						 nr_replicas, nr_effective,
						 have_cache, flags, _cl);
			if (ret == FREELIST_EMPTY ||
			    ret == OPEN_BUCKETS_EMPTY)
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();
retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				nr_replicas, nr_effective, have_cache,
				reserve, flags, cl);
	if (ret && ret != INSUFFICIENT_DEVICES && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}
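
/*
 * Drop the open buckets in @obs that point at @ca (all of them if @ca is
 * NULL), including erasure coded buckets whose stripe has a block on @ca:
 */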
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->ptr.dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->ptr.dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
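
/*
 * Look up the write point for @write_point, stealing and rehashing the least
 * recently used one if it doesn't exist yet; returned with wp->lock held:
 */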
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
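
/*
 * Rough usage sketch for the sector allocator (illustrative only - real write
 * path callers also take open bucket references for after the index update and
 * handle -EAGAIN/-ENOSPC; 'op' and its fields here are just placeholders):
 *
 *	wp = bch2_alloc_sectors_start(c, op->target, op->erasure_code,
 *				      op->write_point, &op->devs_have,
 *				      op->nr_replicas, op->nr_replicas_required,
 *				      op->alloc_reserve, op->flags, cl);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);
 *
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 */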
756 * Get us an open_bucket we can allocate from, return with it locked:
758 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
760 unsigned erasure_code,
761 struct write_point_specifier write_point,
762 struct bch_devs_list *devs_have,
763 unsigned nr_replicas,
764 unsigned nr_replicas_required,
765 enum alloc_reserve reserve,
769 struct write_point *wp;
770 struct open_bucket *ob;
771 struct open_buckets ptrs;
772 unsigned nr_effective, write_points_nr;
773 unsigned ob_flags = 0;
775 enum bucket_alloc_ret ret;
778 if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
779 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
781 BUG_ON(!nr_replicas || !nr_replicas_required);
785 write_points_nr = c->write_points_nr;
788 wp = writepoint_find(c, write_point.v);
790 if (wp->type == BCH_DATA_user)
791 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
793 /* metadata may not allocate on cache devices: */
794 if (wp->type != BCH_DATA_user)
797 if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
798 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
799 target, erasure_code,
800 nr_replicas, &nr_effective,
801 &have_cache, reserve,
804 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
805 target, erasure_code,
806 nr_replicas, &nr_effective,
807 &have_cache, reserve,
812 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
814 nr_replicas, &nr_effective,
815 &have_cache, reserve,
819 BUG_ON(!ret && nr_effective < nr_replicas);
821 if (erasure_code && !ec_open_bucket(c, &ptrs))
822 pr_debug("failed to get ec bucket: ret %u", ret);
824 if (ret == INSUFFICIENT_DEVICES &&
825 nr_effective >= nr_replicas_required)
831 /* Free buckets we didn't use: */
832 open_bucket_for_each(c, &wp->ptrs, ob, i)
833 open_bucket_free_unused(c, wp, ob);
837 wp->sectors_free = UINT_MAX;
839 open_bucket_for_each(c, &wp->ptrs, ob, i)
840 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
842 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
844 verify_not_stale(c, &wp->ptrs);
848 open_bucket_for_each(c, &wp->ptrs, ob, i)
849 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
850 ob_push(c, &ptrs, ob);
852 open_bucket_free_unused(c, wp, ob);
855 mutex_unlock(&wp->lock);
857 if (ret == FREELIST_EMPTY &&
858 try_decrease_writepoints(c, write_points_nr))
862 case OPEN_BUCKETS_EMPTY:
864 return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
865 case INSUFFICIENT_DEVICES:
866 return ERR_PTR(-EROFS);
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @wp's open buckets:
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		tmp.cached = !ca->mi.durability &&
			wp->type == BCH_DATA_user;

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		bch2_bkey_append_ptr(k, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Finish an allocation from @wp: keep the open buckets that still have space
 * for future allocations, and put the ones we've used up.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->type = type;
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}