// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "buckets.h"
#include "disk_groups.h"
#include "ec.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
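
/*
 * Illustrative sketch (not part of the original file) of the single-device
 * entry point described in the header comment above. A real caller passes a
 * closure to wait on when buckets aren't available; here error handling is
 * reduced to bailing out. bch2_bucket_alloc_set() below is the multi-device
 * wrapper that calls bch2_bucket_alloc() once per device, in stripe order.
 *
 *	struct open_bucket *ob;
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_NONE, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	...
 *	bch2_open_bucket_put(c, ob);
 */
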
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
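
/*
 * Illustrative sketch of that rule (not from the original file); the helpers
 * prefixed with "example_" are placeholders, only the ordering of the put
 * relative to the index update is the point:
 *
 *	struct open_bucket *ob = example_allocate_space(c, &k);
 *
 *	example_write_data(&k);
 *	example_index_update(c, &k);	// allocation now reachable in the btree
 *	bch2_open_bucket_put(c, ob);	// reference dropped only after the update
 */
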
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, ob->bucket, false);
	ob->valid = false;
	ob->data_type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
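
/*
 * Stash a bucket we didn't fully use: if the write point is allocating user
 * data and the device still has room on its partial list, keep the bucket
 * around so the remaining space can be reallocated later; otherwise just put
 * the reference.
 */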
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	bool may_realloc = wp->data_type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_BTREE:
	case RESERVE_BTREE_MOVINGGC:
		return 0;
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct open_bucket *ob;
	long b = 0;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial) {
		int i;

		for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
			ob = c->open_buckets + ca->open_buckets_partial[i];

			if (reserve <= ob->alloc_reserve) {
				array_remove_item(ca->open_buckets_partial,
						  ca->open_buckets_partial_nr,
						  i);
				ob->on_partial_list = false;
				ob->alloc_reserve = reserve;
				spin_unlock(&c->freelist_lock);
				return ob;
			}
		}
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], b)))
		goto out;

	switch (reserve) {
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], b))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	if (!c->blocked_allocate)
		c->blocked_allocate = local_clock();

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, b);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid = true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->dev = ca->dev_idx;
	ob->gen = *bucket_gen(ca, b);
	ob->bucket = b;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
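
/*
 * Nudge the stripe state after allocating from @ca: its counter is bumped by
 * an amount inversely proportional to the device's free space, so devices with
 * more free space end up with smaller counters, sort earlier in
 * bch2_dev_alloc_list(), and get picked more often; every counter is then
 * decayed so the values stay bounded.
 */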
void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache |= !durability;

	ob_push(c, ptrs, ob);
}
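
/*
 * Allocate buckets from the devices in @devs_may_alloc, in the order given by
 * the stripe state, until *nr_effective reaches @nr_replicas; returns
 * -INSUFFICIENT_DEVICES if the available devices can't satisfy that.
 */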
int bch2_bucket_alloc_set(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct dev_stripe_state *stripe,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
			  enum alloc_reserve reserve,
			  unsigned flags,
			  struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	struct bch_dev *ca;
	int ret = -INSUFFICIENT_DEVICES;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache)
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);

			if (cl)
				break;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		bch2_dev_stripe_increment(ca, stripe);

		if (*nr_effective >= nr_replicas) {
			ret = 0;
			break;
		}
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    u16 target,
				    unsigned erasure_code,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache,
				    unsigned flags,
				    struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->dev);

	ob->ec_idx = ec_idx;
	ob->ec = h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}
/* Sector allocator */
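
/*
 * Pull already-open buckets off the write point that match what we need
 * (allowed device, durability/erasure coding requirements) and add them to
 * @ptrs; buckets that don't match stay attached to the write point.
 */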
static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->data_type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   u16 target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   unsigned flags,
				   struct closure *_cl)
{
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	int ret;
	unsigned i;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->data_type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						 target, erasure_code,
						 nr_replicas, nr_effective,
						 have_cache, flags, _cl);
			if (ret == -FREELIST_EMPTY ||
			    ret == -OPEN_BUCKETS_EMPTY)
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

	percpu_down_read(&c->mark_lock);
	rcu_read_lock();

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				nr_replicas, nr_effective, have_cache,
				reserve, flags, cl);
	if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	rcu_read_unlock();
	percpu_up_read(&c->mark_lock);

	return ret;
}
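
/*
 * Drop all open buckets on @ca (or on every device, if @ca is NULL) from @obs;
 * open buckets belonging to an erasure coded stripe that has a block on @ca
 * are dropped as well.
 */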
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}
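
/*
 * Limit the number of write points so that the space they could leave
 * stranded (roughly the number of write points times the largest bucket size)
 * stays below 1/factor of the filesystem's free space.
 */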
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded = c->write_points_nr * c->bucket_size_max;
	u64 free = bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}

static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	unsigned i;
	int ret;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(c, write_point.v);

	if (wp->data_type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -INSUFFICIENT_DEVICES &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -FREELIST_EMPTY &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	switch (ret) {
	case -OPEN_BUCKETS_EMPTY:
	case -FREELIST_EMPTY:
		return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
	case -INSUFFICIENT_DEVICES:
		return ERR_PTR(-EROFS);
	default:
		BUG();
	}
}
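
/*
 * Illustrative sketch (not part of the original file) of how a writer drives
 * the sector allocator above. The data write and the btree update are
 * hypothetical stand-ins ("write_data", "index_update"), and closure/retry
 * handling on -EAGAIN is omitted:
 *
 *	struct write_point *wp;
 *
 *	wp = bch2_alloc_sectors_start(c, target, 0, write_point, devs_have,
 *				      nr_replicas, nr_replicas,
 *				      RESERVE_NONE, 0, cl);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);	// -EAGAIN: wait on @cl; -ENOSPC/-EROFS: hard failure
 *
 *	sectors = min(request_sectors, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);	// unlocks the write point
 *
 *	write_data(k, sectors);		// write to the pointers appended to @k
 *	index_update(c, k);		// make @k reachable before dropping any
 *					// open_bucket references (see the
 *					// comment at the top of this file)
 */
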
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Release the write point: open buckets that still have space left stay on
 * the write point for the next allocation, the rest have their references
 * dropped.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used = sched_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list) {
			pr_buf(out, "%zu ref %u type %s\n",
			       ob - c->open_buckets,
			       atomic_read(&ob->pin),
			       bch2_data_types[ob->data_type]);
		}
		spin_unlock(&ob->lock);
	}
}