// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets_waiting_for_journal.h"
#include "disk_groups.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
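
/*
 * Concretely, a write path is expected to order things roughly like this
 * (an illustrative sketch, not code from this file; wp and obs are
 * placeholder names):
 *
 *	bch2_open_bucket_get(c, wp, &obs);	// take refs on wp's buckets
 *	bch2_alloc_sectors_done(c, wp);
 *	... write the data, insert the new extent into the btree ...
 *	bch2_open_buckets_put(c, &obs);		// only now drop the references
 */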

static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		slot = &c->open_buckets[*slot].hash;

void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	bch2_ec_bucket_written(c, ob);

	percpu_down_read(&c->mark_lock);
	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);

void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
	struct open_bucket *ob;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;

static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	bool may_realloc = wp->data_type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
		bch2_open_bucket_put(c, ob);

/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
	case RESERVE_BTREE_MOVINGGC:
	case RESERVE_MOVINGGC:
		return OPEN_BUCKETS_COUNT / 4;
		return OPEN_BUCKETS_COUNT / 2;

static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      enum alloc_reserve reserve,
					      struct bkey_alloc_unpacked a,
					      size_t *need_journal_commit,
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(a.bucket, ca->buckets_nouse)))

	if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket))

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, a.bucket)) {
		(*need_journal_commit)++;

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);

		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
		spin_unlock(&c->freelist_lock);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->dev = ca->dev_idx;
	ob->bucket = a.bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc(ca, reserve);

static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum alloc_reserve reserve, u64 free_entry,
					    size_t *need_journal_commit,
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct open_bucket *ob;
	struct bkey_alloc_unpacked a;
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);

	a = bch2_alloc_unpack(k);

	if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c,
			"non free bucket in freespace btree (state %s)\n"
			" at %llu (genbits %u)",
			bch2_bucket_states[bucket_state(a)],
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
			free_entry, genbits)) {

	if (bch2_fs_inconsistent_on(genbits != (alloc_freespace_genbits(a) >> 56), c,
			"bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
			genbits, alloc_freespace_genbits(a) >> 56,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {

	if (bch2_fs_inconsistent_on(b < ca->mi.first_bucket || b >= ca->mi.nbuckets, c,
			"freespace btree has bucket outside allowed range (got %llu, valid %u-%llu)",
			b, ca->mi.first_bucket, ca->mi.nbuckets)) {

	ob = __try_alloc_bucket(c, ca, reserve, a, need_journal_commit, cl);

	bch2_trans_iter_exit(trans, &iter);

static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
						    enum alloc_reserve reserve)
	struct open_bucket *ob;

	spin_lock(&c->freelist_lock);

	for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
		ob = c->open_buckets + ca->open_buckets_partial[i];

		if (reserve <= ob->alloc_reserve) {
			array_remove_item(ca->open_buckets_partial,
					  ca->open_buckets_partial_nr,
			ob->on_partial_list = false;
			ob->alloc_reserve = reserve;
			spin_unlock(&c->freelist_lock);

	spin_unlock(&c->freelist_lock);

/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_trans_early(struct btree_trans *trans,
			      enum alloc_reserve reserve,
			      size_t *need_journal_commit,
	struct btree_iter iter;
	struct open_bucket *ob = NULL;

	*b = max_t(u64, *b, ca->mi.first_bucket);
	*b = max_t(u64, *b, ca->new_fs_bucket_idx);

	for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *b),
			   BTREE_ITER_SLOTS, k, ret) {
		struct bkey_alloc_unpacked a;

		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))

		a = bch2_alloc_unpack(k);

		if (bucket_state(a) != BUCKET_free)

		ob = __try_alloc_bucket(trans->c, ca, reserve, a,
					need_journal_commit, cl);

	bch2_trans_iter_exit(trans, &iter);

	*b = iter.pos.offset;

	return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);

static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
						   enum alloc_reserve reserve,
						   size_t *need_journal_commit,
	struct btree_iter iter;
	struct open_bucket *ob = NULL;

	if (unlikely(!ca->mi.freespace_initialized))
		return bch2_bucket_alloc_trans_early(trans, ca, reserve, b,
						     need_journal_commit, cl);

	BUG_ON(ca->new_fs_bucket_idx);

	for_each_btree_key(trans, iter, BTREE_ID_freespace,
			   POS(ca->dev_idx, *b), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)

		for (*b = max(*b, bkey_start_offset(k.k));
		     *b != k.k->p.offset && !ob;
			if (btree_trans_too_many_iters(trans)) {
				ob = ERR_PTR(-EINTR);

			ob = try_alloc_bucket(trans, ca, reserve, *b,
					      need_journal_commit, cl);

	bch2_trans_iter_exit(trans, &iter);

	return ob ?: ERR_PTR(ret);

/*
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
	struct open_bucket *ob = NULL;
	size_t need_journal_commit = 0;
	u64 avail = dev_buckets_available(ca, reserve);

	if (may_alloc_partial) {
		ob = try_alloc_partial_bucket(c, ca, reserve);

		closure_wait(&c->freelist_wait, cl);

		/* recheck after putting ourself on waitlist */
		avail = dev_buckets_available(ca, reserve);
			closure_wake_up(&c->freelist_wait);

		if (!c->blocked_allocate)
			c->blocked_allocate = local_clock();

		ob = ERR_PTR(-FREELIST_EMPTY);

	ret = bch2_trans_do(c, NULL, NULL, 0,
			PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans,
						&need_journal_commit, cl)));

	if (need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

		ob = ERR_PTR(ret ?: -FREELIST_EMPTY);

	if (ob == ERR_PTR(-FREELIST_EMPTY)) {
		trace_bucket_alloc_fail(ca, reserve, avail, need_journal_commit);
		atomic_long_inc(&c->bucket_alloc_fail);
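
/*
 * Example (an illustrative sketch, not code from this file): grab a single
 * bucket from @ca without blocking, then release it.  Failures come back as
 * ERR_PTR()s such as -FREELIST_EMPTY, so check with IS_ERR():
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, RESERVE_NONE, false, NULL);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	bch2_open_bucket_put(c, ob);
 */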

static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
	struct dev_alloc_list ret = { .nr = 0 };

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, RESERVE_NONE);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
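
/*
 * Rough intuition for the math above (the numbers are illustrative only):
 * next_alloc is bumped by 2^48 / free_space, so a device with 1000 free
 * buckets is charged ~2.8e11 per allocation while one with only 100 free
 * buckets is charged ~2.8e12.  Since bch2_dev_alloc_list() sorts devices by
 * ascending next_alloc, allocations land on devices roughly in proportion to
 * their free space over time.
 */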

#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   struct open_bucket *ob)
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
	*have_cache |= !durability;

	ob_push(c, ptrs, ob);

int bch2_bucket_alloc_set(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct dev_stripe_state *stripe,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  enum alloc_reserve reserve,
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	int ret = -INSUFFICIENT_DEVICES;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		dev = devs_sorted.devs[i];

		ca = rcu_dereference(c->devs[dev]);
			percpu_ref_get(&ca->ref);

		if (!ca->mi.durability && *have_cache) {
			percpu_ref_put(&ca->ref);

		ob = bch2_bucket_alloc(c, ca, reserve,
				       flags & BUCKET_MAY_ALLOC_PARTIAL, cl);

		bch2_dev_stripe_increment(ca, stripe);
		percpu_ref_put(&ca->ref);

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		if (*nr_effective >= nr_replicas) {

/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned erasure_code,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;

	if (ec_open_bucket(c, ptrs))

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))

	ca = bch_dev_bkey_exists(c, ob->dev);

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);

	bch2_ec_stripe_head_put(c, h);

/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->data_type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
			ob_push(c, &ptrs_skip, ob);

	wp->ptrs = ptrs_skip;

static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   enum alloc_reserve reserve,
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (!ec_open_bucket(c, ptrs)) {
		get_buckets_from_writepoint(c, ptrs, wp, &devs,
					    nr_replicas, nr_effective,
					    have_cache, flags, true);
		if (*nr_effective >= nr_replicas)

	if (!ec_open_bucket(c, ptrs)) {
		ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
					       target, erasure_code,
					       nr_replicas, nr_effective,
					       have_cache, flags, _cl);
		if (ret == -FREELIST_EMPTY ||
		    ret == -OPEN_BUCKETS_EMPTY)
		if (*nr_effective >= nr_replicas)

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
				    nr_replicas, nr_effective, have_cache,
	if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {

void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->dev == ca->dev_idx;
			mutex_unlock(&ob->ec->lock);

			bch2_open_bucket_put(c, ob);
			ob_push(c, &ptrs, ob);

void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)

static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
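
/*
 * Put differently (an explanatory note, not upstream documentation): with a
 * given @factor, write points as a group may strand at most 1/factor of the
 * currently free space, since stranded * factor > free is equivalent to
 * stranded > free / factor.  try_increase_writepoints() checks with factor
 * 32, try_decrease_writepoints() with factor 8.
 */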

static bool try_increase_writepoints(struct bch_fs *c)
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));

static bool try_decrease_writepoints(struct bch_fs *c,
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);

static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);

	head = writepoint_hash(c, write_point);

	wp = __writepoint_find(head, write_point);
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
		mutex_unlock(&wp->lock);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);

	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);

	wp->last_used = sched_clock();

/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	write_points_nr = c->write_points_nr;

	wp = writepoint_find(c, write_point.v);

	if (wp->data_type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,

	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -INSUFFICIENT_DEVICES &&
	    nr_effective >= nr_replicas_required)

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
			open_bucket_free_unused(c, wp, ob);
	mutex_unlock(&wp->lock);

	if (ret == -FREELIST_EMPTY &&
	    try_decrease_writepoints(c, write_points_nr))

	case -OPEN_BUCKETS_EMPTY:
	case -FREELIST_EMPTY:
		return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
	case -INSUFFICIENT_DEVICES:
		return ERR_PTR(-EROFS);
		return ERR_PTR(ret);

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
	struct open_bucket *ob;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;

/*
 * Unlock the write point: open buckets that are now full are released (put);
 * buckets with space still free stay on the write point for the next
 * allocation.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
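
/*
 * Putting the sector allocator calls together, a caller typically does
 * something like the following (an illustrative sketch, not code from this
 * file; most arguments and all error handling are elided):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);
 *
 *	sectors = min(sectors_wanted, wp->sectors_free);
 *	bch2_alloc_sectors_append_ptrs(c, wp, &new->k_i, sectors, false);
 *	bch2_open_bucket_get(c, wp, &obs);
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	// write the data and do the index update, then drop the refs:
 *	bch2_open_buckets_put(c, &obs);
 */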

static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
	mutex_init(&wp->lock);
	wp->data_type = type;

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);
	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;

	writepoint_init(&c->btree_write_point,		BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point,	BCH_DATA_user);
	writepoint_init(&c->copygc_write_point,		BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));

void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list) {
			pr_buf(out, "%zu ref %u type %s\n",
			       ob - c->open_buckets,
			       atomic_read(&ob->pin),
			       bch2_data_types[ob->data_type]);
		spin_unlock(&ob->lock);