// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */

#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets_waiting_for_journal.h"
#include "disk_groups.h"
#include "nocow_locking.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>

const char * const bch2_alloc_reserves[] = {
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 * - They track buckets that have been partially allocated, allowing for
 *   sub-bucket sized allocations - they're used by the sector allocator below
 *
 * - They provide a reference to the buckets they own that mark and sweep GC
 *   can find, until the new allocation has a pointer to it inserted into the
 *   btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
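/*
 * Rough sketch of how a writer uses this (simplified, not a verbatim caller;
 * names as declared in alloc_foreground.h):
 *
 *	bch2_alloc_sectors_start_trans(..., &wp);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, cached);
 *	bch2_open_bucket_get(c, wp, &obs);	// take refs for the caller
 *	bch2_alloc_sectors_done(c, wp);
 *	// ... submit the write, then do the btree index update ...
 *	bch2_open_buckets_put(c, &obs);		// only after the index update
 */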
void bch2_reset_alloc_cursors(struct bch_fs *c)
	for_each_member_device_rcu(ca, c, i, NULL)
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		slot = &c->open_buckets[*slot].hash;
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	bch2_ec_bucket_written(c, ob);

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);
	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
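/*
 * On write error, cancel any erasure coded stripe block that was being built
 * in an open bucket on the failing device, so the stripe isn't committed with
 * a bad block:
 */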
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
	struct open_bucket *ob;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	bool may_realloc = wp->data_type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
		bch2_open_bucket_put(c, ob);
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
	case RESERVE_btree_movinggc:
	case RESERVE_movinggc:
		return OPEN_BUCKETS_COUNT / 4;
		return OPEN_BUCKETS_COUNT / 2;
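/*
 * Attempt to take a single bucket for allocation: skip it if it is marked
 * nouse, is already open, still needs a journal commit before it can be
 * reused, or is nocow-locked; otherwise take an open_bucket off the freelist
 * (respecting the per-reserve open bucket reserve) and initialize it under
 * ob->lock.
 */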
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      enum alloc_reserve reserve,
					      const struct bch_alloc_v4 *a,
					      struct bucket_alloc_state *s,
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {

	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
		s->skipped_need_journal_commit++;

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->dev = ca->dev_idx;

	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;

	spin_unlock(&c->freelist_lock);
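/*
 * try_alloc_bucket: given an entry from the freespace btree, decode the bucket
 * (low 56 bits) and generation bits (high 8 bits), cross-check it against the
 * alloc btree, and - before fsck has verified backpointers - make sure the
 * bucket really is empty before handing it to __try_alloc_bucket().
 */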
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum alloc_reserve reserve, u64 free_entry,
					    struct bucket_alloc_state *s,
					    struct bkey_s_c freespace_k,
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct open_bucket *ob;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;

	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
			   ca->mi.first_bucket, ca->mi.nbuckets);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);

	a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type != BCH_DATA_free) {
		if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {

		prt_printf(&buf, "non free bucket in freespace btree\n"
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
	    test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
			   genbits, alloc_freespace_genbits(*a) >> 56);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
		struct bch_backpointer bp;

		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
						BTREE_ITER_NOPRESERVE);

		if (bp_offset != U64_MAX) {
			/*
			 * Bucket may have data in it - we don't call
			 * bch2_trans_inconsistent() because fsck hasn't
			 * finished yet
			 */

	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);

	iter.path->preserve = false;
	set_btree_iter_dontneed(&iter);
	bch2_trans_iter_exit(trans, &iter);
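/*
 * Reuse a bucket that was only partially filled by a previous allocation, if
 * this device has one available at a suitable reserve:
 */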
static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
						    enum alloc_reserve reserve)
	struct open_bucket *ob;

	spin_lock(&c->freelist_lock);

	for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
		ob = c->open_buckets + ca->open_buckets_partial[i];

		if (reserve <= ob->alloc_reserve) {
			array_remove_item(ca->open_buckets_partial,
					  ca->open_buckets_partial_nr,
			ob->on_partial_list = false;
			ob->alloc_reserve = reserve;
			spin_unlock(&c->freelist_lock);

	spin_unlock(&c->freelist_lock);
/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			enum alloc_reserve reserve,
			struct bucket_alloc_state *s,
	struct btree_iter iter;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
	u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));

	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
				     BTREE_ITER_SLOTS, k, ret) {
		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))

		a = bch2_alloc_to_v4(k, &a_convert);

		if (a->data_type != BCH_DATA_free)

		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);

	bch2_trans_iter_exit(trans, &iter);

	ca->alloc_cursor = alloc_cursor;

	if (!ob && alloc_cursor > alloc_start) {
		alloc_cursor = alloc_start;
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						      enum alloc_reserve reserve,
						      struct bucket_alloc_state *s,
	struct btree_iter iter;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
	u64 alloc_cursor = alloc_start;

	BUG_ON(ca->new_fs_bucket_idx);

	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)

		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
		     alloc_cursor < k.k->p.offset;
			ret = btree_trans_too_many_iters(trans);

			ob = try_alloc_bucket(trans, ca, reserve,
					      alloc_cursor, s, k, cl);
				iter.path->preserve = false;

	bch2_trans_iter_exit(trans, &iter);

	ca->alloc_cursor = alloc_cursor;

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 *
 * Returns an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
						   enum alloc_reserve reserve,
						   bool may_alloc_partial,
						   struct bch_dev_usage *usage)
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	struct bucket_alloc_state s = { 0 };
	bool waiting = false;

	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, reserve);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)

	if (should_invalidate_buckets(ca, *usage))
		bch2_do_invalidates(c);

		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);

		if (!c->blocked_allocate)
			c->blocked_allocate = local_clock();

		ob = ERR_PTR(-BCH_ERR_freelist_empty);

		closure_wake_up(&c->freelist_wait);

	if (may_alloc_partial) {
		ob = try_alloc_partial_bucket(c, ca, reserve);

	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);

	if (s.skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {

		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

		trace_and_count(c, bucket_alloc, ca,
				bch2_alloc_reserves[reserve],
				usage->d[BCH_DATA_free].buckets,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		trace_and_count(c, bucket_alloc_fail, ca,
				bch2_alloc_reserves[reserve],
				usage->d[BCH_DATA_free].buckets,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				bch2_err_str(PTR_ERR(ob)));
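/*
 * bch2_bucket_alloc - wrapper that runs the single bucket allocation inside
 * its own btree transaction; see bch2_bucket_alloc_trans() above for the
 * actual allocation logic.
 */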
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
								   may_alloc_partial, cl, &usage)));
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
	struct dev_alloc_list ret = { .nr = 0 };

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
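/*
 * Allocations are striped across devices in proportion to their free space:
 * next_alloc[] accumulates a per-device cost of roughly 2^48 / free_space for
 * each bucket allocated, and bch2_dev_alloc_list() sorts devices by that cost
 * so emptier devices are tried first. The counters are periodically rescaled
 * so they don't grow without bound.
 */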
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
						     struct dev_stripe_state *stripe,
						     struct bch_dev_usage *usage)
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, RESERVE_none);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   struct open_bucket *ob)
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
	*have_cache |= !durability;

	ob_push(c, ptrs, ob);
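/*
 * Allocate one bucket per device from @devs_may_alloc, in stripe order, until
 * we have @nr_replicas effective replicas (counting device durability when
 * BUCKET_ALLOC_USE_DURABILITY is set); returns -BCH_ERR_insufficient_devices
 * if we run out of devices first.
 */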
static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
				       struct open_buckets *ptrs,
				       struct dev_stripe_state *stripe,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       enum alloc_reserve reserve,
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	int ret = -BCH_ERR_insufficient_devices;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct bch_dev_usage usage;
		struct open_bucket *ob;

		dev = devs_sorted.devs[i];

		ca = rcu_dereference(c->devs[dev]);
			percpu_ref_get(&ca->ref);

		if (!ca->mi.durability && *have_cache) {
			percpu_ref_put(&ca->ref);

		ob = bch2_bucket_alloc_trans(trans, ca, reserve,
					     flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
		bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		percpu_ref_put(&ca->ref);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		if (*nr_effective >= nr_replicas) {
int bch2_bucket_alloc_set(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct dev_stripe_state *stripe,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  enum alloc_reserve reserve,
	return bch2_trans_do(c, NULL, NULL, 0,
		      bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
						  devs_may_alloc, nr_replicas,
						  nr_effective, have_cache, reserve,
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned erasure_code,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;

	if (ec_open_bucket(c, ptrs))

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))

	ca = bch_dev_bkey_exists(c, ob->dev);

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);

	bch2_ec_stripe_head_put(c, h);
/* Sector allocator */
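/*
 * Pick up open buckets the write point is already holding, keeping only those
 * on devices we may still allocate from; buckets we don't use stay on the
 * write point via ptrs_skip.
 */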
static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->data_type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
			ob_push(c, &ptrs_skip, ob);

	wp->ptrs = ptrs_skip;
static int open_bucket_add_buckets(struct btree_trans *trans,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   enum alloc_reserve reserve,
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (!ec_open_bucket(c, ptrs)) {
		get_buckets_from_writepoint(c, ptrs, wp, &devs,
					    nr_replicas, nr_effective,
					    have_cache, flags, true);
		if (*nr_effective >= nr_replicas)

	if (!ec_open_bucket(c, ptrs)) {
		ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
					       target, erasure_code,
					       nr_replicas, nr_effective,
					       have_cache, flags, _cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
		if (*nr_effective >= nr_replicas)

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					  nr_replicas, nr_effective, have_cache,
	    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
	    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->dev == ca->dev_idx;
			mutex_unlock(&ob->ec->lock);

			bch2_open_bucket_put(c, ob);
			ob_push(c, &ptrs, ob);
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);

static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
	u64 stranded = c->write_points_nr * c->bucket_size_max;
	u64 free = bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
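/*
 * The factor is the allowed ratio of free space to space "stranded" in
 * partially filled write point buckets: try_increase_writepoints() refuses to
 * add a write point once stranded space would exceed 1/32nd of free space,
 * and try_decrease_writepoints() only starts trimming write points once it
 * exceeds 1/8th.
 */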
static bool try_increase_writepoints(struct bch_fs *c)
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));

static bool try_decrease_writepoints(struct bch_fs *c,
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
static void bch2_trans_mutex_lock(struct btree_trans *trans,
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock(trans, &wp->lock);

	head = writepoint_hash(c, write_point);

	wp = __writepoint_find(head, write_point);

	bch2_trans_mutex_lock(trans, &wp->lock);
	if (wp->write_point == write_point)
	mutex_unlock(&wp->lock);

restart_find_oldest:

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))

	bch2_trans_mutex_lock(trans, &oldest->lock);
	bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);

	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);

	wp->last_used = local_clock();
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
				   unsigned erasure_code,
				   struct write_point_specifier write_point,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   unsigned nr_replicas_required,
				   enum alloc_reserve reserve,
				   struct write_point **wp_ret)
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	write_points_nr = c->write_points_nr;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	if (wp->data_type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,

	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
			open_bucket_free_unused(c, wp, ob);

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(c, write_points_nr))

	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
			? -BCH_ERR_bucket_alloc_blocked
			: -BCH_ERR_ENOSPC_bucket_alloc;
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			  ca->mi.bucket_size -
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
/*
 * Release @wp: keep open buckets that still have space free for future
 * allocations, put the ones that are full, and drop the write point lock.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
	bch2_alloc_sectors_done_inlined(c, wp);
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used = local_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list) {
			prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
				   ob - c->open_buckets,
				   atomic_read(&ob->pin),
				   bch2_data_types[ob->data_type],
				   ob->dev, ob->bucket, ob->gen);
		spin_unlock(&ob->lock);
static const char * const bch2_write_point_states[] = {
	WRITE_POINT_STATES()

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
	struct write_point *wp;

	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
		prt_printf(out, "%lu: ", wp->write_point);
		prt_human_readable_u64(out, wp->sectors_allocated);

		prt_printf(out, " last wrote: ");
		bch2_pr_time_units(out, sched_clock() - wp->last_used);

		for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
			prt_printf(out, " %s: ", bch2_write_point_states[i]);
			bch2_pr_time_units(out, wp->time[i]);