// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
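/*
 * A minimal usage sketch (illustrative only - not a caller that exists in
 * this file; error handling elided):
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, RESERVE_none, cl);
 *	if (!IS_ERR(ob)) {
 *		// ... write data, do the index update ...
 *		bch2_open_bucket_put(c, ob);
 *	}
 */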
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "trace.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
const char * const bch2_alloc_reserves[] = {
#define x(t) #t,
	BCH_ALLOC_RESERVES()
#undef x
	NULL
};
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
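/*
 * Sketch of that ordering requirement (illustrative; the real callers live
 * in the write path, outside this file):
 *
 *	allocate sectors from an open bucket
 *	  -> write the data
 *	  -> btree update inserting the new extent pointer
 *	  -> bch2_open_bucket_put()
 *
 * Dropping the reference any earlier would let the bucket be reclaimed while
 * the allocation is still unreachable from the btree.
 */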
void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		ca->alloc_cursor = 0;
	rcu_read_unlock();
}
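/*
 * Open buckets are tracked in a small hash table, keyed by (dev, bucket):
 * chains are singly linked through struct open_bucket's hash field, which
 * stores indices into c->open_buckets (index 0 is the sentinel NULL).
 */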
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}

static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	ob->valid = false;
	ob->data_type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));

	spin_lock(&c->freelist_lock);
	ob->on_partial_list = true;
	c->open_buckets_partial[c->open_buckets_partial_nr++] =
		ob - c->open_buckets;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
	closure_wake_up(&c->freelist_wait);
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}
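/*
 * How much of the open_buckets table each reserve may consume: btree
 * allocations may use every open bucket, copygc may not take the last
 * quarter, and everything else may not take the last half.
 */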
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_btree:
	case RESERVE_btree_movinggc:
		return 0;
	case RESERVE_movinggc:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket,
					      enum alloc_reserve reserve,
					      const struct bch_alloc_v4 *a,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		s->skipped_nouse++;
		return NULL;
	}

	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		s->skipped_open++;
		return NULL;
	}
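	/*
	 * If the bucket was only recently freed, reusing it before that free
	 * has been committed to the journal could, after a crash, resurrect
	 * pointers into data we've since overwritten:
	 */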
	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
		s->skipped_need_journal_commit++;
		return NULL;
	}

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
		s->skipped_nocow++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		s->skipped_open++;
		return NULL;
	}

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->dev		= ca->dev_idx;
	ob->gen		= a->gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	return ob;
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum alloc_reserve reserve, u64 free_entry,
					    struct bucket_alloc_state *s,
					    struct bkey_s_c freespace_k,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct open_bucket *ob;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
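	/*
	 * Freespace btree keys pack the bucket number into the low 56 bits of
	 * the key offset; the high 8 bits are generation bits, used to detect
	 * stale freespace entries:
	 */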
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;
	int ret;
	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
			   "  freespace key ",
			   ca->mi.first_bucket, ca->mi.nbuckets);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	k = bch2_bkey_get_iter(trans, &iter,
			       BTREE_ID_alloc, POS(ca->dev_idx, b),
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret) {
		ob = ERR_PTR(ret);
		goto err;
	}

	a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type != BCH_DATA_free) {
		if (!test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
			ob = NULL;
			goto err;
		}

		prt_printf(&buf, "non free bucket in freespace btree\n"
			   "  freespace key ");
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
	    test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
			   "  freespace key ",
			   genbits, alloc_freespace_genbits(*a) >> 56);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
		struct bch_backpointer bp;
		struct bpos bp_pos = POS_MIN;

		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
						&bp_pos, &bp,
						BTREE_ITER_NOPRESERVE);
		if (ret) {
			ob = ERR_PTR(ret);
			goto err;
		}

		if (!bkey_eq(bp_pos, POS_MAX)) {
			/*
			 * Bucket may have data in it - we don't call
			 * bch2_trans_inconsistent() because fsck hasn't
			 * finished yet
			 */
			ob = NULL;
			goto err;
		}
	}

	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
	if (!ob)
		iter.path->preserve = false;
err:
	if (iter.trans && iter.path)
		set_btree_iter_dontneed(&iter);
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ob;
}
/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum alloc_reserve reserve,
			struct bucket_alloc_state *s,
			struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
	u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
	int ret;
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_SLOTS, k, ret) {
		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))
			continue;

		a = bch2_alloc_to_v4(k, &a_convert);

		if (a->data_type != BCH_DATA_free)
			continue;

		s->buckets_seen++;

		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	ca->alloc_cursor = alloc_cursor;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_cursor > alloc_start) {
		alloc_cursor = alloc_start;
		goto again;
	}

	return ob;
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						      struct bch_dev *ca,
						      enum alloc_reserve reserve,
						      struct bucket_alloc_state *s,
						      struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
	u64 alloc_cursor = alloc_start;
	int ret;

	BUG_ON(ca->new_fs_bucket_idx);
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)
			break;

		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
		     alloc_cursor < k.k->p.offset;
		     alloc_cursor++) {
			ret = btree_trans_too_many_iters(trans);
			if (ret) {
				ob = ERR_PTR(ret);
				break;
			}

			s->buckets_seen++;

			ob = try_alloc_bucket(trans, ca, reserve,
					      alloc_cursor, s, k, cl);
			if (ob) {
				iter.path->preserve = false;
				break;
			}
		}

		if (ob || ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	ca->alloc_cursor = alloc_cursor;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
		goto again;
	}

	return ob;
}
/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 *
 * Returns an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum alloc_reserve reserve,
						   struct closure *cl,
						   struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	u64 avail;
	struct bucket_alloc_state s = { 0 };
	bool waiting = false;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, reserve);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)
		bch2_do_discards(c);

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
		bch2_do_gc_gens(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_do_invalidates(c);

	if (!avail) {
		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		if (!c->blocked_allocate)
			c->blocked_allocate = local_clock();

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc:
	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
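	/*
	 * If more than half the apparently-free buckets were skipped because
	 * their freeing hasn't yet hit the journal, kick off a journal flush
	 * so they become allocatable:
	 */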
	if (s.skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && freespace && !test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags)) {
		freespace = false;
		goto alloc;
	}
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

	if (!IS_ERR(ob))
		trace_and_count(c, bucket_alloc, ca,
				bch2_alloc_reserves[reserve],
				ob->bucket,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				"");
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		trace_and_count(c, bucket_alloc_fail, ca,
				bch2_alloc_reserves[reserve],
				0,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				bch2_err_str(PTR_ERR(ob)));

	return ob;
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
							cl, &usage)));
	return ob;
}
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}

static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, RESERVE_none);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
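/*
 * Worked example of the weighting above (illustrative numbers): free space is
 * mapped to 2^48 / free_space, so a device with twice the free space of
 * another accumulates next_alloc at half the rate, and therefore sorts first
 * in bch2_dev_alloc_list() roughly twice as often.
 */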
void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}
static int add_new_bucket(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
			  unsigned flags,
			  struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	BUG_ON(*nr_effective >= nr_replicas);
	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective	+= (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
		? durability : 1;
	*have_cache	|= !durability;

	ob_push(c, ptrs, ob);

	if (*nr_effective >= nr_replicas)
		return 1;
	if (ob->ec)
		return 1;
	return 0;
}
int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
		      struct open_buckets *ptrs,
		      struct dev_stripe_state *stripe,
		      struct bch_devs_mask *devs_may_alloc,
		      unsigned nr_replicas,
		      unsigned *nr_effective,
		      bool *have_cache,
		      unsigned flags,
		      enum bch_data_type data_type,
		      enum alloc_reserve reserve,
		      struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	unsigned dev;
	struct bch_dev *ca;
	int ret = -BCH_ERR_insufficient_devices;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct bch_dev_usage usage;
		struct open_bucket *ob;

		dev = devs_sorted.devs[i];

		rcu_read_lock();
		ca = rcu_dereference(c->devs[dev]);
		if (ca)
			percpu_ref_get(&ca->ref);
		rcu_read_unlock();

		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			percpu_ref_put(&ca->ref);
			continue;
		}

		ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		percpu_ref_put(&ca->ref);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		ob->data_type = data_type;

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
				   have_cache, flags, ob)) {
			ret = 0;
			break;
		}
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct btree_trans *trans,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 enum alloc_reserve reserve,
			 unsigned flags,
			 struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;
	int ret = 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->dev);

	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;
	ec_stripe_new_get(h->s, STRIPE_REF_io);

	ret = add_new_bucket(c, ptrs, devs_may_alloc,
			     nr_replicas, nr_effective,
			     have_cache, flags, ob);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return ret;
}
/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;

	if (ob->data_type != wp->data_type)
		return false;

	if (!ca->mi.durability &&
	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
		return false;

	if (ec != (ob->ec != NULL))
		return false;

	return true;
}

static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec, unsigned flags)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;
	int ret = 0;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		if (!ret && want_bucket(c, wp, devs_may_alloc,
					have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, flags, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}
	wp->ptrs = ptrs_skip;

	return ret;
}
static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum alloc_reserve reserve,
				    unsigned flags)
{
	int i, ret = 0;

	if (!c->open_buckets_partial_nr)
		return 0;

	spin_lock(&c->freelist_lock);

	if (!c->open_buckets_partial_nr)
		goto unlock;

	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
			struct bch_dev_usage usage;
			u64 avail;

			bch2_dev_usage_read_fast(ca, &usage);
			avail = dev_buckets_free(ca, usage, reserve);
			if (!avail)
				continue;

			array_remove_item(c->open_buckets_partial,
					  c->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, flags, ob);
			if (ret)
				break;
		}
	}
unlock:
	spin_unlock(&c->freelist_lock);
	return ret;
}
static int __open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			bool erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (erasure_code && ec_open_bucket(c, ptrs))
		return 0;

	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
					  nr_replicas, nr_effective,
					  have_cache, erasure_code, flags);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				       nr_replicas, nr_effective,
				       have_cache, erasure_code, reserve, flags);
	if (ret)
		return ret;

	if (erasure_code) {
		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
					       target,
					       nr_replicas, nr_effective,
					       have_cache,
					       reserve, flags, _cl);
	} else {
retry_blocking:
		/*
		 * Try nonblocking first, so that if one device is full we'll try from
		 * other devices:
		 */
		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
					nr_replicas, nr_effective, have_cache,
					flags, wp->data_type, reserve, cl);
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
		    !cl && _cl) {
			cl = _cl;
			goto retry_blocking;
		}
	}

	return ret;
}

static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *cl)
{
	int ret;

	if (erasure_code) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,
				reserve, flags, cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			return ret;
		if (*nr_effective >= nr_replicas)
			return 0;
	}

	ret = __open_bucket_add_buckets(trans, ptrs, wp,
			devs_have, target, false,
			nr_replicas, nr_effective, have_cache,
			reserve, flags, cl);
	return ret < 0 ? ret : 0;
}
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{
	if (ec) {
		return ob->ec != NULL;
	} else if (ca) {
		bool drop = ob->dev == ca->dev_idx;
		struct open_bucket *ob2;
		unsigned i;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) {
				if (!ob->ec->blocks[i])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[i];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		return drop;
	} else {
		return true;
	}
}

static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (should_drop_bucket(ob, c, ca, ec))
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}
void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{
	unsigned i;

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	spin_lock(&c->freelist_lock);
	i = 0;
	while (i < c->open_buckets_partial_nr) {
		struct open_bucket *ob =
			c->open_buckets + c->open_buckets_partial[i];

		if (should_drop_bucket(ob, c, ca, ec)) {
			--c->open_buckets_partial_nr;
			swap(c->open_buckets_partial[i],
			     c->open_buckets_partial[c->open_buckets_partial_nr]);
			ob->on_partial_list = false;
			spin_unlock(&c->freelist_lock);
			bch2_open_bucket_put(c, ob);
			spin_lock(&c->freelist_lock);
		} else {
			i++;
		}
	}
	spin_unlock(&c->freelist_lock);

	bch2_ec_stop_dev(c, ca);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
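/*
 * In other words (illustrative arithmetic): each write point can strand up
 * to bucket_size_max of partially-written space, so we call it too many once
 * write_points_nr * bucket_size_max exceeds 1/factor of free space.
 */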
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}

static bool try_decrease_writepoints(struct bch_fs *c, unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, false, wp);
	return true;
}
static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}
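/*
 * write_point_specifiers with the low bit clear are pointers to a specific
 * struct write_point (writepoint_ptr()); hashed specifiers have the low bit
 * set (writepoint_hashed()) and are looked up in write_points_hash.
 */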
static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			     unsigned target,
			     unsigned erasure_code,
			     struct write_point_specifier write_point,
			     struct bch_devs_list *devs_have,
			     unsigned nr_replicas,
			     unsigned nr_replicas_required,
			     enum alloc_reserve reserve,
			     unsigned flags,
			     struct closure *cl,
			     struct write_point **wp_ret)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	bool have_cache;
	int ret;
	int i;

	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		/* Don't retry from all devices if we're out of open buckets: */
		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			goto allocate_blocking;

		/*
		 * Only try to allocate cache (durability = 0 devices) from the
		 * specified target:
		 */
		have_cache = true;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      flags, cl);
	} else {
allocate_blocking:
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return 0;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		return cl
			? -BCH_ERR_bucket_alloc_blocked
			: -BCH_ERR_ENOSPC_bucket_alloc;

	return ret;
}
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}
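/*
 * Note the offset arithmetic above: data is written from the start of the
 * bucket, and ob->sectors_free counts down from bucket_size, so the next
 * unwritten sector is bucket start + (bucket_size - sectors_free).
 */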
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob:
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	bch2_alloc_sectors_done_inlined(c, wp);
}
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
}

void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used = local_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}
static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */

	prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
		   ob - c->open_buckets,
		   atomic_read(&ob->pin),
		   data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
		   ob->dev, ob->bucket, ob->gen,
		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
	if (ob->ec)
		prt_printf(out, " ec idx %llu", ob->ec->idx);
	if (ob->on_partial_list)
		prt_str(out, " partial");
	prt_newline(out);
}
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct open_bucket *ob;

	out->atomic++;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list)
			bch2_open_bucket_to_text(out, c, ob);
		spin_unlock(&ob->lock);
	}

	--out->atomic;
}

void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned i;

	out->atomic++;
	spin_lock(&c->freelist_lock);

	for (i = 0; i < c->open_buckets_partial_nr; i++)
		bch2_open_bucket_to_text(out, c,
					 c->open_buckets + c->open_buckets_partial[i]);

	spin_unlock(&c->freelist_lock);
	--out->atomic;
}
static const char * const bch2_write_point_states[] = {
#define x(n)	#n,
	WRITE_POINT_STATES()
#undef x
};

void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct write_point *wp;
	unsigned i;

	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++) {
		prt_printf(out, "%lu: ", wp->write_point);
		prt_human_readable_u64(out, wp->sectors_allocated);

		prt_printf(out, " last wrote: ");
		bch2_pr_time_units(out, sched_clock() - wp->last_used);

		for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
			prt_printf(out, " %s: ", bch2_write_point_states[i]);
			bch2_pr_time_units(out, wp->time[i]);
		}

		prt_newline(out);
	}
}