1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2012 Google, Inc.
5 * Foreground allocator code: allocate buckets from freelist, and allocate at
6 * sector granularity from writepoints.
8 * bch2_bucket_alloc() allocates a single bucket from a specific device.
10 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11 * in a given filesystem.
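 *
 * A rough usage sketch for the single-bucket path (illustrative only; error
 * handling and the caller's transaction/closure setup are elided, and most
 * writers go through the sector allocator/write points below rather than
 * allocating raw buckets directly):
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_none, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	... write to the bucket, make it reachable ...
 *	bch2_open_bucket_put(c, ob);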
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "backpointers.h"
18 #include "btree_iter.h"
19 #include "btree_update.h"
22 #include "buckets_waiting_for_journal.h"
25 #include "disk_groups.h"
32 #include <linux/math64.h>
33 #include <linux/rculist.h>
34 #include <linux/rcupdate.h>
35 #include <trace/events/bcachefs.h>
37 const char * const bch2_alloc_reserves[] = {
45 * Open buckets represent buckets that are currently being allocated from. They
48 * - They track buckets that have been partially allocated, allowing for
49 * sub-bucket sized allocations - they're used by the sector allocator below
51 * - They provide a reference to the buckets they own that mark and sweep GC
52 * can find, until the new allocation has a pointer to it inserted into the btree
55 * When allocating some space with the sector allocator, the allocation comes
56 * with a reference to an open bucket - the caller is required to put that
57 * reference _after_ doing the index update that makes its allocation reachable.
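 *
 * So the expected ordering for a writer is, roughly:
 *
 *	1) allocate space, taking references on the open buckets backing it
 *	2) write the data
 *	3) do the index update that makes the new extents reachable
 *	4) put the open bucket references taken in step 1
 *
 * Dropping the references before step 3 would leave a window where neither
 * the btree nor an open bucket accounts for the new allocation.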
60 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
62 open_bucket_idx_t idx = ob - c->open_buckets;
63 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
69 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
71 open_bucket_idx_t idx = ob - c->open_buckets;
72 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
74 while (*slot != idx) {
76 slot = &c->open_buckets[*slot].hash;
83 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
85 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
88 bch2_ec_bucket_written(c, ob);
92 percpu_down_read(&c->mark_lock);
98 spin_unlock(&ob->lock);
99 percpu_up_read(&c->mark_lock);
101 spin_lock(&c->freelist_lock);
102 bch2_open_bucket_hash_remove(c, ob);
104 ob->freelist = c->open_buckets_freelist;
105 c->open_buckets_freelist = ob - c->open_buckets;
107 c->open_buckets_nr_free++;
108 ca->nr_open_buckets--;
109 spin_unlock(&c->freelist_lock);
111 closure_wake_up(&c->open_buckets_wait);
114 void bch2_open_bucket_write_error(struct bch_fs *c,
115 struct open_buckets *obs,
118 struct open_bucket *ob;
121 open_bucket_for_each(c, obs, ob, i)
122 if (ob->dev == dev && ob->ec)
123 bch2_ec_bucket_cancel(c, ob);
126 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
128 struct open_bucket *ob;
130 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
132 ob = c->open_buckets + c->open_buckets_freelist;
133 c->open_buckets_freelist = ob->freelist;
134 atomic_set(&ob->pin, 1);
137 c->open_buckets_nr_free--;
141 static void open_bucket_free_unused(struct bch_fs *c,
142 struct write_point *wp,
143 struct open_bucket *ob)
145 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
146 bool may_realloc = wp->data_type == BCH_DATA_user;
148 BUG_ON(ca->open_buckets_partial_nr >
149 ARRAY_SIZE(ca->open_buckets_partial));
151 if (ca->open_buckets_partial_nr <
152 ARRAY_SIZE(ca->open_buckets_partial) &&
154 spin_lock(&c->freelist_lock);
155 ob->on_partial_list = true;
156 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
157 ob - c->open_buckets;
158 spin_unlock(&c->freelist_lock);
160 closure_wake_up(&c->open_buckets_wait);
161 closure_wake_up(&c->freelist_wait);
163 bch2_open_bucket_put(c, ob);
167 /* _only_ for allocating the journal on a new device: */
168 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
170 while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
171 u64 b = ca->new_fs_bucket_idx++;
173 if (!is_superblock_bucket(ca, b) &&
174 (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
181 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
185 case RESERVE_btree_movinggc:
187 case RESERVE_movinggc:
188 return OPEN_BUCKETS_COUNT / 4;
190 return OPEN_BUCKETS_COUNT / 2;
194 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
196 enum alloc_reserve reserve,
197 struct bch_alloc_v4 *a,
198 struct bucket_alloc_state *s,
201 struct open_bucket *ob;
203 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
208 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
213 if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
214 c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
215 s->skipped_need_journal_commit++;
219 if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
224 spin_lock(&c->freelist_lock);
226 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
228 closure_wait(&c->open_buckets_wait, cl);
230 if (!c->blocked_allocate_open_bucket)
231 c->blocked_allocate_open_bucket = local_clock();
233 spin_unlock(&c->freelist_lock);
234 return ERR_PTR(-BCH_ERR_open_buckets_empty);
237 /* Recheck under lock: */
238 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
239 spin_unlock(&c->freelist_lock);
244 ob = bch2_open_bucket_alloc(c);
246 spin_lock(&ob->lock);
249 ob->sectors_free = ca->mi.bucket_size;
250 ob->alloc_reserve = reserve;
251 ob->dev = ca->dev_idx;
254 spin_unlock(&ob->lock);
256 ca->nr_open_buckets++;
257 bch2_open_bucket_hash_add(c, ob);
259 if (c->blocked_allocate_open_bucket) {
260 bch2_time_stats_update(
261 &c->times[BCH_TIME_blocked_allocate_open_bucket],
262 c->blocked_allocate_open_bucket);
263 c->blocked_allocate_open_bucket = 0;
266 if (c->blocked_allocate) {
267 bch2_time_stats_update(
268 &c->times[BCH_TIME_blocked_allocate],
269 c->blocked_allocate);
270 c->blocked_allocate = 0;
273 spin_unlock(&c->freelist_lock);
278 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
279 enum alloc_reserve reserve, u64 free_entry,
280 struct bucket_alloc_state *s,
281 struct bkey_s_c freespace_k,
284 struct bch_fs *c = trans->c;
285 struct btree_iter iter = { NULL };
287 struct open_bucket *ob;
288 struct bch_alloc_v4 a;
289 u64 b = free_entry & ~(~0ULL << 56);
290 unsigned genbits = free_entry >> 56;
291 struct printbuf buf = PRINTBUF;
294 if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
295 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
297 ca->mi.first_bucket, ca->mi.nbuckets);
298 bch2_bkey_val_to_text(&buf, c, freespace_k);
299 bch2_trans_inconsistent(trans, "%s", buf.buf);
304 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
305 k = bch2_btree_iter_peek_slot(&iter);
312 bch2_alloc_to_v4(k, &a);
314 if (genbits != (alloc_freespace_genbits(a) >> 56)) {
315 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
317 genbits, alloc_freespace_genbits(a) >> 56);
318 bch2_bkey_val_to_text(&buf, c, freespace_k);
319 prt_printf(&buf, "\n ");
320 bch2_bkey_val_to_text(&buf, c, k);
321 bch2_trans_inconsistent(trans, "%s", buf.buf);
327 if (a.data_type != BCH_DATA_free) {
328 prt_printf(&buf, "non free bucket in freespace btree\n"
330 bch2_bkey_val_to_text(&buf, c, freespace_k);
331 prt_printf(&buf, "\n ");
332 bch2_bkey_val_to_text(&buf, c, k);
333 bch2_trans_inconsistent(trans, "%s", buf.buf);
338 if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
339 struct bch_backpointer bp;
342 ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
344 BTREE_ITER_NOPRESERVE);
350 if (bp_offset != U64_MAX) {
352 * Bucket may have data in it - we don't call
353 * bch2_trans_inconsistent() because fsck hasn't finished yet
361 ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
363 iter.path->preserve = false;
365 set_btree_iter_dontneed(&iter);
366 bch2_trans_iter_exit(trans, &iter);
371 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
372 enum alloc_reserve reserve)
374 struct open_bucket *ob;
377 spin_lock(&c->freelist_lock);
379 for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
380 ob = c->open_buckets + ca->open_buckets_partial[i];
382 if (reserve <= ob->alloc_reserve) {
383 array_remove_item(ca->open_buckets_partial,
384 ca->open_buckets_partial_nr,
386 ob->on_partial_list = false;
387 ob->alloc_reserve = reserve;
388 spin_unlock(&c->freelist_lock);
393 spin_unlock(&c->freelist_lock);
398 * This path is for before the freespace btree is initialized:
400 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
401 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
403 static noinline struct open_bucket *
404 bch2_bucket_alloc_early(struct btree_trans *trans,
406 enum alloc_reserve reserve,
407 struct bucket_alloc_state *s,
410 struct btree_iter iter;
412 struct open_bucket *ob = NULL;
415 s->cur_bucket = max_t(u64, s->cur_bucket, ca->mi.first_bucket);
416 s->cur_bucket = max_t(u64, s->cur_bucket, ca->new_fs_bucket_idx);
418 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
419 BTREE_ITER_SLOTS, k, ret) {
420 struct bch_alloc_v4 a;
422 if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
425 if (ca->new_fs_bucket_idx &&
426 is_superblock_bucket(ca, k.k->p.offset))
429 bch2_alloc_to_v4(k, &a);
431 if (a.data_type != BCH_DATA_free)
436 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
440 bch2_trans_iter_exit(trans, &iter);
442 s->cur_bucket = iter.pos.offset;
444 return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
447 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
449 enum alloc_reserve reserve,
450 struct bucket_alloc_state *s,
453 struct btree_iter iter;
455 struct open_bucket *ob = NULL;
458 BUG_ON(ca->new_fs_bucket_idx);
462 * On transaction restart, we'd like to restart from the bucket we were allocating from:
465 for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
466 POS(ca->dev_idx, s->cur_bucket), 0, k, ret) {
467 if (k.k->p.inode != ca->dev_idx)
470 for (s->cur_bucket = max(s->cur_bucket, bkey_start_offset(k.k));
471 s->cur_bucket < k.k->p.offset;
473 ret = btree_trans_too_many_iters(trans);
479 ob = try_alloc_bucket(trans, ca, reserve,
480 s->cur_bucket, s, k, cl);
488 bch2_trans_iter_exit(trans, &iter);
490 return ob ?: ERR_PTR(ret);
494 * bch2_bucket_alloc - allocate a single bucket from a specific device
496 * Returns an open_bucket on success, or an ERR_PTR() error on failure
498 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
500 enum alloc_reserve reserve,
501 bool may_alloc_partial,
503 struct bch_dev_usage *usage)
505 struct bch_fs *c = trans->c;
506 struct open_bucket *ob = NULL;
507 bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
508 u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
510 struct bucket_alloc_state s = { .cur_bucket = start };
511 bool waiting = false;
513 bch2_dev_usage_read_fast(ca, usage);
514 avail = dev_buckets_free(ca, *usage, reserve);
516 if (usage->d[BCH_DATA_need_discard].buckets > avail)
519 if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
522 if (should_invalidate_buckets(ca, *usage))
523 bch2_do_invalidates(c);
526 if (cl && !waiting) {
527 closure_wait(&c->freelist_wait, cl);
532 if (!c->blocked_allocate)
533 c->blocked_allocate = local_clock();
535 ob = ERR_PTR(-BCH_ERR_freelist_empty);
540 closure_wake_up(&c->freelist_wait);
542 if (may_alloc_partial) {
543 ob = try_alloc_partial_bucket(c, ca, reserve);
548 ob = likely(ca->mi.freespace_initialized)
549 ? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
550 : bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
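/*
 * If the number of buckets we skipped because they're still waiting on a
 * journal commit before they can be reused (counted via
 * bch2_bucket_needs_journal_commit() in __try_alloc_bucket()) exceeds half of
 * the apparently-available buckets, kick off a journal flush so they become
 * allocatable:
 */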
552 if (s.skipped_need_journal_commit * 2 > avail)
553 bch2_journal_flush_async(&c->journal, NULL);
555 if (!ob && !freespace_initialized && start) {
556 start = s.cur_bucket = 0;
560 if (!freespace_initialized)
561 ca->bucket_alloc_trans_early_cursor = s.cur_bucket;
564 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
567 trace_and_count(c, bucket_alloc, ca, bch2_alloc_reserves[reserve],
568 may_alloc_partial, ob->bucket);
569 else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
570 trace_and_count(c, bucket_alloc_fail,
571 ca, bch2_alloc_reserves[reserve],
572 usage->d[BCH_DATA_free].buckets,
574 bch2_copygc_wait_amount(c),
575 c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
578 bch2_err_str(PTR_ERR(ob)));
583 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
584 enum alloc_reserve reserve,
585 bool may_alloc_partial,
588 struct bch_dev_usage usage;
589 struct open_bucket *ob;
591 bch2_trans_do(c, NULL, NULL, 0,
592 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
593 may_alloc_partial, cl, &usage)));
597 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
598 unsigned l, unsigned r)
600 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
601 (stripe->next_alloc[l] < stripe->next_alloc[r]));
604 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
606 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
607 struct dev_stripe_state *stripe,
608 struct bch_devs_mask *devs)
610 struct dev_alloc_list ret = { .nr = 0 };
613 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
614 ret.devs[ret.nr++] = i;
616 bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
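/*
 * Stripe balancing (summary of the code below): after allocating from a
 * device, that device's next_alloc counter is bumped by an amount inversely
 * proportional to its free space (free_space_inv = 2^48 / free_space), and
 * bch2_dev_alloc_list() sorts devices by next_alloc, smallest first. So,
 * roughly, a device with twice the free space takes half the penalty per
 * allocation and gets picked about twice as often; the trailing loop rescales
 * the counters so they stay bounded.
 */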
620 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
621 struct dev_stripe_state *stripe,
622 struct bch_dev_usage *usage)
624 u64 *v = stripe->next_alloc + ca->dev_idx;
625 u64 free_space = dev_buckets_available(ca, RESERVE_none);
626 u64 free_space_inv = free_space
627 ? div64_u64(1ULL << 48, free_space)
631 if (*v + free_space_inv >= *v)
632 *v += free_space_inv;
636 for (v = stripe->next_alloc;
637 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
638 *v = *v < scale ? 0 : *v - scale;
641 void bch2_dev_stripe_increment(struct bch_dev *ca,
642 struct dev_stripe_state *stripe)
644 struct bch_dev_usage usage;
646 bch2_dev_usage_read_fast(ca, &usage);
647 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
650 #define BUCKET_MAY_ALLOC_PARTIAL (1 << 0)
651 #define BUCKET_ALLOC_USE_DURABILITY (1 << 1)
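/*
 * BUCKET_MAY_ALLOC_PARTIAL:	the allocation may reuse a bucket that a
 *				previous allocation only partially filled; in
 *				this file it is only set for user data write
 *				points.
 * BUCKET_ALLOC_USE_DURABILITY:	count each bucket towards nr_effective by its
 *				device's durability rather than as one replica.
 */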
653 static void add_new_bucket(struct bch_fs *c,
654 struct open_buckets *ptrs,
655 struct bch_devs_mask *devs_may_alloc,
656 unsigned *nr_effective,
659 struct open_bucket *ob)
661 unsigned durability =
662 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
664 __clear_bit(ob->dev, devs_may_alloc->d);
665 *nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
667 *have_cache |= !durability;
669 ob_push(c, ptrs, ob);
672 static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
673 struct open_buckets *ptrs,
674 struct dev_stripe_state *stripe,
675 struct bch_devs_mask *devs_may_alloc,
676 unsigned nr_replicas,
677 unsigned *nr_effective,
679 enum alloc_reserve reserve,
683 struct bch_fs *c = trans->c;
684 struct dev_alloc_list devs_sorted =
685 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
688 int ret = -BCH_ERR_insufficient_devices;
691 BUG_ON(*nr_effective >= nr_replicas);
693 for (i = 0; i < devs_sorted.nr; i++) {
694 struct bch_dev_usage usage;
695 struct open_bucket *ob;
697 dev = devs_sorted.devs[i];
700 ca = rcu_dereference(c->devs[dev]);
702 percpu_ref_get(&ca->ref);
708 if (!ca->mi.durability && *have_cache) {
709 percpu_ref_put(&ca->ref);
713 ob = bch2_bucket_alloc_trans(trans, ca, reserve,
714 flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
716 bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
717 percpu_ref_put(&ca->ref);
721 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
726 add_new_bucket(c, ptrs, devs_may_alloc,
727 nr_effective, have_cache, flags, ob);
729 if (*nr_effective >= nr_replicas) {
738 int bch2_bucket_alloc_set(struct bch_fs *c,
739 struct open_buckets *ptrs,
740 struct dev_stripe_state *stripe,
741 struct bch_devs_mask *devs_may_alloc,
742 unsigned nr_replicas,
743 unsigned *nr_effective,
745 enum alloc_reserve reserve,
749 return bch2_trans_do(c, NULL, NULL, 0,
750 bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
751 devs_may_alloc, nr_replicas,
752 nr_effective, have_cache, reserve,
756 /* Allocate from stripes: */
759 * if we can't allocate a new stripe because there are already too many
760 * partially filled stripes, force allocating from an existing stripe even when
761 * it's to a device we don't want:
764 static int bucket_alloc_from_stripe(struct bch_fs *c,
765 struct open_buckets *ptrs,
766 struct write_point *wp,
767 struct bch_devs_mask *devs_may_alloc,
769 unsigned erasure_code,
770 unsigned nr_replicas,
771 unsigned *nr_effective,
776 struct dev_alloc_list devs_sorted;
777 struct ec_stripe_head *h;
778 struct open_bucket *ob;
788 if (ec_open_bucket(c, ptrs))
791 h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
792 wp == &c->copygc_write_point,
799 devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
801 for (i = 0; i < devs_sorted.nr; i++)
802 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
803 if (!h->s->blocks[ec_idx])
806 ob = c->open_buckets + h->s->blocks[ec_idx];
807 if (ob->dev == devs_sorted.devs[i] &&
808 !test_and_set_bit(ec_idx, h->s->blocks_allocated))
813 ca = bch_dev_bkey_exists(c, ob->dev);
818 add_new_bucket(c, ptrs, devs_may_alloc,
819 nr_effective, have_cache, flags, ob);
820 atomic_inc(&h->s->pin);
822 bch2_ec_stripe_head_put(c, h);
826 /* Sector allocator */
828 static void get_buckets_from_writepoint(struct bch_fs *c,
829 struct open_buckets *ptrs,
830 struct write_point *wp,
831 struct bch_devs_mask *devs_may_alloc,
832 unsigned nr_replicas,
833 unsigned *nr_effective,
838 struct open_buckets ptrs_skip = { .nr = 0 };
839 struct open_bucket *ob;
842 open_bucket_for_each(c, &wp->ptrs, ob, i) {
843 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
845 if (*nr_effective < nr_replicas &&
846 test_bit(ob->dev, devs_may_alloc->d) &&
847 (ca->mi.durability ||
848 (wp->data_type == BCH_DATA_user && !*have_cache)) &&
849 (ob->ec || !need_ec)) {
850 add_new_bucket(c, ptrs, devs_may_alloc,
851 nr_effective, have_cache,
854 ob_push(c, &ptrs_skip, ob);
857 wp->ptrs = ptrs_skip;
860 static int open_bucket_add_buckets(struct btree_trans *trans,
861 struct open_buckets *ptrs,
862 struct write_point *wp,
863 struct bch_devs_list *devs_have,
865 unsigned erasure_code,
866 unsigned nr_replicas,
867 unsigned *nr_effective,
869 enum alloc_reserve reserve,
873 struct bch_fs *c = trans->c;
874 struct bch_devs_mask devs;
875 struct open_bucket *ob;
876 struct closure *cl = NULL;
881 devs = target_rw_devs(c, wp->data_type, target);
884 /* Don't allocate from devices we already have pointers to: */
885 for (i = 0; i < devs_have->nr; i++)
886 __clear_bit(devs_have->devs[i], devs.d);
888 open_bucket_for_each(c, ptrs, ob, i)
889 __clear_bit(ob->dev, devs.d);
892 if (!ec_open_bucket(c, ptrs)) {
893 get_buckets_from_writepoint(c, ptrs, wp, &devs,
894 nr_replicas, nr_effective,
895 have_cache, flags, true);
896 if (*nr_effective >= nr_replicas)
900 if (!ec_open_bucket(c, ptrs)) {
901 ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
902 target, erasure_code,
903 nr_replicas, nr_effective,
904 have_cache, flags, _cl);
905 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
906 bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
907 bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
909 if (*nr_effective >= nr_replicas)
914 get_buckets_from_writepoint(c, ptrs, wp, &devs,
915 nr_replicas, nr_effective,
916 have_cache, flags, false);
917 if (*nr_effective >= nr_replicas)
922 * Try nonblocking first, so that if one device is full we'll try from other devices:
925 ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
926 nr_replicas, nr_effective, have_cache,
929 !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
930 !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
939 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
940 struct open_buckets *obs)
942 struct open_buckets ptrs = { .nr = 0 };
943 struct open_bucket *ob, *ob2;
946 open_bucket_for_each(c, obs, ob, i) {
947 bool drop = !ca || ob->dev == ca->dev_idx;
949 if (!drop && ob->ec) {
950 mutex_lock(&ob->ec->lock);
951 for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
952 if (!ob->ec->blocks[j])
955 ob2 = c->open_buckets + ob->ec->blocks[j];
956 drop |= ob2->dev == ca->dev_idx;
958 mutex_unlock(&ob->ec->lock);
962 bch2_open_bucket_put(c, ob);
964 ob_push(c, &ptrs, ob);
970 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
971 struct write_point *wp)
973 mutex_lock(&wp->lock);
974 bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
975 mutex_unlock(&wp->lock);
978 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
979 unsigned long write_point)
982 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
984 return &c->write_points_hash[hash];
987 static struct write_point *__writepoint_find(struct hlist_head *head,
988 unsigned long write_point)
990 struct write_point *wp;
993 hlist_for_each_entry_rcu(wp, head, node)
994 if (wp->write_point == write_point)
1002 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
1004 u64 stranded = c->write_points_nr * c->bucket_size_max;
1005 u64 free = bch2_fs_usage_read_short(c).free;
1007 return stranded * factor > free;
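/*
 * too_many_writepoints() above treats each write point as stranding up to
 * bucket_size_max of space in open buckets. try_increase_writepoints() only
 * adds a write point while that stranded total stays under ~1/32 of free
 * space; try_decrease_writepoints() starts reclaiming write points once it
 * exceeds ~1/8 (the factor-8 check below), giving some hysteresis between the
 * two.
 */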
1010 static bool try_increase_writepoints(struct bch_fs *c)
1012 struct write_point *wp;
1014 if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
1015 too_many_writepoints(c, 32))
1018 wp = c->write_points + c->write_points_nr++;
1019 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
1023 static bool try_decrease_writepoints(struct bch_fs *c,
1026 struct write_point *wp;
1028 mutex_lock(&c->write_points_hash_lock);
1029 if (c->write_points_nr < old_nr) {
1030 mutex_unlock(&c->write_points_hash_lock);
1034 if (c->write_points_nr == 1 ||
1035 !too_many_writepoints(c, 8)) {
1036 mutex_unlock(&c->write_points_hash_lock);
1040 wp = c->write_points + --c->write_points_nr;
1042 hlist_del_rcu(&wp->node);
1043 mutex_unlock(&c->write_points_hash_lock);
1045 bch2_writepoint_stop(c, NULL, wp);
1049 static void bch2_trans_mutex_lock(struct btree_trans *trans,
1052 if (!mutex_trylock(lock)) {
1053 bch2_trans_unlock(trans);
1058 static struct write_point *writepoint_find(struct btree_trans *trans,
1059 unsigned long write_point)
1061 struct bch_fs *c = trans->c;
1062 struct write_point *wp, *oldest;
1063 struct hlist_head *head;
1065 if (!(write_point & 1UL)) {
1066 wp = (struct write_point *) write_point;
1067 bch2_trans_mutex_lock(trans, &wp->lock);
1071 head = writepoint_hash(c, write_point);
1073 wp = __writepoint_find(head, write_point);
1076 bch2_trans_mutex_lock(trans, &wp->lock);
1077 if (wp->write_point == write_point)
1079 mutex_unlock(&wp->lock);
1082 restart_find_oldest:
1084 for (wp = c->write_points;
1085 wp < c->write_points + c->write_points_nr; wp++)
1086 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1089 bch2_trans_mutex_lock(trans, &oldest->lock);
1090 bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
1091 if (oldest >= c->write_points + c->write_points_nr ||
1092 try_increase_writepoints(c)) {
1093 mutex_unlock(&c->write_points_hash_lock);
1094 mutex_unlock(&oldest->lock);
1095 goto restart_find_oldest;
1098 wp = __writepoint_find(head, write_point);
1099 if (wp && wp != oldest) {
1100 mutex_unlock(&c->write_points_hash_lock);
1101 mutex_unlock(&oldest->lock);
1106 hlist_del_rcu(&wp->node);
1107 wp->write_point = write_point;
1108 hlist_add_head_rcu(&wp->node, head);
1109 mutex_unlock(&c->write_points_hash_lock);
1111 wp->last_used = local_clock();
1116 * Get us a write point we can allocate from, and return with its lock held:
1118 int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
1120 unsigned erasure_code,
1121 struct write_point_specifier write_point,
1122 struct bch_devs_list *devs_have,
1123 unsigned nr_replicas,
1124 unsigned nr_replicas_required,
1125 enum alloc_reserve reserve,
1128 struct write_point **wp_ret)
1130 struct bch_fs *c = trans->c;
1131 struct write_point *wp;
1132 struct open_bucket *ob;
1133 struct open_buckets ptrs;
1134 unsigned nr_effective, write_points_nr;
1135 unsigned ob_flags = 0;
1140 if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1141 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1143 BUG_ON(!nr_replicas || !nr_replicas_required);
1147 write_points_nr = c->write_points_nr;
1150 *wp_ret = wp = writepoint_find(trans, write_point.v);
1152 if (wp->data_type == BCH_DATA_user)
1153 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1155 /* metadata may not allocate on cache devices: */
1156 if (wp->data_type != BCH_DATA_user)
1159 if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1160 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1161 target, erasure_code,
1162 nr_replicas, &nr_effective,
1163 &have_cache, reserve,
1166 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1167 target, erasure_code,
1168 nr_replicas, &nr_effective,
1169 &have_cache, reserve,
1172 bch2_err_matches(ret, BCH_ERR_transaction_restart))
1175 ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
1177 nr_replicas, &nr_effective,
1178 &have_cache, reserve,
1182 BUG_ON(!ret && nr_effective < nr_replicas);
1184 if (erasure_code && !ec_open_bucket(c, &ptrs))
1185 pr_debug("failed to get ec bucket: ret %i", ret);
1187 if (ret == -BCH_ERR_insufficient_devices &&
1188 nr_effective >= nr_replicas_required)
1194 /* Free buckets we didn't use: */
1195 open_bucket_for_each(c, &wp->ptrs, ob, i)
1196 open_bucket_free_unused(c, wp, ob);
1200 wp->sectors_free = UINT_MAX;
1202 open_bucket_for_each(c, &wp->ptrs, ob, i)
1203 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1205 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1209 open_bucket_for_each(c, &wp->ptrs, ob, i)
1210 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1211 ob_push(c, &ptrs, ob);
1213 open_bucket_free_unused(c, wp, ob);
1216 mutex_unlock(&wp->lock);
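/*
 * Allocation failed: if we ran out of free buckets or open buckets, try
 * freeing up a write point (and the buckets it holds open) and retry;
 * otherwise translate the error, returning ENOSPC when the caller has no
 * closure to wait on.
 */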
1218 if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
1219 try_decrease_writepoints(c, write_points_nr))
1222 if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
1223 bch2_err_matches(ret, BCH_ERR_freelist_empty))
1226 : -BCH_ERR_ENOSPC_bucket_alloc;
1228 if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
1234 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1236 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1238 return (struct bch_extent_ptr) {
1239 .type = 1 << BCH_EXTENT_ENTRY_ptr,
1242 .offset = bucket_to_sector(ca, ob->bucket) +
1243 ca->mi.bucket_size -
1249 * Append pointers to the space we just allocated to @k, and mark @sectors space
1250 * as allocated out of @ob
1252 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1253 struct bkey_i *k, unsigned sectors,
1257 struct open_bucket *ob;
1260 BUG_ON(sectors > wp->sectors_free);
1261 wp->sectors_free -= sectors;
1263 open_bucket_for_each(c, &wp->ptrs, ob, i) {
1264 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1265 struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
1267 ptr.cached = cached ||
1268 (!ca->mi.durability &&
1269 wp->data_type == BCH_DATA_user);
1271 bch2_bkey_append_ptr(k, ptr);
1273 BUG_ON(sectors > ob->sectors_free);
1274 ob->sectors_free -= sectors;
1279 * Sector allocation is done: release the open buckets we've used up, keeping
1280 * any that still have free space on the write point
1282 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1284 struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
1285 struct open_bucket *ob;
1288 open_bucket_for_each(c, &wp->ptrs, ob, i)
1289 ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
1292 mutex_unlock(&wp->lock);
1294 bch2_open_buckets_put(c, &ptrs);
1297 static inline void writepoint_init(struct write_point *wp,
1298 enum bch_data_type type)
1300 mutex_init(&wp->lock);
1301 wp->data_type = type;
1303 INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
1304 INIT_LIST_HEAD(&wp->writes);
1305 spin_lock_init(&wp->writes_lock);
1308 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1310 struct open_bucket *ob;
1311 struct write_point *wp;
1313 mutex_init(&c->write_points_hash_lock);
1314 c->write_points_nr = ARRAY_SIZE(c->write_points);
1316 /* open bucket 0 is a sentinel NULL: */
1317 spin_lock_init(&c->open_buckets[0].lock);
1319 for (ob = c->open_buckets + 1;
1320 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1321 spin_lock_init(&ob->lock);
1322 c->open_buckets_nr_free++;
1324 ob->freelist = c->open_buckets_freelist;
1325 c->open_buckets_freelist = ob - c->open_buckets;
1328 writepoint_init(&c->btree_write_point, BCH_DATA_btree);
1329 writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
1330 writepoint_init(&c->copygc_write_point, BCH_DATA_user);
1332 for (wp = c->write_points;
1333 wp < c->write_points + c->write_points_nr; wp++) {
1334 writepoint_init(wp, BCH_DATA_user);
1336 wp->last_used = local_clock();
1337 wp->write_point = (unsigned long) wp;
1338 hlist_add_head_rcu(&wp->node,
1339 writepoint_hash(c, wp->write_point));
1343 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1345 struct open_bucket *ob;
1347 for (ob = c->open_buckets;
1348 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1350 spin_lock(&ob->lock);
1351 if (ob->valid && !ob->on_partial_list) {
1352 prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
1353 ob - c->open_buckets,
1354 atomic_read(&ob->pin),
1355 bch2_data_types[ob->data_type],
1356 ob->dev, ob->bucket, ob->gen);
1358 spin_unlock(&ob->lock);