// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "buckets_waiting_for_journal.h"
#include "disk_groups.h"
#include "nocow_locking.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
					   struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}
const char * const bch2_watermarks[] = {
#define x(name)	#name,
	BCH_WATERMARKS()
#undef x
	NULL
};
/*
 * Open buckets represent buckets that are currently being allocated from. They
 * serve two purposes:
 *
 * - They track buckets that have been partially allocated, allowing for
 *   sub-bucket sized allocations - they're used by the sector allocator below
 *
 * - They provide a reference to the buckets they own that mark and sweep GC
 *   can find, until the new allocation has a pointer to it inserted into the
 *   btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
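
/*
 * Illustrative sketch, not code from this file: a typical consumer (the write
 * path is assumed here) holds its open_bucket references across the data write
 * and the btree update, and only drops them afterwards:
 *
 *	bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors, false);
 *	bch2_alloc_sectors_done(c, wp);
 *	// ...write the data, then do the index update...
 *	bch2_open_buckets_put(c, &obs);
 *
 * Putting the reference before the index update would leave a window where the
 * bucket looks unreferenced and could be reused.
 */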
void bch2_reset_alloc_cursors(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		ca->alloc_cursor = 0;
	rcu_read_unlock();
}
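
/*
 * Open buckets are kept in a small hash table, keyed by (device, bucket) and
 * chained through the open_bucket ->hash index field rather than pointers, so
 * that bch2_bucket_is_open() can cheaply check whether a bucket is already
 * being allocated from:
 */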
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}
static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (ob->ec) {
		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	ob->valid = false;
	ob->data_type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c, struct open_bucket *ob)
{
	BUG_ON(c->open_buckets_partial_nr >=
	       ARRAY_SIZE(c->open_buckets_partial));

	spin_lock(&c->freelist_lock);
	ob->on_partial_list = true;
	c->open_buckets_partial[c->open_buckets_partial_nr++] =
		ob - c->open_buckets;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
	closure_wake_up(&c->freelist_wait);
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}
static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
{
	switch (watermark) {
	case BCH_WATERMARK_reclaim:
		return 0;
	case BCH_WATERMARK_btree:
	case BCH_WATERMARK_btree_copygc:
		return OPEN_BUCKETS_COUNT / 4;
	case BCH_WATERMARK_copygc:
		return OPEN_BUCKETS_COUNT / 3;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
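
/*
 * Explanatory note (inferred from the reserve sizes above, not original
 * commentary): higher priority watermarks are allowed to dip further into the
 * pool of open buckets - e.g. btree and copygc allocations still succeed after
 * normal user allocations start returning -BCH_ERR_open_buckets_empty - so the
 * machinery that frees up space can always make forward progress.
 */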
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket,
					      enum bch_watermark watermark,
					      const struct bch_alloc_v4 *a,
					      struct bucket_alloc_state *s,
					      struct closure *cl)
{
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		s->skipped_nouse++;
		return NULL;
	}

	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		s->skipped_open++;
		return NULL;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
		s->skipped_need_journal_commit++;
		return NULL;
	}

	if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
		s->skipped_nocow++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		s->skipped_open++;
		return NULL;
	}

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->dev		= ca->dev_idx;
	ob->gen		= a->gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);
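
	/*
	 * We successfully allocated: if earlier allocations had to block on
	 * the freelist or on open buckets, record how long they were blocked:
	 */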
	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);
	return ob;
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum bch_watermark watermark, u64 free_entry,
					    struct bucket_alloc_state *s,
					    struct bkey_s_c freespace_k,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct open_bucket *ob;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;
	int ret;
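
	/*
	 * Note on the decoding above: keys in the freespace btree pack the
	 * bucket number into the low 56 bits of the key offset and generation
	 * bits (alloc_freespace_genbits()) into the high 8, which lets the
	 * genbits check below detect freespace entries that have gone stale:
	 */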
	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
		       "  freespace key ",
			ca->mi.first_bucket, ca->mi.nbuckets);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	k = bch2_bkey_get_iter(trans, &iter,
			       BTREE_ID_alloc, POS(ca->dev_idx, b),
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret) {
		ob = ERR_PTR(ret);
		goto err;
	}

	a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type != BCH_DATA_free) {
		if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
			ob = NULL;
			goto err;
		}

		prt_printf(&buf, "non free bucket in freespace btree\n"
		       "  freespace key ");
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (genbits != (alloc_freespace_genbits(*a) >> 56) &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
		       "  freespace key ",
			genbits, alloc_freespace_genbits(*a) >> 56);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		struct bch_backpointer bp;
		struct bpos bp_pos = POS_MIN;

		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
						&bp_pos, &bp,
						BTREE_ITER_NOPRESERVE);
		if (ret) {
			ob = ERR_PTR(ret);
			goto err;
		}

		if (!bkey_eq(bp_pos, POS_MAX)) {
			/*
			 * Bucket may have data in it - we don't call
			 * bch2_trans_inconsistent() because fsck hasn't
			 * finished yet:
			 */
			ob = NULL;
			goto err;
		}
	}

	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
	if (!ob)
		iter.path->preserve = false;
err:
	if (iter.trans && iter.path)
		set_btree_iter_dontneed(&iter);
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ob;
}
/*
 * This path is used before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum bch_watermark watermark,
			struct bucket_alloc_state *s,
			struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
	u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
	int ret;
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
			   BTREE_ITER_SLOTS, k, ret) {
		struct bch_alloc_v4 a_convert;
		const struct bch_alloc_v4 *a;

		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))
			continue;

		a = bch2_alloc_to_v4(k, &a_convert);

		if (a->data_type != BCH_DATA_free)
			continue;

		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	alloc_cursor = iter.pos.offset;
	ca->alloc_cursor = alloc_cursor;
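
	/*
	 * The scan starts at a persistent per-device cursor so repeated
	 * allocations don't keep rescanning the same region; if we reached the
	 * end of the device without finding anything, wrap around and retry
	 * from where the cursor originally started:
	 */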
	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_cursor > alloc_start) {
		alloc_cursor = alloc_start;
		goto again;
	}

	return ob;
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						      struct bch_dev *ca,
						      enum bch_watermark watermark,
						      struct bucket_alloc_state *s,
						      struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(ca->alloc_cursor));
	u64 alloc_cursor = alloc_start;
	int ret;

	BUG_ON(ca->new_fs_bucket_idx);
again:
	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
				     POS(ca->dev_idx, alloc_cursor), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)
			break;

		for (alloc_cursor = max(alloc_cursor, bkey_start_offset(k.k));
		     alloc_cursor < k.k->p.offset;
		     alloc_cursor++) {
			ret = btree_trans_too_many_iters(trans);
			if (ret) {
				ob = ERR_PTR(ret);
				break;
			}

			ob = try_alloc_bucket(trans, ca, watermark,
					      alloc_cursor, s, k, cl);
			if (ob) {
				iter.path->preserve = false;
				break;
			}
		}

		if (ob || ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	ca->alloc_cursor = alloc_cursor;

	if (!ob && ret)
		ob = ERR_PTR(ret);

	if (!ob && alloc_start > ca->mi.first_bucket) {
		alloc_cursor = alloc_start = ca->mi.first_bucket;
		goto again;
	}

	return ob;
}
/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
 * @ca:		device to allocate from
 * @watermark:	how important is this allocation?
 * @cl:		if not NULL, closure to be used to wait if buckets not available
 * @usage:	for secondarily returning the current device usage
 *
 * Returns:	an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum bch_watermark watermark,
						   struct closure *cl,
						   struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace = READ_ONCE(ca->mi.freespace_initialized);
	u64 avail;
	struct bucket_alloc_state s = { 0 };
	bool waiting = false;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, watermark);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)
		bch2_do_discards(c);

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
		bch2_do_gc_gens(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_do_invalidates(c);

	if (!avail) {
		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		if (!c->blocked_allocate)
			c->blocked_allocate = local_clock();

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);
alloc:
	ob = likely(freespace)
		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);

	if (s.skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && freespace && c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_alloc_info) {
		freespace = false;
		goto alloc;
	}
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);
	if (!IS_ERR(ob))
		trace_and_count(c, bucket_alloc, ca,
				bch2_watermarks[watermark],
				ob->bucket,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				"");
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		trace_and_count(c, bucket_alloc_fail, ca,
				bch2_watermarks[watermark],
				0,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				bch2_err_str(PTR_ERR(ob)));

	return ob;
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
								   cl, &usage)));
	return ob;
}
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
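
/*
 * Explanatory note on the stripe heuristic below: next_alloc[] holds a
 * per-device cost that grows by roughly 2^48 / free_space each time a device
 * is picked, and bch2_dev_alloc_list() sorts devices by that cost. As an
 * illustrative example, a device with twice the free space of another
 * accumulates cost half as fast and is therefore picked about twice as often.
 * The rescaling pass keeps the counters from growing without bound.
 */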
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
						     struct dev_stripe_state *stripe,
						     struct bch_dev_usage *usage)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}
static int add_new_bucket(struct bch_fs *c,
			  struct open_buckets *ptrs,
			  struct bch_devs_mask *devs_may_alloc,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
			  unsigned flags,
			  struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	BUG_ON(*nr_effective >= nr_replicas);
	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective += (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)
		? durability : 1;
	*have_cache |= !durability;

	ob_push(c, ptrs, ob);

	if (*nr_effective >= nr_replicas)
		return 1;
	if (ob->ec)
		return 1;
	return 0;
}
int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
				struct open_buckets *ptrs,
				struct dev_stripe_state *stripe,
				struct bch_devs_mask *devs_may_alloc,
				unsigned nr_replicas,
				unsigned *nr_effective,
				bool *have_cache,
				unsigned flags,
				enum bch_data_type data_type,
				enum bch_watermark watermark,
				struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	unsigned dev;
	struct bch_dev *ca;
	int ret = -BCH_ERR_insufficient_devices;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct bch_dev_usage usage;
		struct open_bucket *ob;

		dev = devs_sorted.devs[i];

		rcu_read_lock();
		ca = rcu_dereference(c->devs[dev]);
		if (ca)
			percpu_ref_get(&ca->ref);
		rcu_read_unlock();

		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			percpu_ref_put(&ca->ref);
			continue;
		}

		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		percpu_ref_put(&ca->ref);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		ob->data_type = data_type;

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
				   have_cache, flags, ob)) {
			ret = 0;
			break;
		}
	}

	return ret;
}
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */
static int bucket_alloc_from_stripe(struct btree_trans *trans,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned target,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache,
				    enum bch_watermark watermark,
				    unsigned flags,
				    struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	unsigned i, ec_idx;
	int ret = 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;
	ec_stripe_new_get(h->s, STRIPE_REF_io);

	ret = add_new_bucket(c, ptrs, devs_may_alloc,
			     nr_replicas, nr_effective,
			     have_cache, flags, ob);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return ret;
}
/* Sector allocator */

static bool want_bucket(struct bch_fs *c,
			struct write_point *wp,
			struct bch_devs_mask *devs_may_alloc,
			bool *have_cache, bool ec,
			struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (!test_bit(ob->dev, devs_may_alloc->d))
		return false;

	if (ob->data_type != wp->data_type)
		return false;

	if (!ca->mi.durability &&
	    (wp->data_type == BCH_DATA_btree || ec || *have_cache))
		return false;

	if (ec != (ob->ec != NULL))
		return false;

	return true;
}
static int bucket_alloc_set_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache,
				       bool ec, unsigned flags)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;
	int ret = 0;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		if (!ret && want_bucket(c, wp, devs_may_alloc,
					have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, flags, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}
	wp->ptrs = ptrs_skip;

	return ret;
}
static int bucket_alloc_set_partial(struct bch_fs *c,
				    struct open_buckets *ptrs,
				    struct write_point *wp,
				    struct bch_devs_mask *devs_may_alloc,
				    unsigned nr_replicas,
				    unsigned *nr_effective,
				    bool *have_cache, bool ec,
				    enum bch_watermark watermark,
				    unsigned flags)
{
	int i, ret = 0;

	if (!c->open_buckets_partial_nr)
		return 0;

	spin_lock(&c->freelist_lock);

	if (!c->open_buckets_partial_nr)
		goto unlock;

	for (i = c->open_buckets_partial_nr - 1; i >= 0; --i) {
		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];

		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
			struct bch_dev_usage usage;
			u64 avail;

			bch2_dev_usage_read_fast(ca, &usage);
			avail = dev_buckets_free(ca, usage, watermark);
			if (!avail)
				continue;

			array_remove_item(c->open_buckets_partial,
					  c->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
					     have_cache, flags, ob);
			if (ret)
				break;
		}
	}
unlock:
	spin_unlock(&c->freelist_lock);
	return ret;
}
static int __open_bucket_add_buckets(struct btree_trans *trans,
				     struct open_buckets *ptrs,
				     struct write_point *wp,
				     struct bch_devs_list *devs_have,
				     unsigned target,
				     bool erasure_code,
				     unsigned nr_replicas,
				     unsigned *nr_effective,
				     bool *have_cache,
				     enum bch_watermark watermark,
				     unsigned flags,
				     struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	unsigned i;
	int ret;

	devs = target_rw_devs(c, wp->data_type, target);

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (erasure_code && ec_open_bucket(c, ptrs))
		return 0;

	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
					  nr_replicas, nr_effective,
					  have_cache, erasure_code, flags);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				       nr_replicas, nr_effective,
				       have_cache, erasure_code, watermark, flags);
	if (ret)
		return ret;

	if (erasure_code) {
		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
					       target,
					       nr_replicas, nr_effective,
					       have_cache,
					       watermark, flags, _cl);
	} else {
retry_blocking:
		/*
		 * Try nonblocking first, so that if one device is full we'll try from
		 * other devices:
		 */
		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
						  nr_replicas, nr_effective, have_cache,
						  flags, wp->data_type, watermark, cl);
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
		    !cl && _cl) {
			cl = _cl;
			goto retry_blocking;
		}
	}

	return ret;
}
static int open_bucket_add_buckets(struct btree_trans *trans,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned target,
				   unsigned erasure_code,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum bch_watermark watermark,
				   unsigned flags,
				   struct closure *cl)
{
	int ret;

	if (erasure_code) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,
				watermark, flags, cl);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
		    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			return ret;
		if (*nr_effective >= nr_replicas)
			return 0;
	}
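
	/*
	 * Allocate the remaining replicas without erasure coding (if the
	 * erasure coded allocation above failed in a retryable way we already
	 * returned):
	 */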
	ret = __open_bucket_add_buckets(trans, ptrs, wp,
			devs_have, target, false,
			nr_replicas, nr_effective, have_cache,
			watermark, flags, cl);
	return ret < 0 ? ret : 0;
}
/**
 * should_drop_bucket - check if this open_bucket should go away
 * @ob:	open_bucket to predicate on
 * @c:	filesystem handle
 * @ca:	if set, we're killing buckets for a particular device
 * @ec:	if true, we're shutting down erasure coding and killing all ec
 *	open_buckets; if neither is set, all open_buckets are dropped
 * Returns: true if we should kill this open_bucket
 *
 * We're killing open_buckets because we're shutting down a device, erasure
 * coding, or the entire filesystem - check if this open_bucket matches:
 */
static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
			       struct bch_dev *ca, bool ec)
{
	if (ec) {
		return ob->ec != NULL;
	} else if (ca) {
		bool drop = ob->dev == ca->dev_idx;
		struct open_bucket *ob2;
		unsigned i;

		if (!drop && ob->ec) {
			unsigned nr_blocks;

			mutex_lock(&ob->ec->lock);
			nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;

			for (i = 0; i < nr_blocks; i++) {
				if (!ob->ec->blocks[i])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[i];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		return drop;
	} else {
		return true;
	}
}
static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
				 bool ec, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (should_drop_bucket(ob, c, ca, ec))
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}
void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca,
			    bool ec)
{
	unsigned i;

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, ec, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, ec, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	spin_lock(&c->freelist_lock);
	i = 0;
	while (i < c->open_buckets_partial_nr) {
		struct open_bucket *ob =
			c->open_buckets + c->open_buckets_partial[i];

		if (should_drop_bucket(ob, c, ca, ec)) {
			--c->open_buckets_partial_nr;
			swap(c->open_buckets_partial[i],
			     c->open_buckets_partial[c->open_buckets_partial_nr]);
			ob->on_partial_list = false;
			spin_unlock(&c->freelist_lock);
			bch2_open_bucket_put(c, ob);
			spin_lock(&c->freelist_lock);
		} else {
			i++;
		}
	}
	spin_unlock(&c->freelist_lock);

	bch2_ec_stop_dev(c, ca);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}

static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded = c->write_points_nr * c->bucket_size_max;
	u64 free = bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
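
/*
 * Worked example with illustrative numbers (not from this file): with 32 write
 * points and a 1MB max bucket size, up to 32MB can sit stranded in partially
 * filled buckets; try_increase_writepoints() stops adding write points once
 * that exceeds 1/32nd of free space, and try_decrease_writepoints() starts
 * culling them past 1/8th.
 */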
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct btree_trans *trans, unsigned old_nr)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_trans_mutex_lock_norelock(trans, &wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);
	wp->ptrs.nr = 0;
	mutex_unlock(&wp->lock);
	return true;
}
static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}
/*
 * Get us a write point we can allocate from, and return with it locked:
 */
int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
				   unsigned target,
				   unsigned erasure_code,
				   struct write_point_specifier write_point,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   unsigned nr_replicas_required,
				   enum bch_watermark watermark,
				   unsigned flags,
				   struct closure *cl,
				   struct write_point **wp_ret)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	bool have_cache;
	int ret;
	int i;

	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	*wp_ret = wp = writepoint_find(trans, write_point.v);

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (target && !(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		/* Don't retry from all devices if we're out of open buckets: */
		if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
			goto allocate_blocking;

		/*
		 * Only try to allocate cache (durability = 0 devices) from the
		 * specified target:
		 */
		have_cache = true;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	} else {
allocate_blocking:
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, watermark,
					      flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return 0;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(trans, write_points_nr))
		goto retry;

	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		return cl
			? -BCH_ERR_bucket_alloc_blocked
			: -BCH_ERR_ENOSPC_bucket_alloc;

	return ret;
}
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob:
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

/*
 * Finished allocating from @wp: release it, putting the open buckets we're
 * done with:
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	bch2_alloc_sectors_done_inlined(c, wp);
}
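
/*
 * Illustrative usage sketch, assuming the parameter order of
 * bch2_alloc_sectors_start_trans() above; error handling and the actual I/O
 * are elided:
 *
 *	ret = bch2_alloc_sectors_start_trans(trans, target, erasure_code,
 *					     write_point, &devs_have,
 *					     nr_replicas, nr_replicas_required,
 *					     watermark, flags, cl, &wp);
 *	if (!ret) {
 *		bch2_alloc_sectors_append_ptrs(c, wp, &k->k_i, sectors, cached);
 *		bch2_alloc_sectors_done(c, wp);
 *	}
 *
 * The open_bucket references taken here are put (bch2_open_buckets_put())
 * only after the btree update that makes the allocation reachable.
 */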
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;

	INIT_WORK(&wp->index_update_work, bch2_write_point_do_index_updates);
	INIT_LIST_HEAD(&wp->writes);
	spin_lock_init(&wp->writes_lock);
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used = local_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}
static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	unsigned data_type = ob->data_type;
	barrier(); /* READ_ONCE() doesn't work on bitfields */

	prt_printf(out, "%zu ref %u %s %u:%llu gen %u allocated %u/%u",
		   ob - c->open_buckets,
		   atomic_read(&ob->pin),
		   data_type < BCH_DATA_NR ? bch2_data_types[data_type] : "invalid data type",
		   ob->dev, ob->bucket, ob->gen,
		   ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size);
	if (ob->ec)
		prt_printf(out, " ec idx %llu", ob->ec->idx);
	if (ob->on_partial_list)
		prt_str(out, " partial");
	prt_newline(out);
}
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct open_bucket *ob;

	out->atomic++;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list)
			bch2_open_bucket_to_text(out, c, ob);
		spin_unlock(&ob->lock);
	}

	--out->atomic;
}
void bch2_open_buckets_partial_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned i;

	out->atomic++;
	spin_lock(&c->freelist_lock);

	for (i = 0; i < c->open_buckets_partial_nr; i++)
		bch2_open_bucket_to_text(out, c,
					 c->open_buckets + c->open_buckets_partial[i]);

	spin_unlock(&c->freelist_lock);
	--out->atomic;
}
static const char * const bch2_write_point_states[] = {
#define x(n)	#n,
	WRITE_POINT_STATES()
#undef x
	NULL
};
static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
				     struct write_point *wp)
{
	struct open_bucket *ob;
	unsigned i;

	prt_printf(out, "%lu: ", wp->write_point);
	prt_human_readable_u64(out, wp->sectors_allocated);

	prt_printf(out, " last wrote: ");
	bch2_pr_time_units(out, sched_clock() - wp->last_used);

	for (i = 0; i < WRITE_POINT_STATE_NR; i++) {
		prt_printf(out, " %s: ", bch2_write_point_states[i]);
		bch2_pr_time_units(out, wp->time[i]);
	}

	prt_newline(out);

	printbuf_indent_add(out, 2);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		bch2_open_bucket_to_text(out, c, ob);
	printbuf_indent_sub(out, 2);
}
void bch2_write_points_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct write_point *wp;

	prt_str(out, "Foreground write points\n");
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		bch2_write_point_to_text(out, c, wp);

	prt_str(out, "Copygc write point\n");
	bch2_write_point_to_text(out, c, &c->copygc_write_point);

	prt_str(out, "Rebalance write point\n");
	bch2_write_point_to_text(out, c, &c->rebalance_write_point);

	prt_str(out, "Btree write point\n");
	bch2_write_point_to_text(out, c, &c->btree_write_point);
}