// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2012 Google, Inc.
 *
 * Foreground allocator code: allocate buckets from freelist, and allocate in
 * sector granularity from writepoints.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 */
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_groups.h"
#include "ec.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "movinggc.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
const char * const bch2_alloc_reserves[] = {
#define x(t) #t,
	BCH_ALLOC_RESERVES()
#undef x
	NULL
};
/*
 * Open buckets represent a bucket that's currently being allocated from.  They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
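/*
 * A rough sketch of that contract, hedged - the real write path lives in
 * io.c and differs in detail:
 *
 *	wp = bch2_alloc_sectors_start_trans(trans, ...);
 *	if (IS_ERR(wp))
 *		return PTR_ERR(wp);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_open_bucket_get(c, wp, &obs);	(take refs before unlocking wp)
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	... do the data write, then the btree update that inserts k ...
 *
 *	bch2_open_buckets_put(c, &obs);		(only after the index update)
 */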
static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	ob->hash = *slot;
	*slot = idx;
}
static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
{
	open_bucket_idx_t idx = ob - c->open_buckets;
	open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);

	while (*slot != idx) {
		BUG_ON(!*slot);
		slot = &c->open_buckets[*slot].hash;
	}

	*slot = ob->hash;
	ob->hash = 0;
}
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	if (ob->ec) {
		bch2_ec_bucket_written(c, ob);
		return;
	}

	percpu_down_read(&c->mark_lock);
	spin_lock(&ob->lock);

	ob->valid = false;
	ob->data_type = 0;

	spin_unlock(&ob->lock);
	percpu_up_read(&c->mark_lock);

	spin_lock(&c->freelist_lock);
	bch2_open_bucket_hash_remove(c, ob);

	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;

	c->open_buckets_nr_free++;
	ca->nr_open_buckets--;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
void bch2_open_bucket_write_error(struct bch_fs *c,
				  struct open_buckets *obs,
				  unsigned dev)
{
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i)
		if (ob->dev == dev && ob->ec)
			bch2_ec_bucket_cancel(c, ob);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);
	ob->data_type = 0;

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
	bool may_realloc = wp->data_type == BCH_DATA_user;

	BUG_ON(ca->open_buckets_partial_nr >
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (ca->open_buckets_partial_nr <
	    ARRAY_SIZE(ca->open_buckets_partial) &&
	    may_realloc) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
		u64 b = ca->new_fs_bucket_idx++;

		if (!is_superblock_bucket(ca, b) &&
		    (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
			return b;
	}

	return -1;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_btree:
	case RESERVE_btree_movinggc:
		return 0;
	case RESERVE_movinggc:
		return OPEN_BUCKETS_COUNT / 4;
	default:
		return OPEN_BUCKETS_COUNT / 2;
	}
}
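/*
 * __try_alloc_bucket() turns one candidate bucket into an open bucket. A
 * candidate is skipped (returning NULL and bumping the matching skipped_*
 * counter) if it's marked nouse, is already open, or still needs a journal
 * commit before it may safely be reused; otherwise an open_bucket is taken
 * from the freelist under freelist_lock, respecting the per-reserve
 * watermark from open_buckets_reserved().
 */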
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
					      u64 bucket,
					      enum alloc_reserve reserve,
					      struct bch_alloc_v4 *a,
					      u64 *skipped_open,
					      u64 *skipped_need_journal_commit,
					      u64 *skipped_nouse,
					      struct closure *cl)
{
	struct open_bucket *ob;

	if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
		(*skipped_nouse)++;
		return NULL;
	}

	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		(*skipped_open)++;
		return NULL;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
		(*skipped_need_journal_commit)++;
		return NULL;
	}

	spin_lock(&c->freelist_lock);

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);

		if (!c->blocked_allocate_open_bucket)
			c->blocked_allocate_open_bucket = local_clock();

		spin_unlock(&c->freelist_lock);
		return ERR_PTR(-BCH_ERR_open_buckets_empty);
	}

	/* Recheck under lock: */
	if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
		spin_unlock(&c->freelist_lock);
		(*skipped_open)++;
		return NULL;
	}

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->alloc_reserve = reserve;
	ob->dev		= ca->dev_idx;
	ob->gen		= a->gen;
	ob->bucket	= bucket;
	spin_unlock(&ob->lock);

	ca->nr_open_buckets++;
	bch2_open_bucket_hash_add(c, ob);

	if (c->blocked_allocate_open_bucket) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate_open_bucket],
			c->blocked_allocate_open_bucket);
		c->blocked_allocate_open_bucket = 0;
	}

	if (c->blocked_allocate) {
		bch2_time_stats_update(
			&c->times[BCH_TIME_blocked_allocate],
			c->blocked_allocate);
		c->blocked_allocate = 0;
	}

	spin_unlock(&c->freelist_lock);

	return ob;
}
static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
					    enum alloc_reserve reserve, u64 free_entry,
					    u64 *skipped_open,
					    u64 *skipped_need_journal_commit,
					    u64 *skipped_nouse,
					    struct bkey_s_c freespace_k,
					    struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct open_bucket *ob;
	struct bch_alloc_v4 a;
	u64 b = free_entry & ~(~0ULL << 56);
	unsigned genbits = free_entry >> 56;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
		prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
		       "  freespace key ",
		       ca->mi.first_bucket, ca->mi.nbuckets);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret) {
		ob = ERR_PTR(ret);
		goto err;
	}

	bch2_alloc_to_v4(k, &a);

	if (genbits != (alloc_freespace_genbits(a) >> 56)) {
		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
		       "  freespace key ",
		       genbits, alloc_freespace_genbits(a) >> 56);
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	if (a.data_type != BCH_DATA_free) {
		prt_printf(&buf, "non free bucket in freespace btree\n"
		       "  freespace key ");
		bch2_bkey_val_to_text(&buf, c, freespace_k);
		prt_printf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ob = ERR_PTR(-EIO);
		goto err;
	}

	ob = __try_alloc_bucket(c, ca, b, reserve, &a,
				skipped_open,
				skipped_need_journal_commit,
				skipped_nouse,
				cl);
	if (!ob)
		iter.path->preserve = false;
err:
	set_btree_iter_dontneed(&iter);
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ob;
}
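/*
 * Note on the encoding in try_alloc_bucket() above: a freespace btree entry
 * packs the bucket number into the low 56 bits of the key offset and the
 * generation bits into the high 8; both are cross-checked against the alloc
 * btree before the bucket is handed out.
 */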
static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
						    enum alloc_reserve reserve)
{
	struct open_bucket *ob;
	int i;

	spin_lock(&c->freelist_lock);

	for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
		ob = c->open_buckets + ca->open_buckets_partial[i];

		if (reserve <= ob->alloc_reserve) {
			array_remove_item(ca->open_buckets_partial,
					  ca->open_buckets_partial_nr,
					  i);
			ob->on_partial_list = false;
			ob->alloc_reserve = reserve;
			spin_unlock(&c->freelist_lock);
			return ob;
		}
	}

	spin_unlock(&c->freelist_lock);
	return NULL;
}
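/*
 * Partial buckets are the ones stashed by open_bucket_free_unused() above;
 * only BCH_DATA_user write points save them, and try_alloc_partial_bucket()
 * only hands one back when the request passes the reserve <= ob->alloc_reserve
 * check, so a laxer request never consumes a bucket that was allocated under
 * a stricter reserve.
 */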
/*
 * This path is for before the freespace btree is initialized:
 *
 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
 */
static noinline struct open_bucket *
bch2_bucket_alloc_early(struct btree_trans *trans,
			struct bch_dev *ca,
			enum alloc_reserve reserve,
			u64 *cur_bucket,
			u64 *buckets_seen,
			u64 *skipped_open,
			u64 *skipped_need_journal_commit,
			u64 *skipped_nouse,
			struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	int ret;

	*cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
	*cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);

	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
				     BTREE_ITER_SLOTS, k, ret) {
		struct bch_alloc_v4 a;

		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
			break;

		if (ca->new_fs_bucket_idx &&
		    is_superblock_bucket(ca, k.k->p.offset))
			continue;

		bch2_alloc_to_v4(k, &a);

		if (a.data_type != BCH_DATA_free)
			continue;

		(*buckets_seen)++;

		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
					skipped_open,
					skipped_need_journal_commit,
					skipped_nouse,
					cl);
		if (ob)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	*cur_bucket = iter.pos.offset;

	return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
						      struct bch_dev *ca,
						      enum alloc_reserve reserve,
						      u64 *cur_bucket,
						      u64 *buckets_seen,
						      u64 *skipped_open,
						      u64 *skipped_need_journal_commit,
						      u64 *skipped_nouse,
						      struct closure *cl)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct open_bucket *ob = NULL;
	int ret;

	BUG_ON(ca->new_fs_bucket_idx);

	/*
	 * XXX:
	 * On transaction restart, we'd like to restart from the bucket we were
	 * looking at previously
	 */
	for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
				     POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
		if (k.k->p.inode != ca->dev_idx)
			break;

		for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
		     *cur_bucket < k.k->p.offset;
		     (*cur_bucket)++) {
			ret = btree_trans_too_many_iters(trans);
			if (ret)
				break;

			(*buckets_seen)++;

			ob = try_alloc_bucket(trans, ca, reserve,
					      *cur_bucket,
					      skipped_open,
					      skipped_need_journal_commit,
					      skipped_nouse,
					      k, cl);
			if (ob)
				break;
		}

		if (ob || ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	return ob ?: ERR_PTR(ret);
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns an open_bucket on success, or an ERR_PTR() on failure.
 */
static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
						   struct bch_dev *ca,
						   enum alloc_reserve reserve,
						   bool may_alloc_partial,
						   struct closure *cl,
						   struct bch_dev_usage *usage)
{
	struct bch_fs *c = trans->c;
	struct open_bucket *ob = NULL;
	bool freespace_initialized = READ_ONCE(ca->mi.freespace_initialized);
	u64 start = freespace_initialized ? 0 : ca->bucket_alloc_trans_early_cursor;
	u64 avail;
	u64 cur_bucket = start;
	u64 buckets_seen = 0;
	u64 skipped_open = 0;
	u64 skipped_need_journal_commit = 0;
	u64 skipped_nouse = 0;
	bool waiting = false;
again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, reserve);

	if (usage->d[BCH_DATA_need_discard].buckets > avail)
		bch2_do_discards(c);

	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
		bch2_do_gc_gens(c);

	if (should_invalidate_buckets(ca, *usage))
		bch2_do_invalidates(c);

	if (!avail) {
		if (cl && !waiting) {
			closure_wait(&c->freelist_wait, cl);
			waiting = true;
			goto again;
		}

		if (!c->blocked_allocate)
			c->blocked_allocate = local_clock();

		ob = ERR_PTR(-BCH_ERR_freelist_empty);
		goto err;
	}

	if (waiting)
		closure_wake_up(&c->freelist_wait);

	if (may_alloc_partial) {
		ob = try_alloc_partial_bucket(c, ca, reserve);
		if (ob)
			return ob;
	}

	ob = likely(ca->mi.freespace_initialized)
		? bch2_bucket_alloc_freelist(trans, ca, reserve,
					&cur_bucket,
					&buckets_seen,
					&skipped_open,
					&skipped_need_journal_commit,
					&skipped_nouse,
					cl)
		: bch2_bucket_alloc_early(trans, ca, reserve,
					&cur_bucket,
					&buckets_seen,
					&skipped_open,
					&skipped_need_journal_commit,
					&skipped_nouse,
					cl);

	if (skipped_need_journal_commit * 2 > avail)
		bch2_journal_flush_async(&c->journal, NULL);

	if (!ob && !freespace_initialized && start) {
		start = cur_bucket = 0;
		goto again;
	}

	if (!freespace_initialized)
		ca->bucket_alloc_trans_early_cursor = cur_bucket;
err:
	if (!ob)
		ob = ERR_PTR(-BCH_ERR_no_buckets_found);

	if (!IS_ERR(ob))
		trace_and_count(c, bucket_alloc, ca, bch2_alloc_reserves[reserve],
				may_alloc_partial, ob->bucket);
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		trace_and_count(c, bucket_alloc_fail,
				ca, bch2_alloc_reserves[reserve],
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				buckets_seen,
				skipped_open,
				skipped_need_journal_commit,
				skipped_nouse,
				cl == NULL,
				bch2_err_str(PTR_ERR(ob)));

	return ob;
}
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct bch_dev_usage usage;
	struct open_bucket *ob;

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
							may_alloc_partial, cl, &usage)));
	return ob;
}
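/*
 * Hedged usage example for the wrapper above; errors follow the ERR_PTR
 * convention used throughout this file:
 *
 *	ob = bch2_bucket_alloc(c, ca, RESERVE_none, false, cl);
 *	if (IS_ERR(ob))
 *		return PTR_ERR(ob);
 *	... write to the bucket at ob->bucket ...
 *	bch2_open_bucket_put(c, ob);
 */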
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
			    unsigned l, unsigned r)
{
	return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
		(stripe->next_alloc[l] < stripe->next_alloc[r]));
}

#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)

struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
					  struct dev_stripe_state *stripe,
					  struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	unsigned i;

	for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
	return ret;
}
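/*
 * Device striping: next_alloc[] holds a per-device cost. Each allocation
 * bumps the chosen device's cost by an amount inversely proportional to its
 * free space (1 << 48 / free_space), then decays every device's cost by a
 * quarter of the winner's. Since bch2_dev_alloc_list() sorts ascending by
 * next_alloc, a device with twice the free space takes half the increment
 * and is picked roughly twice as often.
 */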
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
			       struct dev_stripe_state *stripe,
			       struct bch_dev_usage *usage)
{
	u64 *v = stripe->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_available(ca, RESERVE_none);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = stripe->next_alloc;
	     v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}

void bch2_dev_stripe_increment(struct bch_dev *ca,
			       struct dev_stripe_state *stripe)
{
	struct bch_dev_usage usage;

	bch2_dev_usage_read_fast(ca, &usage);
	bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
}
#define BUCKET_MAY_ALLOC_PARTIAL	(1 << 0)
#define BUCKET_ALLOC_USE_DURABILITY	(1 << 1)

static void add_new_bucket(struct bch_fs *c,
			   struct open_buckets *ptrs,
			   struct bch_devs_mask *devs_may_alloc,
			   unsigned *nr_effective,
			   bool *have_cache,
			   unsigned flags,
			   struct open_bucket *ob)
{
	unsigned durability =
		bch_dev_bkey_exists(c, ob->dev)->mi.durability;

	__clear_bit(ob->dev, devs_may_alloc->d);
	*nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
		? durability : 1;
	*have_cache |= !durability;

	ob_push(c, ptrs, ob);
}
static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct dev_stripe_state *stripe,
			struct bch_devs_mask *devs_may_alloc,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct dev_alloc_list devs_sorted =
		bch2_dev_alloc_list(c, stripe, devs_may_alloc);
	unsigned dev;
	struct bch_dev *ca;
	int ret = -BCH_ERR_insufficient_devices;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct bch_dev_usage usage;
		struct open_bucket *ob;

		dev = devs_sorted.devs[i];

		rcu_read_lock();
		ca = rcu_dereference(c->devs[dev]);
		if (ca)
			percpu_ref_get(&ca->ref);
		rcu_read_unlock();

		if (!ca)
			continue;

		if (!ca->mi.durability && *have_cache) {
			percpu_ref_put(&ca->ref);
			continue;
		}

		ob = bch2_bucket_alloc_trans(trans, ca, reserve,
				flags & BUCKET_MAY_ALLOC_PARTIAL, cl, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		percpu_ref_put(&ca->ref);

		if (IS_ERR(ob)) {
			ret = PTR_ERR(ob);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) || cl)
				break;
			continue;
		}

		add_new_bucket(c, ptrs, devs_may_alloc,
			       nr_effective, have_cache, flags, ob);

		if (*nr_effective >= nr_replicas) {
			ret = 0;
			break;
		}
	}

	return ret;
}
int bch2_bucket_alloc_set(struct bch_fs *c,
			struct open_buckets *ptrs,
			struct dev_stripe_state *stripe,
			struct bch_devs_mask *devs_may_alloc,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *cl)
{
	return bch2_trans_do(c, NULL, NULL, 0,
		      bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
						  devs_may_alloc, nr_replicas,
						  nr_effective, have_cache, reserve,
						  flags, cl));
}
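/*
 * Illustrative sketch of driving the set allocator (parameter values are
 * examples only, not taken from a real caller):
 *
 *	struct open_buckets ptrs = { .nr = 0 };
 *	unsigned nr_effective = 0;
 *	bool have_cache = false;
 *
 *	ret = bch2_bucket_alloc_set(c, &ptrs, &wp->stripe, &devs,
 *				    2, &nr_effective, &have_cache,
 *				    RESERVE_none, 0, cl);
 */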
/* Allocate from stripes: */

/*
 * if we can't allocate a new stripe because there are already too many
 * partially filled stripes, force allocating from an existing stripe even when
 * it's to a device we don't want:
 */

static int bucket_alloc_from_stripe(struct bch_fs *c,
			 struct open_buckets *ptrs,
			 struct write_point *wp,
			 struct bch_devs_mask *devs_may_alloc,
			 u16 target,
			 unsigned erasure_code,
			 unsigned nr_replicas,
			 unsigned *nr_effective,
			 bool *have_cache,
			 unsigned flags,
			 struct closure *cl)
{
	struct dev_alloc_list devs_sorted;
	struct ec_stripe_head *h;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned i, ec_idx;

	if (!erasure_code)
		return 0;

	if (nr_replicas < 2)
		return 0;

	if (ec_open_bucket(c, ptrs))
		return 0;

	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
				    wp == &c->copygc_write_point,
				    cl);
	if (IS_ERR(h))
		return PTR_ERR(h);
	if (!h)
		return 0;

	devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);

	for (i = 0; i < devs_sorted.nr; i++)
		for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
			if (!h->s->blocks[ec_idx])
				continue;

			ob = c->open_buckets + h->s->blocks[ec_idx];
			if (ob->dev == devs_sorted.devs[i] &&
			    !test_and_set_bit(ec_idx, h->s->blocks_allocated))
				goto got_bucket;
		}
	goto out_put_head;
got_bucket:
	ca = bch_dev_bkey_exists(c, ob->dev);

	ob->ec_idx	= ec_idx;
	ob->ec		= h->s;

	add_new_bucket(c, ptrs, devs_may_alloc,
		       nr_effective, have_cache, flags, ob);
	atomic_inc(&h->s->pin);
out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return 0;
}
/* Sector allocator */

static void get_buckets_from_writepoint(struct bch_fs *c,
					struct open_buckets *ptrs,
					struct write_point *wp,
					struct bch_devs_mask *devs_may_alloc,
					unsigned nr_replicas,
					unsigned *nr_effective,
					bool *have_cache,
					unsigned flags,
					bool need_ec)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->data_type == BCH_DATA_user && !*have_cache)) &&
		    (ob->ec || !need_ec)) {
			add_new_bucket(c, ptrs, devs_may_alloc,
				       nr_effective, have_cache,
				       flags, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;
}
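/*
 * open_bucket_add_buckets() fills @ptrs cheapest-first: reuse buckets
 * already on the write point, then (for erasure coded writes) allocate from
 * a stripe, then fall back to fresh bucket allocation - nonblocking across
 * all devices first, blocking only as a last resort.
 */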
static int open_bucket_add_buckets(struct btree_trans *trans,
			struct open_buckets *ptrs,
			struct write_point *wp,
			struct bch_devs_list *devs_have,
			u16 target,
			unsigned erasure_code,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
			enum alloc_reserve reserve,
			unsigned flags,
			struct closure *_cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs;
	struct open_bucket *ob;
	struct closure *cl = NULL;
	int ret;
	unsigned i;

	rcu_read_lock();
	devs = target_rw_devs(c, wp->data_type, target);
	rcu_read_unlock();

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

	if (erasure_code) {
		if (!ec_open_bucket(c, ptrs)) {
			get_buckets_from_writepoint(c, ptrs, wp, &devs,
						    nr_replicas, nr_effective,
						    have_cache, flags, true);
			if (*nr_effective >= nr_replicas)
				return 0;
		}

		if (!ec_open_bucket(c, ptrs)) {
			ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
						 target, erasure_code,
						 nr_replicas, nr_effective,
						 have_cache, flags, _cl);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
			    bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
				return ret;
			if (*nr_effective >= nr_replicas)
				return 0;
		}
	}

	get_buckets_from_writepoint(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective,
				    have_cache, flags, false);
	if (*nr_effective >= nr_replicas)
		return 0;

retry_blocking:
	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
				nr_replicas, nr_effective, have_cache,
				reserve, flags, cl);
	if (ret &&
	    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
	    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
	    !cl && _cl) {
		cl = _cl;
		goto retry_blocking;
	}

	return ret;
}
void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
				struct open_buckets *obs)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob, *ob2;
	unsigned i, j;

	open_bucket_for_each(c, obs, ob, i) {
		bool drop = !ca || ob->dev == ca->dev_idx;

		if (!drop && ob->ec) {
			mutex_lock(&ob->ec->lock);
			for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
				if (!ob->ec->blocks[j])
					continue;

				ob2 = c->open_buckets + ob->ec->blocks[j];
				drop |= ob2->dev == ca->dev_idx;
			}
			mutex_unlock(&ob->ec->lock);
		}

		if (drop)
			bch2_open_bucket_put(c, ob);
		else
			ob_push(c, &ptrs, ob);
	}

	*obs = ptrs;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	mutex_lock(&wp->lock);
	bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			goto out;
	wp = NULL;
out:
	rcu_read_unlock();
	return wp;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded	= c->write_points_nr * c->bucket_size_max;
	u64 free	= bch2_fs_usage_read_short(c).free;

	return stranded * factor > free;
}
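/*
 * Worked example of the stranding heuristic: each write point can strand up
 * to bucket_size_max sectors in a partially filled bucket, so e.g. 64 write
 * points with 1MiB buckets can strand up to 64MiB. try_increase_writepoints()
 * refuses when that bound times 32 exceeds free space, and
 * try_decrease_writepoints() starts reclaiming at a factor of 8.
 */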
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
static void bch2_trans_mutex_lock(struct btree_trans *trans,
				  struct mutex *lock)
{
	if (!mutex_trylock(lock)) {
		bch2_trans_unlock(trans);
		mutex_lock(lock);
	}
}
static struct write_point *writepoint_find(struct btree_trans *trans,
					   unsigned long write_point)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		bch2_trans_mutex_lock(trans, &wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		bch2_trans_mutex_lock(trans, &wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}
restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	bch2_trans_mutex_lock(trans, &oldest->lock);
	bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = local_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start_trans(struct btree_trans *trans,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct write_point *wp;
	struct open_bucket *ob;
	struct open_buckets ptrs;
	unsigned nr_effective, write_points_nr;
	unsigned ob_flags = 0;
	bool have_cache;
	int ret;
	unsigned i;

	if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
		ob_flags |= BUCKET_ALLOC_USE_DURABILITY;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	ptrs.nr		= 0;
	nr_effective	= 0;
	write_points_nr = c->write_points_nr;
	have_cache	= false;

	wp = writepoint_find(trans, write_point.v);

	if (wp->data_type == BCH_DATA_user)
		ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;

	/* metadata may not allocate on cache devices: */
	if (wp->data_type != BCH_DATA_user)
		have_cache = true;

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	} else {
		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      target, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, NULL);
		if (!ret ||
		    bch2_err_matches(ret, BCH_ERR_transaction_restart))
			goto alloc_done;

		ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
					      0, erasure_code,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve,
					      ob_flags, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (erasure_code && !ec_open_bucket(c, &ptrs))
		pr_debug("failed to get ec bucket: ret %u", ret);

	if (ret == -BCH_ERR_insufficient_devices &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
		return cl
			? ERR_PTR(-BCH_ERR_bucket_alloc_blocked)
			: ERR_PTR(-BCH_ERR_ENOSPC_bucket_alloc);

	if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
		return ERR_PTR(-EROFS);

	return ERR_PTR(ret);
}
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				unsigned erasure_code,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(wp = bch2_alloc_sectors_start_trans(&trans, target,
							erasure_code,
							write_point,
							devs_have,
							nr_replicas,
							nr_replicas_required,
							reserve,
							flags, cl)));
	return wp;
}
struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);

	return (struct bch_extent_ptr) {
		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
		.gen	= ob->gen,
		.dev	= ob->dev,
		.offset	= bucket_to_sector(ca, ob->bucket) +
			ca->mi.bucket_size -
			ob->sectors_free,
	};
}
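/*
 * Worked example for the offset computation above (illustrative numbers):
 * with 2048-sector buckets, a bucket starting at sector 81920 and
 * sectors_free == 512 gives offset 81920 + 2048 - 512 = 83456, i.e. the
 * pointer addresses the first sector of the space being allocated, since
 * bch2_alloc_sectors_append_ptrs() computes it before decrementing
 * sectors_free.
 */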
/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * We're done allocating from @wp: unlock it, releasing the open buckets that
 * are now full and keeping the rest on the write point.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
static inline void writepoint_init(struct write_point *wp,
				   enum bch_data_type type)
{
	mutex_init(&wp->lock);
	wp->data_type = type;
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_btree);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
	writepoint_init(&c->copygc_write_point, BCH_DATA_user);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_user);

		wp->last_used = local_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}
void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list) {
			prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
				   ob - c->open_buckets,
				   atomic_read(&ob->pin),
				   bch2_data_types[ob->data_type],
				   ob->dev, ob->bucket, ob->gen);
		}
		spin_unlock(&ob->lock);
	}
}