1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2012 Google, Inc.
5 * Foreground allocator code: allocate buckets from freelist, and allocate
6 * at sector granularity from writepoints.
8 * bch2_bucket_alloc() allocates a single bucket from a specific device.
10 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
11 * in a given filesystem.
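 *
 * As a rough usage sketch only (RESERVE_none and the NULL closure, meaning
 * "don't wait for space", are assumptions of this example, not taken from a
 * real caller):
 *
 *	struct open_bucket *ob =
 *		bch2_bucket_alloc(c, ca, RESERVE_none, false, NULL);
 *	if (!IS_ERR(ob)) {
 *		... use ob->dev / ob->bucket ...
 *		bch2_open_bucket_put(c, ob);
 *	}
 */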
15 #include "alloc_background.h"
16 #include "alloc_foreground.h"
17 #include "btree_iter.h"
18 #include "btree_update.h"
21 #include "buckets_waiting_for_journal.h"
24 #include "disk_groups.h"
30 #include <linux/math64.h>
31 #include <linux/rculist.h>
32 #include <linux/rcupdate.h>
33 #include <trace/events/bcachefs.h>
35 const char * const bch2_alloc_reserves[] = {
43 * Open buckets represent a bucket that's currently being allocated from. They serve two purposes:
46 * - They track buckets that have been partially allocated, allowing for
47 * sub-bucket sized allocations - they're used by the sector allocator below
49 * - They provide a reference to the buckets they own that mark and sweep GC
50 *   can find, until the new allocation has a pointer to it inserted into the btree.
53 * When allocating some space with the sector allocator, the allocation comes
54 * with a reference to an open bucket - the caller is required to put that
55 * reference _after_ doing the index update that makes its allocation reachable.
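 *
 * A rough sketch of that pattern (error handling omitted; the caller-side
 * names - op_open_buckets and the target/replication arguments - are
 * placeholders, and bch2_open_bucket_get() is assumed from
 * alloc_foreground.h):
 *
 *	wp = bch2_alloc_sectors_start(c, target, erasure_code, write_point,
 *				      devs_have, nr_replicas,
 *				      nr_replicas_required, reserve, flags, cl);
 *	bch2_alloc_sectors_append_ptrs(c, wp, k, sectors, false);
 *	bch2_open_bucket_get(c, wp, &op_open_buckets);
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	... submit the write, then do the index update that makes it reachable ...
 *
 *	bch2_open_buckets_put(c, &op_open_buckets);
 */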
58 static void bch2_open_bucket_hash_add(struct bch_fs *c, struct open_bucket *ob)
60 open_bucket_idx_t idx = ob - c->open_buckets;
61 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
67 static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *ob)
69 open_bucket_idx_t idx = ob - c->open_buckets;
70 open_bucket_idx_t *slot = open_bucket_hashslot(c, ob->dev, ob->bucket);
72 while (*slot != idx) {
74 slot = &c->open_buckets[*slot].hash;
81 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
83 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
86 bch2_ec_bucket_written(c, ob);
90 percpu_down_read(&c->mark_lock);
96 spin_unlock(&ob->lock);
97 percpu_up_read(&c->mark_lock);
99 spin_lock(&c->freelist_lock);
100 bch2_open_bucket_hash_remove(c, ob);
102 ob->freelist = c->open_buckets_freelist;
103 c->open_buckets_freelist = ob - c->open_buckets;
105 c->open_buckets_nr_free++;
106 ca->nr_open_buckets--;
107 spin_unlock(&c->freelist_lock);
109 closure_wake_up(&c->open_buckets_wait);
112 void bch2_open_bucket_write_error(struct bch_fs *c,
113 struct open_buckets *obs,
116 struct open_bucket *ob;
119 open_bucket_for_each(c, obs, ob, i)
120 if (ob->dev == dev && ob->ec)
121 bch2_ec_bucket_cancel(c, ob);
124 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
126 struct open_bucket *ob;
128 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
130 ob = c->open_buckets + c->open_buckets_freelist;
131 c->open_buckets_freelist = ob->freelist;
132 atomic_set(&ob->pin, 1);
135 c->open_buckets_nr_free--;
139 static void open_bucket_free_unused(struct bch_fs *c,
140 struct write_point *wp,
141 struct open_bucket *ob)
143 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
144 bool may_realloc = wp->data_type == BCH_DATA_user;
146 BUG_ON(ca->open_buckets_partial_nr >
147 ARRAY_SIZE(ca->open_buckets_partial));
149 if (ca->open_buckets_partial_nr <
150 ARRAY_SIZE(ca->open_buckets_partial) &&
152 spin_lock(&c->freelist_lock);
153 ob->on_partial_list = true;
154 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
155 ob - c->open_buckets;
156 spin_unlock(&c->freelist_lock);
158 closure_wake_up(&c->open_buckets_wait);
159 closure_wake_up(&c->freelist_wait);
161 bch2_open_bucket_put(c, ob);
165 /* _only_ for allocating the journal on a new device: */
166 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
168 while (ca->new_fs_bucket_idx < ca->mi.nbuckets) {
169 u64 b = ca->new_fs_bucket_idx++;
171 if (!is_superblock_bucket(ca, b) &&
172 (!ca->buckets_nouse || !test_bit(b, ca->buckets_nouse)))
179 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
183 case RESERVE_btree_movinggc:
185 case RESERVE_movinggc:
186 return OPEN_BUCKETS_COUNT / 4;
188 return OPEN_BUCKETS_COUNT / 2;
192 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
194 enum alloc_reserve reserve,
195 struct bch_alloc_v4 *a,
197 u64 *skipped_need_journal_commit,
201 struct open_bucket *ob;
203 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
208 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
213 if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
214 c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
215 (*skipped_need_journal_commit)++;
219 spin_lock(&c->freelist_lock);
221 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
223 closure_wait(&c->open_buckets_wait, cl);
225 if (!c->blocked_allocate_open_bucket)
226 c->blocked_allocate_open_bucket = local_clock();
228 spin_unlock(&c->freelist_lock);
229 return ERR_PTR(-OPEN_BUCKETS_EMPTY);
232 /* Recheck under lock: */
233 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
234 spin_unlock(&c->freelist_lock);
239 ob = bch2_open_bucket_alloc(c);
241 spin_lock(&ob->lock);
244 ob->sectors_free = ca->mi.bucket_size;
245 ob->alloc_reserve = reserve;
246 ob->dev = ca->dev_idx;
249 spin_unlock(&ob->lock);
251 ca->nr_open_buckets++;
252 bch2_open_bucket_hash_add(c, ob);
254 if (c->blocked_allocate_open_bucket) {
255 bch2_time_stats_update(
256 &c->times[BCH_TIME_blocked_allocate_open_bucket],
257 c->blocked_allocate_open_bucket);
258 c->blocked_allocate_open_bucket = 0;
261 if (c->blocked_allocate) {
262 bch2_time_stats_update(
263 &c->times[BCH_TIME_blocked_allocate],
264 c->blocked_allocate);
265 c->blocked_allocate = 0;
268 spin_unlock(&c->freelist_lock);
270 trace_bucket_alloc(ca, bch2_alloc_reserves[reserve]);
274 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
275 enum alloc_reserve reserve, u64 free_entry,
277 u64 *skipped_need_journal_commit,
279 struct bkey_s_c freespace_k,
282 struct bch_fs *c = trans->c;
283 struct btree_iter iter = { NULL };
285 struct open_bucket *ob;
286 struct bch_alloc_v4 a;
287 u64 b = free_entry & ~(~0ULL << 56);
288 unsigned genbits = free_entry >> 56;
289 struct printbuf buf = PRINTBUF;
292 if (b < ca->mi.first_bucket || b >= ca->mi.nbuckets) {
293 prt_printf(&buf, "freespace btree has bucket outside allowed range %u-%llu\n"
295 ca->mi.first_bucket, ca->mi.nbuckets);
296 bch2_bkey_val_to_text(&buf, c, freespace_k);
297 bch2_trans_inconsistent(trans, "%s", buf.buf);
302 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(ca->dev_idx, b), BTREE_ITER_CACHED);
303 k = bch2_btree_iter_peek_slot(&iter);
310 bch2_alloc_to_v4(k, &a);
312 if (genbits != (alloc_freespace_genbits(a) >> 56)) {
313 prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
315 genbits, alloc_freespace_genbits(a) >> 56);
316 bch2_bkey_val_to_text(&buf, c, freespace_k);
317 prt_printf(&buf, "\n ");
318 bch2_bkey_val_to_text(&buf, c, k);
319 bch2_trans_inconsistent(trans, "%s", buf.buf);
325 if (a.data_type != BCH_DATA_free) {
326 prt_printf(&buf, "non free bucket in freespace btree\n"
328 bch2_bkey_val_to_text(&buf, c, freespace_k);
329 prt_printf(&buf, "\n ");
330 bch2_bkey_val_to_text(&buf, c, k);
331 bch2_trans_inconsistent(trans, "%s", buf.buf);
336 ob = __try_alloc_bucket(c, ca, b, reserve, &a,
338 skipped_need_journal_commit,
342 bch2_trans_iter_exit(trans, &iter);
347 static struct open_bucket *try_alloc_partial_bucket(struct bch_fs *c, struct bch_dev *ca,
348 enum alloc_reserve reserve)
350 struct open_bucket *ob;
353 spin_lock(&c->freelist_lock);
355 for (i = ca->open_buckets_partial_nr - 1; i >= 0; --i) {
356 ob = c->open_buckets + ca->open_buckets_partial[i];
358 if (reserve <= ob->alloc_reserve) {
359 array_remove_item(ca->open_buckets_partial,
360 ca->open_buckets_partial_nr,
362 ob->on_partial_list = false;
363 ob->alloc_reserve = reserve;
364 spin_unlock(&c->freelist_lock);
369 spin_unlock(&c->freelist_lock);
374 * This path is for before the freespace btree is initialized:
376 * If ca->new_fs_bucket_idx is nonzero, we haven't yet marked superblock &
377 * journal buckets - journal buckets will be < ca->new_fs_bucket_idx
379 static noinline struct open_bucket *
380 bch2_bucket_alloc_trans_early(struct btree_trans *trans,
382 enum alloc_reserve reserve,
386 u64 *skipped_need_journal_commit,
390 struct btree_iter iter;
392 struct open_bucket *ob = NULL;
395 *cur_bucket = max_t(u64, *cur_bucket, ca->mi.first_bucket);
396 *cur_bucket = max_t(u64, *cur_bucket, ca->new_fs_bucket_idx);
398 for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
399 BTREE_ITER_SLOTS, k, ret) {
400 struct bch_alloc_v4 a;
402 if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
405 if (ca->new_fs_bucket_idx &&
406 is_superblock_bucket(ca, k.k->p.offset))
409 bch2_alloc_to_v4(k, &a);
411 if (a.data_type != BCH_DATA_free)
416 ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
418 skipped_need_journal_commit,
424 bch2_trans_iter_exit(trans, &iter);
426 *cur_bucket = iter.pos.offset;
428 return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
431 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
433 enum alloc_reserve reserve,
437 u64 *skipped_need_journal_commit,
441 struct btree_iter iter;
443 struct open_bucket *ob = NULL;
446 if (unlikely(!ca->mi.freespace_initialized))
447 return bch2_bucket_alloc_trans_early(trans, ca, reserve,
451 skipped_need_journal_commit,
455 BUG_ON(ca->new_fs_bucket_idx);
457 for_each_btree_key_norestart(trans, iter, BTREE_ID_freespace,
458 POS(ca->dev_idx, *cur_bucket), 0, k, ret) {
459 if (k.k->p.inode != ca->dev_idx)
462 for (*cur_bucket = max(*cur_bucket, bkey_start_offset(k.k));
463 *cur_bucket < k.k->p.offset && !ob;
465 if (btree_trans_too_many_iters(trans)) {
466 ob = ERR_PTR(-EINTR);
472 ob = try_alloc_bucket(trans, ca, reserve,
475 skipped_need_journal_commit,
482 bch2_trans_iter_exit(trans, &iter);
484 return ob ?: ERR_PTR(ret);
488 * bch2_bucket_alloc - allocate a single bucket from a specific device
490 * Returns a pointer to the newly allocated open_bucket on success, or an ERR_PTR() on failure
492 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
493 enum alloc_reserve reserve,
494 bool may_alloc_partial,
497 struct open_bucket *ob = NULL;
498 struct bch_dev_usage usage;
501 u64 buckets_seen = 0;
502 u64 skipped_open = 0;
503 u64 skipped_need_journal_commit = 0;
504 u64 skipped_nouse = 0;
505 bool waiting = false;
508 usage = bch2_dev_usage_read(ca);
509 avail = __dev_buckets_available(ca, usage, reserve);
511 if (usage.d[BCH_DATA_need_discard].buckets > avail)
514 if (usage.d[BCH_DATA_need_gc_gens].buckets > avail)
517 if (should_invalidate_buckets(ca, usage))
518 bch2_do_invalidates(c);
521 if (cl && !waiting) {
522 closure_wait(&c->freelist_wait, cl);
527 if (!c->blocked_allocate)
528 c->blocked_allocate = local_clock();
530 ob = ERR_PTR(-FREELIST_EMPTY);
535 closure_wake_up(&c->freelist_wait);
537 if (may_alloc_partial) {
538 ob = try_alloc_partial_bucket(c, ca, reserve);
543 ret = bch2_trans_do(c, NULL, NULL, 0,
544 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
548 &skipped_need_journal_commit,
552 if (skipped_need_journal_commit * 2 > avail)
553 bch2_journal_flush_async(&c->journal, NULL);
556 ob = ERR_PTR(ret ?: -FREELIST_EMPTY);
559 trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve], avail,
562 skipped_need_journal_commit,
564 cl == NULL, PTR_ERR(ob));
565 atomic_long_inc(&c->bucket_alloc_fail);
571 static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
572 unsigned l, unsigned r)
574 return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
575 (stripe->next_alloc[l] < stripe->next_alloc[r]));
578 #define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
580 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
581 struct dev_stripe_state *stripe,
582 struct bch_devs_mask *devs)
584 struct dev_alloc_list ret = { .nr = 0 };
587 for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
588 ret.devs[ret.nr++] = i;
590 bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
594 void bch2_dev_stripe_increment(struct bch_dev *ca,
595 struct dev_stripe_state *stripe)
597 u64 *v = stripe->next_alloc + ca->dev_idx;
598 u64 free_space = dev_buckets_available(ca, RESERVE_none);
599 u64 free_space_inv = free_space
600 ? div64_u64(1ULL << 48, free_space)
604 if (*v + free_space_inv >= *v)
605 *v += free_space_inv;
609 for (v = stripe->next_alloc;
610 v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
611 *v = *v < scale ? 0 : *v - scale;
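/*
 * Informal summary of the striping above (inferred from this code, not
 * upstream documentation): next_alloc[] holds a per-device cost that grows by
 * roughly 2^48 / free_space each time a device is picked, and
 * bch2_dev_alloc_list() sorts candidate devices by that cost, lowest first.
 * A device with twice the free space therefore accumulates cost half as fast
 * and is picked about twice as often; the final loop rescales all entries
 * downward so the counters don't grow without bound.
 */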
614 #define BUCKET_MAY_ALLOC_PARTIAL (1 << 0)
615 #define BUCKET_ALLOC_USE_DURABILITY (1 << 1)
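/*
 * Informal notes on the flags above, inferred from their use below:
 * BUCKET_MAY_ALLOC_PARTIAL lets the allocator hand back a previously
 * partially-used open bucket from the device's open_buckets_partial list;
 * BUCKET_ALLOC_USE_DURABILITY makes add_new_bucket() count a device's
 * durability towards nr_effective, rather than counting each bucket as a
 * single replica.
 */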
617 static void add_new_bucket(struct bch_fs *c,
618 struct open_buckets *ptrs,
619 struct bch_devs_mask *devs_may_alloc,
620 unsigned *nr_effective,
623 struct open_bucket *ob)
625 unsigned durability =
626 bch_dev_bkey_exists(c, ob->dev)->mi.durability;
628 __clear_bit(ob->dev, devs_may_alloc->d);
629 *nr_effective += (flags & BUCKET_ALLOC_USE_DURABILITY)
631 *have_cache |= !durability;
633 ob_push(c, ptrs, ob);
636 int bch2_bucket_alloc_set(struct bch_fs *c,
637 struct open_buckets *ptrs,
638 struct dev_stripe_state *stripe,
639 struct bch_devs_mask *devs_may_alloc,
640 unsigned nr_replicas,
641 unsigned *nr_effective,
643 enum alloc_reserve reserve,
647 struct dev_alloc_list devs_sorted =
648 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
651 int ret = -INSUFFICIENT_DEVICES;
654 BUG_ON(*nr_effective >= nr_replicas);
656 for (i = 0; i < devs_sorted.nr; i++) {
657 struct open_bucket *ob;
659 dev = devs_sorted.devs[i];
662 ca = rcu_dereference(c->devs[dev]);
664 percpu_ref_get(&ca->ref);
670 if (!ca->mi.durability && *have_cache) {
671 percpu_ref_put(&ca->ref);
675 ob = bch2_bucket_alloc(c, ca, reserve,
676 flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
678 bch2_dev_stripe_increment(ca, stripe);
679 percpu_ref_put(&ca->ref);
689 add_new_bucket(c, ptrs, devs_may_alloc,
690 nr_effective, have_cache, flags, ob);
692 if (*nr_effective >= nr_replicas) {
701 /* Allocate from stripes: */
704 * if we can't allocate a new stripe because there are already too many
705 * partially filled stripes, force allocating from an existing stripe even when
706 * it's to a device we don't want:
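 *
 * Informally: bucket_alloc_from_stripe() below gets (or creates) an
 * ec_stripe_head for the target, then hands out one of the stripe's
 * not-yet-allocated data blocks as an ordinary open_bucket, taking a ref on
 * the stripe (h->s->pin) so it stays alive until the allocation completes.
 */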
709 static int bucket_alloc_from_stripe(struct bch_fs *c,
710 struct open_buckets *ptrs,
711 struct write_point *wp,
712 struct bch_devs_mask *devs_may_alloc,
714 unsigned erasure_code,
715 unsigned nr_replicas,
716 unsigned *nr_effective,
721 struct dev_alloc_list devs_sorted;
722 struct ec_stripe_head *h;
723 struct open_bucket *ob;
733 if (ec_open_bucket(c, ptrs))
736 h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
737 wp == &c->copygc_write_point,
744 devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
746 for (i = 0; i < devs_sorted.nr; i++)
747 for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
748 if (!h->s->blocks[ec_idx])
751 ob = c->open_buckets + h->s->blocks[ec_idx];
752 if (ob->dev == devs_sorted.devs[i] &&
753 !test_and_set_bit(ec_idx, h->s->blocks_allocated))
758 ca = bch_dev_bkey_exists(c, ob->dev);
763 add_new_bucket(c, ptrs, devs_may_alloc,
764 nr_effective, have_cache, flags, ob);
765 atomic_inc(&h->s->pin);
767 bch2_ec_stripe_head_put(c, h);
771 /* Sector allocator */
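/*
 * Rough overview (a reading of the code below, not upstream documentation): a
 * write_point owns a small set of open buckets, typically one per device
 * being written to.  The sector allocator first reuses buckets already
 * attached to the write point (get_buckets_from_writepoint()), then tries
 * erasure coded stripes if requested, and finally allocates fresh buckets via
 * bch2_bucket_alloc_set(); sectors are then carved out of those buckets until
 * one fills up.
 */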
773 static void get_buckets_from_writepoint(struct bch_fs *c,
774 struct open_buckets *ptrs,
775 struct write_point *wp,
776 struct bch_devs_mask *devs_may_alloc,
777 unsigned nr_replicas,
778 unsigned *nr_effective,
783 struct open_buckets ptrs_skip = { .nr = 0 };
784 struct open_bucket *ob;
787 open_bucket_for_each(c, &wp->ptrs, ob, i) {
788 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
790 if (*nr_effective < nr_replicas &&
791 test_bit(ob->dev, devs_may_alloc->d) &&
792 (ca->mi.durability ||
793 (wp->data_type == BCH_DATA_user && !*have_cache)) &&
794 (ob->ec || !need_ec)) {
795 add_new_bucket(c, ptrs, devs_may_alloc,
796 nr_effective, have_cache,
799 ob_push(c, &ptrs_skip, ob);
802 wp->ptrs = ptrs_skip;
805 static int open_bucket_add_buckets(struct bch_fs *c,
806 struct open_buckets *ptrs,
807 struct write_point *wp,
808 struct bch_devs_list *devs_have,
810 unsigned erasure_code,
811 unsigned nr_replicas,
812 unsigned *nr_effective,
814 enum alloc_reserve reserve,
818 struct bch_devs_mask devs;
819 struct open_bucket *ob;
820 struct closure *cl = NULL;
825 devs = target_rw_devs(c, wp->data_type, target);
828 /* Don't allocate from devices we already have pointers to: */
829 for (i = 0; i < devs_have->nr; i++)
830 __clear_bit(devs_have->devs[i], devs.d);
832 open_bucket_for_each(c, ptrs, ob, i)
833 __clear_bit(ob->dev, devs.d);
836 if (!ec_open_bucket(c, ptrs)) {
837 get_buckets_from_writepoint(c, ptrs, wp, &devs,
838 nr_replicas, nr_effective,
839 have_cache, flags, true);
840 if (*nr_effective >= nr_replicas)
844 if (!ec_open_bucket(c, ptrs)) {
845 ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
846 target, erasure_code,
847 nr_replicas, nr_effective,
848 have_cache, flags, _cl);
849 if (ret == -FREELIST_EMPTY ||
850 ret == -OPEN_BUCKETS_EMPTY)
852 if (*nr_effective >= nr_replicas)
857 get_buckets_from_writepoint(c, ptrs, wp, &devs,
858 nr_replicas, nr_effective,
859 have_cache, flags, false);
860 if (*nr_effective >= nr_replicas)
865 * Try nonblocking first, so that if one device is full we'll try from other devices:
868 ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
869 nr_replicas, nr_effective, have_cache,
871 if (ret && ret != -INSUFFICIENT_DEVICES && !cl && _cl) {
879 void bch2_open_buckets_stop_dev(struct bch_fs *c, struct bch_dev *ca,
880 struct open_buckets *obs)
882 struct open_buckets ptrs = { .nr = 0 };
883 struct open_bucket *ob, *ob2;
886 open_bucket_for_each(c, obs, ob, i) {
887 bool drop = !ca || ob->dev == ca->dev_idx;
889 if (!drop && ob->ec) {
890 mutex_lock(&ob->ec->lock);
891 for (j = 0; j < ob->ec->new_stripe.key.v.nr_blocks; j++) {
892 if (!ob->ec->blocks[j])
895 ob2 = c->open_buckets + ob->ec->blocks[j];
896 drop |= ob2->dev == ca->dev_idx;
898 mutex_unlock(&ob->ec->lock);
902 bch2_open_bucket_put(c, ob);
904 ob_push(c, &ptrs, ob);
910 void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
911 struct write_point *wp)
913 mutex_lock(&wp->lock);
914 bch2_open_buckets_stop_dev(c, ca, &wp->ptrs);
915 mutex_unlock(&wp->lock);
918 static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
919 unsigned long write_point)
922 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
924 return &c->write_points_hash[hash];
927 static struct write_point *__writepoint_find(struct hlist_head *head,
928 unsigned long write_point)
930 struct write_point *wp;
933 hlist_for_each_entry_rcu(wp, head, node)
934 if (wp->write_point == write_point)
942 static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
944 u64 stranded = c->write_points_nr * c->bucket_size_max;
945 u64 free = bch2_fs_usage_read_short(c).free;
947 return stranded * factor > free;
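/*
 * Heuristic, restated informally: each write point can strand up to
 * bucket_size_max sectors of partially written bucket space, so growing the
 * set of write points is refused once that worst case would exceed 1/32 of
 * the filesystem's free space (factor 32 below), and write points are
 * reclaimed once it exceeds 1/8 (factor 8 in try_decrease_writepoints()).
 */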
950 static bool try_increase_writepoints(struct bch_fs *c)
952 struct write_point *wp;
954 if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
955 too_many_writepoints(c, 32))
958 wp = c->write_points + c->write_points_nr++;
959 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
963 static bool try_decrease_writepoints(struct bch_fs *c,
966 struct write_point *wp;
968 mutex_lock(&c->write_points_hash_lock);
969 if (c->write_points_nr < old_nr) {
970 mutex_unlock(&c->write_points_hash_lock);
974 if (c->write_points_nr == 1 ||
975 !too_many_writepoints(c, 8)) {
976 mutex_unlock(&c->write_points_hash_lock);
980 wp = c->write_points + --c->write_points_nr;
982 hlist_del_rcu(&wp->node);
983 mutex_unlock(&c->write_points_hash_lock);
985 bch2_writepoint_stop(c, NULL, wp);
989 static struct write_point *writepoint_find(struct bch_fs *c,
990 unsigned long write_point)
992 struct write_point *wp, *oldest;
993 struct hlist_head *head;
995 if (!(write_point & 1UL)) {
996 wp = (struct write_point *) write_point;
997 mutex_lock(&wp->lock);
1001 head = writepoint_hash(c, write_point);
1003 wp = __writepoint_find(head, write_point);
1006 mutex_lock(&wp->lock);
1007 if (wp->write_point == write_point)
1009 mutex_unlock(&wp->lock);
1012 restart_find_oldest:
1014 for (wp = c->write_points;
1015 wp < c->write_points + c->write_points_nr; wp++)
1016 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1019 mutex_lock(&oldest->lock);
1020 mutex_lock(&c->write_points_hash_lock);
1021 if (oldest >= c->write_points + c->write_points_nr ||
1022 try_increase_writepoints(c)) {
1023 mutex_unlock(&c->write_points_hash_lock);
1024 mutex_unlock(&oldest->lock);
1025 goto restart_find_oldest;
1028 wp = __writepoint_find(head, write_point);
1029 if (wp && wp != oldest) {
1030 mutex_unlock(&c->write_points_hash_lock);
1031 mutex_unlock(&oldest->lock);
1036 hlist_del_rcu(&wp->node);
1037 wp->write_point = write_point;
1038 hlist_add_head_rcu(&wp->node, head);
1039 mutex_unlock(&c->write_points_hash_lock);
1041 wp->last_used = sched_clock();
1046 * Get us an open_bucket we can allocate from, return with it locked:
1048 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1050 unsigned erasure_code,
1051 struct write_point_specifier write_point,
1052 struct bch_devs_list *devs_have,
1053 unsigned nr_replicas,
1054 unsigned nr_replicas_required,
1055 enum alloc_reserve reserve,
1059 struct write_point *wp;
1060 struct open_bucket *ob;
1061 struct open_buckets ptrs;
1062 unsigned nr_effective, write_points_nr;
1063 unsigned ob_flags = 0;
1068 if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
1069 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
1071 BUG_ON(!nr_replicas || !nr_replicas_required);
1075 write_points_nr = c->write_points_nr;
1078 wp = writepoint_find(c, write_point.v);
1080 if (wp->data_type == BCH_DATA_user)
1081 ob_flags |= BUCKET_MAY_ALLOC_PARTIAL;
1083 /* metadata may not allocate on cache devices: */
1084 if (wp->data_type != BCH_DATA_user)
1087 if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
1088 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1089 target, erasure_code,
1090 nr_replicas, &nr_effective,
1091 &have_cache, reserve,
1094 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1095 target, erasure_code,
1096 nr_replicas, &nr_effective,
1097 &have_cache, reserve,
1102 ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have,
1104 nr_replicas, &nr_effective,
1105 &have_cache, reserve,
1109 BUG_ON(!ret && nr_effective < nr_replicas);
1111 if (erasure_code && !ec_open_bucket(c, &ptrs))
1112 pr_debug("failed to get ec bucket: ret %u", ret);
1114 if (ret == -INSUFFICIENT_DEVICES &&
1115 nr_effective >= nr_replicas_required)
1121 /* Free buckets we didn't use: */
1122 open_bucket_for_each(c, &wp->ptrs, ob, i)
1123 open_bucket_free_unused(c, wp, ob);
1127 wp->sectors_free = UINT_MAX;
1129 open_bucket_for_each(c, &wp->ptrs, ob, i)
1130 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1132 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1136 open_bucket_for_each(c, &wp->ptrs, ob, i)
1137 if (ptrs.nr < ARRAY_SIZE(ptrs.v))
1138 ob_push(c, &ptrs, ob);
1140 open_bucket_free_unused(c, wp, ob);
1143 mutex_unlock(&wp->lock);
1145 if (ret == -FREELIST_EMPTY &&
1146 try_decrease_writepoints(c, write_points_nr))
1150 case -OPEN_BUCKETS_EMPTY:
1151 case -FREELIST_EMPTY:
1152 return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
1153 case -INSUFFICIENT_DEVICES:
1154 return ERR_PTR(-EROFS);
1156 return ERR_PTR(ret);
1160 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
1162 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1164 return (struct bch_extent_ptr) {
1165 .type = 1 << BCH_EXTENT_ENTRY_ptr,
1168 .offset = bucket_to_sector(ca, ob->bucket) +
1169 ca->mi.bucket_size -
1175 * Append pointers to the space we just allocated to @k, and mark @sectors space
1176 * as allocated out of @ob
1178 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1179 struct bkey_i *k, unsigned sectors,
1183 struct open_bucket *ob;
1186 BUG_ON(sectors > wp->sectors_free);
1187 wp->sectors_free -= sectors;
1189 open_bucket_for_each(c, &wp->ptrs, ob, i) {
1190 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
1191 struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
1193 ptr.cached = cached ||
1194 (!ca->mi.durability &&
1195 wp->data_type == BCH_DATA_user);
1197 bch2_bkey_append_ptr(k, ptr);
1199 BUG_ON(sectors > ob->sectors_free);
1200 ob->sectors_free -= sectors;
1205 * Release the write point: open buckets that are now full are put, the rest
1206 * are kept on @wp for the next allocation
1208 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1210 struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
1211 struct open_bucket *ob;
1214 open_bucket_for_each(c, &wp->ptrs, ob, i)
1215 ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
1218 mutex_unlock(&wp->lock);
1220 bch2_open_buckets_put(c, &ptrs);
1223 static inline void writepoint_init(struct write_point *wp,
1224 enum bch_data_type type)
1226 mutex_init(&wp->lock);
1227 wp->data_type = type;
1230 void bch2_fs_allocator_foreground_init(struct bch_fs *c)
1232 struct open_bucket *ob;
1233 struct write_point *wp;
1235 mutex_init(&c->write_points_hash_lock);
1236 c->write_points_nr = ARRAY_SIZE(c->write_points);
1238 /* open bucket 0 is a sentinel NULL: */
1239 spin_lock_init(&c->open_buckets[0].lock);
1241 for (ob = c->open_buckets + 1;
1242 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
1243 spin_lock_init(&ob->lock);
1244 c->open_buckets_nr_free++;
1246 ob->freelist = c->open_buckets_freelist;
1247 c->open_buckets_freelist = ob - c->open_buckets;
1250 writepoint_init(&c->btree_write_point, BCH_DATA_btree);
1251 writepoint_init(&c->rebalance_write_point, BCH_DATA_user);
1252 writepoint_init(&c->copygc_write_point, BCH_DATA_user);
1254 for (wp = c->write_points;
1255 wp < c->write_points + c->write_points_nr; wp++) {
1256 writepoint_init(wp, BCH_DATA_user);
1258 wp->last_used = sched_clock();
1259 wp->write_point = (unsigned long) wp;
1260 hlist_add_head_rcu(&wp->node,
1261 writepoint_hash(c, wp->write_point));
1265 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1267 struct open_bucket *ob;
1269 for (ob = c->open_buckets;
1270 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1272 spin_lock(&ob->lock);
1273 if (ob->valid && !ob->on_partial_list) {
1274 prt_printf(out, "%zu ref %u type %s %u:%llu:%u\n",
1275 ob - c->open_buckets,
1276 atomic_read(&ob->pin),
1277 bch2_data_types[ob->data_type],
1278 ob->dev, ob->bucket, ob->gen);
1280 spin_unlock(&ob->lock);