/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen (see the sketch following this
 * comment).
 *
 * The gens (along with the priorities; it's really the gens that are important
 * but the code is named as if it's the priorities) are written in an arbitrary
 * list of buckets on disk, with a pointer to them in the journal header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are available.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
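/*
 * Illustrative sketch, not part of the build: a btree pointer is only valid
 * while its gen matches the gen of the bucket it points into, which is
 * roughly the check below (ptr_stale() performs the inverse test, see
 * verify_not_stale() further down). The helper name is hypothetical; only
 * interfaces already used in this file appear in it.
 *
 *	static inline bool ptr_valid(struct bch_dev *ca,
 *				     const struct bch_extent_ptr *ptr)
 *	{
 *		struct bucket_array *buckets = bucket_array(ca);
 *		size_t b = PTR_BUCKET_NR(ca, ptr);
 *
 *		return ptr->gen == buckets->b[b].mark.gen;
 *	}
 */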
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"

#include <linux/math64.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <trace/events/bcachefs.h>
enum bucket_alloc_ret {
	ALLOC_SUCCESS		= 0,
	OPEN_BUCKETS_EMPTY,
	FREELIST_EMPTY,		/* Allocator thread not keeping up */
};
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 * - They track buckets that have been partially allocated, allowing for
 *   sub-bucket sized allocations - they're used by the sector allocator below
 *
 * - They provide a reference to the buckets they own that mark and sweep GC
 *   can find, until the new allocation has a pointer to it inserted into the
 *   btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
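/*
 * Rough caller-side sketch, for illustration only - the real write path lives
 * in the IO code and error handling is omitted; @e and @obs stand in for
 * hypothetical caller-owned variables:
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 *
 *	... write the data, do the index update that inserts @e ...
 *
 *	bch2_open_buckets_put(c, &obs);		only after the update is visible
 */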
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	percpu_down_read_preempt_disable(&c->usage_lock);
	spin_lock(&ob->lock);

	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;

	spin_unlock(&ob->lock);
	percpu_up_read_preempt_enable(&c->usage_lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}
static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
static void open_bucket_free_unused(struct bch_fs *c,
				    struct write_point *wp,
				    struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (wp->type == BCH_DATA_USER) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct open_buckets *obs)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, obs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;
	ssize_t b;

	rcu_read_lock();
	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			goto success;
	b = -1;
success:
	rcu_read_unlock();
	return b;
}
static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_RESERVE / 2;
	default:
		return BTREE_NODE_RESERVE;
	}
}
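/*
 * The reserve is measured in open buckets: bch2_bucket_alloc() below fails
 * with OPEN_BUCKETS_EMPTY rather than dip into the last
 * open_buckets_reserved(reserve) entries, so that higher priority allocations
 * (btree nodes in particular) can still make progress when ordinary writes
 * have pinned most of the open_buckets array.
 */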
/*
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns a pointer to the newly allocated open_bucket on success, or an
 * ERR_PTR() on failure.
 */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum alloc_reserve reserve,
				      bool may_alloc_partial,
				      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	long bucket;

	spin_lock(&c->freelist_lock);

	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ob;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return ERR_PTR(-OPEN_BUCKETS_EMPTY);
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return ERR_PTR(-FREELIST_EMPTY);
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	buckets = bucket_array(ca);

	ob->valid	= true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr		= (struct bch_extent_ptr) {
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);
	spin_unlock(&ob->lock);

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob;
}
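/*
 * Three-way comparison that can't overflow: returns -1, 0 or 1, so the device
 * with the smallest next_alloc key - the one that has been "charged" the
 * least, see bch2_wp_rescale() below - sorts to the front of the allocation
 * list and is tried first.
 */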
static int __dev_alloc_cmp(struct write_point *wp,
			   unsigned l, unsigned r)
{
	return ((wp->next_alloc[l] > wp->next_alloc[r]) -
		(wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)
struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
					 struct write_point *wp,
					 struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
	return ret;
}
void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
		     struct write_point *wp)
{
	u64 *v = wp->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = wp->next_alloc;
	     v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
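/*
 * Worked example, with illustrative numbers: a device with 2^18 free buckets
 * is charged free_space_inv = 2^48 / 2^18 = 2^30 per allocation, while a
 * device with 2^19 free buckets is charged 2^29 - half as much - so it sorts
 * first more often in bch2_wp_alloc_list() and over time receives allocations
 * roughly in proportion to its free space. The loop at the end subtracts the
 * same amount from every device's key, preserving their relative order while
 * keeping the counters from growing without bound.
 */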
static int bch2_bucket_alloc_set(struct bch_fs *c,
				 struct open_buckets *ptrs,
				 struct write_point *wp,
				 struct bch_devs_mask *devs_may_alloc,
				 unsigned nr_replicas,
				 unsigned *nr_effective,
				 bool *have_cache,
				 enum alloc_reserve reserve,
				 struct closure *cl)
{
	struct dev_alloc_list devs_sorted =
		bch2_wp_alloc_list(c, wp, devs_may_alloc);
	struct bch_dev *ca;
	bool alloc_failure = false;
	unsigned i;

	BUG_ON(*nr_effective >= nr_replicas);

	for (i = 0; i < devs_sorted.nr; i++) {
		struct open_bucket *ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability &&
		    (*have_cache ||
		     wp->type != BCH_DATA_USER))
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       wp->type == BCH_DATA_USER, cl);
		if (IS_ERR(ob)) {
			enum bucket_alloc_ret ret = -PTR_ERR(ob);

			WARN_ON(reserve == RESERVE_MOVINGGC &&
				ret != OPEN_BUCKETS_EMPTY);

			if (cl)
				return -EAGAIN;
			if (ret == OPEN_BUCKETS_EMPTY)
				return -ENOSPC;
			alloc_failure = true;
			continue;
		}

		__clear_bit(ca->dev_idx, devs_may_alloc->d);
		*nr_effective += ca->mi.durability;
		*have_cache |= !ca->mi.durability;

		ob_push(c, ptrs, ob);

		bch2_wp_rescale(c, ca, wp);

		if (*nr_effective >= nr_replicas)
			return 0;
	}

	return alloc_failure ? -ENOSPC : -EROFS;
}
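/*
 * On the fallthrough path above, -EROFS means we ran out of candidate devices
 * without any of them failing (there simply aren't enough usable devices for
 * the requested replicas), while -ENOSPC means at least one device's freelist
 * was empty. open_bucket_add_buckets() below uses the distinction to decide
 * whether a blocking retry is worthwhile.
 */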
/* Sector allocator */
static int get_buckets_from_writepoint(struct bch_fs *c,
				       struct open_buckets *ptrs,
				       struct write_point *wp,
				       struct bch_devs_mask *devs_may_alloc,
				       unsigned nr_replicas,
				       unsigned *nr_effective,
				       bool *have_cache)
{
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		if (*nr_effective < nr_replicas &&
		    test_bit(ob->ptr.dev, devs_may_alloc->d) &&
		    (ca->mi.durability ||
		     (wp->type == BCH_DATA_USER && !*have_cache))) {
			__clear_bit(ob->ptr.dev, devs_may_alloc->d);
			*nr_effective += ca->mi.durability;
			*have_cache |= !ca->mi.durability;

			ob_push(c, ptrs, ob);
		} else {
			ob_push(c, &ptrs_skip, ob);
		}
	}
	wp->ptrs = ptrs_skip;

	return *nr_effective < nr_replicas ? -ENOSPC : 0;
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   struct open_buckets *ptrs,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   u16 target,
				   unsigned nr_replicas,
				   unsigned *nr_effective,
				   bool *have_cache,
				   enum alloc_reserve reserve,
				   struct closure *cl)
{
	struct bch_devs_mask devs;
	const struct bch_devs_mask *t;
	struct open_bucket *ob;
	unsigned i;
	int ret;

	percpu_down_read_preempt_disable(&c->usage_lock);
	rcu_read_lock();

	devs = c->rw_devs[wp->type];

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	t = bch2_target_to_mask(c, target);
	if (t)
		bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);

	ret = get_buckets_from_writepoint(c, ptrs, wp, &devs,
					  nr_replicas, nr_effective, have_cache);
	if (!ret)
		goto out;

	/*
	 * Try nonblocking first, so that if one device is full we'll try from
	 * other devices:
	 */
	ret = bch2_bucket_alloc_set(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, NULL);
	if (!ret || ret == -EROFS || !cl)
		goto out;

	ret = bch2_bucket_alloc_set(c, ptrs, wp, &devs,
				    nr_replicas, nr_effective, have_cache,
				    reserve, cl);
out:
	rcu_read_unlock();
	percpu_up_read_preempt_enable(&c->usage_lock);

	return ret;
}
void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca,
			  struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	mutex_lock(&wp->lock);
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (!ca || ob->ptr.dev == ca->dev_idx)
			open_bucket_free_unused(c, wp, ob);
		else
			ob_push(c, &ptrs, ob);

	wp->ptrs = ptrs;
	mutex_unlock(&wp->lock);
}
static inline struct hlist_head *writepoint_hash(struct bch_fs *c,
						 unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}
static inline bool too_many_writepoints(struct bch_fs *c, unsigned factor)
{
	u64 stranded = c->write_points_nr * c->bucket_size_max;
	u64 free = bch2_fs_sectors_free(c, bch2_fs_usage_read(c));

	return stranded * factor > free;
}
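/*
 * Example, with illustrative numbers: 32 active write points and a
 * bucket_size_max of 1024 sectors can strand up to 32 * 1024 = 32768 sectors
 * in partially filled buckets. try_increase_writepoints() below only adds a
 * write point while that figure times 32 still fits in free space, and
 * try_decrease_writepoints() starts reclaiming write points once stranded
 * space times 8 exceeds free space.
 */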
static bool try_increase_writepoints(struct bch_fs *c)
{
	struct write_point *wp;

	if (c->write_points_nr == ARRAY_SIZE(c->write_points) ||
	    too_many_writepoints(c, 32))
		return false;

	wp = c->write_points + c->write_points_nr++;
	hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	return true;
}
static bool try_decrease_writepoints(struct bch_fs *c,
				     unsigned old_nr)
{
	struct write_point *wp;

	mutex_lock(&c->write_points_hash_lock);
	if (c->write_points_nr < old_nr) {
		mutex_unlock(&c->write_points_hash_lock);
		return true;
	}

	if (c->write_points_nr == 1 ||
	    !too_many_writepoints(c, 8)) {
		mutex_unlock(&c->write_points_hash_lock);
		return false;
	}

	wp = c->write_points + --c->write_points_nr;

	hlist_del_rcu(&wp->node);
	mutex_unlock(&c->write_points_hash_lock);

	bch2_writepoint_stop(c, NULL, wp);
	return true;
}
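/*
 * The write_point argument below is either a pointer to a specific struct
 * write_point (the btree and rebalance write points, for instance) or a
 * hashed identifier, which is expected to have its low bit set as a tag.
 * Since the structs are at least word aligned, a clear low bit means "this is
 * really a pointer" and we can lock it directly.
 */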
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}

restart_find_oldest:
	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	if (oldest >= c->write_points + c->write_points_nr ||
	    try_increase_writepoints(c)) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto restart_find_oldest;
	}

	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
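/*
 * Allocation strategy, roughly: if a target was specified and we're allowed
 * to fall back from it, first try the target's devices without blocking, then
 * fall back to any rw device, possibly blocking on @cl; on -ENOSPC the
 * allocation may also be retried after shrinking the number of write points.
 * Returns the locked write point on success and an ERR_PTR() otherwise.
 */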
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
					     unsigned target,
					     struct write_point_specifier write_point,
					     struct bch_devs_list *devs_have,
					     unsigned nr_replicas,
					     unsigned nr_replicas_required,
					     enum alloc_reserve reserve,
					     unsigned flags,
					     struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	unsigned nr_effective = 0;
	struct open_buckets ptrs = { .nr = 0 };
	bool have_cache = false;
	unsigned write_points_nr;
	int ret, i;

	BUG_ON(!nr_replicas || !nr_replicas_required);
retry:
	write_points_nr = c->write_points_nr;
	wp = writepoint_find(c, write_point.v);

	if (!target || (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS)) {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, target,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, target,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, NULL);
		if (!ret)
			goto alloc_done;

		ret = open_bucket_add_buckets(c, &ptrs, wp, devs_have, 0,
					      nr_replicas, &nr_effective,
					      &have_cache, reserve, cl);
	}
alloc_done:
	BUG_ON(!ret && nr_effective < nr_replicas);

	if (ret == -EROFS &&
	    nr_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	/* Free buckets we didn't use: */
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		open_bucket_free_unused(c, wp, ob);

	wp->ptrs = ptrs;

	wp->sectors_free = UINT_MAX;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, &wp->ptrs);

	return wp;
err:
	open_bucket_for_each(c, &wp->ptrs, ob, i)
		if (ptrs.nr < ARRAY_SIZE(ptrs.v))
			ob_push(c, &ptrs, ob);
		else
			open_bucket_free_unused(c, wp, ob);
	wp->ptrs = ptrs;

	mutex_unlock(&wp->lock);

	if (ret == -ENOSPC &&
	    try_decrease_writepoints(c, write_points_nr))
		goto retry;

	return ERR_PTR(ret);
}
/*
 * Append pointers to the space we just allocated to @e, and mark @sectors
 * space as allocated out of @wp
 */
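/*
 * Each appended pointer's offset is advanced past the sectors already handed
 * out from its bucket (bucket_size - sectors_free), so successive allocations
 * from the same open bucket produce adjacent extents on disk.
 */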
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i_extent *e, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

		tmp.cached = bkey_extent_is_cached(&e->k) ||
			(!ca->mi.durability && wp->type == BCH_DATA_USER);

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		extent_ptr_append(e, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Release the write point: open buckets that still have space stay attached
 * to it, our references on the ones we've filled are dropped, and the write
 * point lock is released.
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}
void bch2_fs_allocator_foreground_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	c->write_points_nr = ARRAY_SIZE(c->write_points);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + c->write_points_nr; wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used	= sched_clock();
		wp->write_point	= (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}
}