2 * Primary bucket allocation code
4 * Copyright 2012 Google, Inc.
6 * Allocation in bcache is done in terms of buckets:
8 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
9 * btree pointers - they must match for the pointer to be considered valid.
11 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
12 * bucket simply by incrementing its gen.
14 * The gens (along with the priorities; it's really the gens that are important,
15 * but the code is named as if it's the priorities) are stored persistently, one
16 * entry per bucket, in the alloc btree (BTREE_ID_ALLOC).
18 * When we invalidate a bucket, we have to write its new gen to disk and wait
19 * for that write to complete before we use it - otherwise after a crash we
20 * could have pointers that appeared to be good but pointed to data that had been overwritten.
23 * We batch this up: we fill up the free_inc list with freshly invalidated
24 * buckets, write out the new gens (see bch2_invalidate_free_inc()), and once
25 * that write has completed we pull buckets off the free_inc list and
26 * optionally discard them.
28 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
29 * priorities and gens were being written before we could allocate. c->free is a
30 * smaller freelist, and buckets on that list are always ready to be used.
32 * If we've got discards enabled, that happens when a bucket moves from the
33 * free_inc list to the free list.
35 * It's important to ensure that gens don't wrap around - with respect to
36 * either the oldest gen in the btree or the gen on disk. This is quite
37 * difficult to do in practice, but we explicitly guard against it anyways - if
38 * a bucket is in danger of wrapping around we simply skip invalidating it that
39 * time around, and we garbage collect or rewrite the priorities sooner than we
40 * would have otherwise.
42 * bch2_bucket_alloc() allocates a single bucket from a specific device.
44 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
45 * in a given filesystem.
47 * bch2_allocator_thread() (below) drives all the processes described above:
48 * it keeps free_inc topped up by calling find_reclaimable_buckets().
51 * find_reclaimable_buckets_(lru|fifo|random)() find buckets that are available
52 * to be invalidated, then invalidate them and stick them on the free_inc list -
53 * in lru, fifo or random order.
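 *
 * As a rough sketch of the gen check described above (ptr_stale() is the real
 * helper; see its use in verify_not_stale() below), a pointer is stale - the
 * bucket it points into has been reused - when the bucket's current gen has
 * moved past the gen stored in the pointer, compared with wrapping arithmetic
 * so that the 8 bit gen can safely wrap:
 *
 *	static inline bool example_ptr_stale(u8 bucket_gen, u8 ptr_gen)
 *	{
 *		return (s8) (bucket_gen - ptr_gen) > 0;
 *	}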
58 #include "btree_cache.h"
60 #include "btree_update.h"
72 #include <linux/blkdev.h>
73 #include <linux/kthread.h>
74 #include <linux/math64.h>
75 #include <linux/random.h>
76 #include <linux/rculist.h>
77 #include <linux/rcupdate.h>
78 #include <linux/sched/task.h>
79 #include <linux/sort.h>
80 #include <trace/events/bcachefs.h>
82 static void bch2_recalc_min_prio(struct bch_fs *, struct bch_dev *, int);
84 /* Ratelimiting/PD controllers */
86 static void pd_controllers_update(struct work_struct *work)
88 struct bch_fs *c = container_of(to_delayed_work(work),
90 pd_controllers_update);
94 /* All units are in bytes */
95 u64 faster_tiers_size = 0;
96 u64 faster_tiers_dirty = 0;
98 u64 copygc_can_free = 0;
101 for (i = 0; i < ARRAY_SIZE(c->tiers); i++) {
102 bch2_pd_controller_update(&c->tiers[i].pd,
103 div_u64(faster_tiers_size *
104 c->tiering_percent, 100),
108 for_each_member_device_rcu(ca, c, iter, &c->tiers[i].devs) {
109 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
111 u64 size = bucket_to_sector(ca, ca->mi.nbuckets -
112 ca->mi.first_bucket) << 9;
113 u64 dirty = bucket_to_sector(ca,
114 stats.buckets[BCH_DATA_USER]) << 9;
115 u64 free = bucket_to_sector(ca,
116 __dev_buckets_free(ca, stats)) << 9;
118 * Bytes of internal fragmentation, which can be
119 * reclaimed by copy GC
121 s64 fragmented = (bucket_to_sector(ca,
122 stats.buckets[BCH_DATA_USER] +
123 stats.buckets[BCH_DATA_CACHED]) -
124 (stats.sectors[BCH_DATA_USER] +
125 stats.sectors[BCH_DATA_CACHED])) << 9;
127 fragmented = max(0LL, fragmented);
129 bch2_pd_controller_update(&ca->copygc_pd,
130 free, fragmented, -1);
132 faster_tiers_size += size;
133 faster_tiers_dirty += dirty;
135 copygc_can_free += fragmented;
142 * Throttle foreground writes if tier 0 is running out of free buckets,
143 * and either tiering or copygc can free up space.
145 * Target will be small if there isn't any work to do - we don't want to
146 * throttle foreground writes if we currently have all the free space
147 * we're ever going to have.
149 * Otherwise, if there's work to do, try to keep 20% of tier0 available
150 * for foreground writes.
153 copygc_can_free = U64_MAX;
155 schedule_delayed_work(&c->pd_controllers_update,
156 c->pd_controllers_update_seconds * HZ);
159 /* Persistent alloc info: */
161 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
163 unsigned bytes = offsetof(struct bch_alloc, data);
165 if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
167 if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
170 return DIV_ROUND_UP(bytes, sizeof(u64));
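/*
 * For example, each optional field is currently stored as 2 bytes (matching
 * the get_alloc_field(.., 2)/put_alloc_field(.., 2) calls below), so an alloc
 * key carrying both the read and write time fields has a value of
 * offsetof(struct bch_alloc, data) + 4 bytes, rounded up to a whole number of
 * u64s.
 */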
173 static const char *bch2_alloc_invalid(const struct bch_fs *c,
176 if (k.k->p.inode >= c->sb.nr_devices ||
177 !c->devs[k.k->p.inode])
178 return "invalid device";
182 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
184 if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
185 return "incorrect value size";
189 return "invalid type";
195 static void bch2_alloc_to_text(struct bch_fs *c, char *buf,
196 size_t size, struct bkey_s_c k)
206 const struct bkey_ops bch2_bkey_alloc_ops = {
207 .key_invalid = bch2_alloc_invalid,
208 .val_to_text = bch2_alloc_to_text,
211 static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
220 v = le16_to_cpup((void *) *p);
223 v = le32_to_cpup((void *) *p);
233 static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
240 *((__le16 *) *p) = cpu_to_le16(v);
243 *((__le32 *) *p) = cpu_to_le32(v);
252 static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
255 struct bkey_s_c_alloc a;
256 struct bucket_mark new;
260 if (k.k->type != BCH_ALLOC)
263 a = bkey_s_c_to_alloc(k);
264 ca = bch_dev_bkey_exists(c, a.k->p.inode);
266 if (a.k->p.offset >= ca->mi.nbuckets)
269 lg_local_lock(&c->usage_lock);
271 g = bucket(ca, a.k->p.offset);
272 bucket_cmpxchg(g, new, ({
278 if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
279 g->prio[READ] = get_alloc_field(&d, 2);
280 if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
281 g->prio[WRITE] = get_alloc_field(&d, 2);
283 lg_local_unlock(&c->usage_lock);
286 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
288 struct journal_replay *r;
289 struct btree_iter iter;
295 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
296 bch2_alloc_read_key(c, k);
297 bch2_btree_iter_cond_resched(&iter);
300 ret = bch2_btree_iter_unlock(&iter);
304 list_for_each_entry(r, journal_replay_list, list) {
305 struct bkey_i *k, *n;
306 struct jset_entry *entry;
308 for_each_jset_key(k, n, entry, &r->j)
309 if (entry->btree_id == BTREE_ID_ALLOC)
310 bch2_alloc_read_key(c, bkey_i_to_s_c(k));
313 mutex_lock(&c->prio_clock[READ].lock);
314 for_each_member_device(ca, c, i) {
315 down_read(&ca->bucket_lock);
316 bch2_recalc_min_prio(c, ca, READ);
317 up_read(&ca->bucket_lock);
319 mutex_unlock(&c->prio_clock[READ].lock);
321 mutex_lock(&c->prio_clock[WRITE].lock);
322 for_each_member_device(ca, c, i) {
323 down_read(&ca->bucket_lock);
324 bch2_recalc_min_prio(c, ca, WRITE);
325 up_read(&ca->bucket_lock);
327 mutex_unlock(&c->prio_clock[WRITE].lock);
332 static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
333 size_t b, struct btree_iter *iter,
336 struct bucket_mark m;
337 __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
339 struct bkey_i_alloc *a;
343 bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
346 ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
350 lg_local_lock(&c->usage_lock);
353 /* read mark under btree node lock: */
354 m = READ_ONCE(g->mark);
355 a = bkey_alloc_init(&alloc_key.k);
359 set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));
362 if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
363 put_alloc_field(&d, 2, g->prio[READ]);
364 if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
365 put_alloc_field(&d, 2, g->prio[WRITE]);
366 lg_local_unlock(&c->usage_lock);
368 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
371 BTREE_INSERT_USE_RESERVE|
372 BTREE_INSERT_USE_ALLOC_RESERVE|
374 BTREE_INSERT_ENTRY(iter, &a->k_i));
375 bch2_btree_iter_cond_resched(iter);
376 } while (ret == -EINTR);
381 int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
384 struct btree_iter iter;
387 if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
390 ca = bch_dev_bkey_exists(c, pos.inode);
392 if (pos.offset >= ca->mi.nbuckets)
395 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
396 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
398 ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
399 bch2_btree_iter_unlock(&iter);
403 int bch2_alloc_write(struct bch_fs *c)
409 for_each_rw_member(ca, c, i) {
410 struct btree_iter iter;
411 unsigned long bucket;
413 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
414 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
416 down_read(&ca->bucket_lock);
417 for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
418 ret = __bch2_alloc_write_key(c, ca, bucket, &iter, NULL);
422 clear_bit(bucket, ca->buckets_dirty);
424 up_read(&ca->bucket_lock);
425 bch2_btree_iter_unlock(&iter);
428 percpu_ref_put(&ca->io_ref);
436 /* Bucket IO clocks: */
438 static void bch2_recalc_min_prio(struct bch_fs *c, struct bch_dev *ca, int rw)
440 struct prio_clock *clock = &c->prio_clock[rw];
441 struct bucket_array *buckets = bucket_array(ca);
446 lockdep_assert_held(&c->prio_clock[rw].lock);
448 /* Determine min prio for this particular device */
449 for_each_bucket(g, buckets)
450 max_delta = max(max_delta, (u16) (clock->hand - g->prio[rw]));
452 ca->min_prio[rw] = clock->hand - max_delta;
455 * This may possibly increase the min prio for the whole filesystem; check that as well.
460 for_each_member_device(ca, c, i)
461 max_delta = max(max_delta,
462 (u16) (clock->hand - ca->min_prio[rw]));
464 clock->min_prio = clock->hand - max_delta;
467 static void bch2_rescale_prios(struct bch_fs *c, int rw)
469 struct prio_clock *clock = &c->prio_clock[rw];
470 struct bucket_array *buckets;
475 trace_rescale_prios(c);
477 for_each_member_device(ca, c, i) {
478 down_read(&ca->bucket_lock);
479 buckets = bucket_array(ca);
481 for_each_bucket(g, buckets)
482 g->prio[rw] = clock->hand -
483 (clock->hand - g->prio[rw]) / 2;
485 bch2_recalc_min_prio(c, ca, rw);
487 up_read(&ca->bucket_lock);
491 static void bch2_inc_clock_hand(struct io_timer *timer)
493 struct prio_clock *clock = container_of(timer,
494 struct prio_clock, rescale);
495 struct bch_fs *c = container_of(clock,
496 struct bch_fs, prio_clock[clock->rw]);
499 mutex_lock(&clock->lock);
503 /* if clock cannot be advanced more, rescale prio */
504 if (clock->hand == (u16) (clock->min_prio - 1))
505 bch2_rescale_prios(c, clock->rw);
507 mutex_unlock(&clock->lock);
509 capacity = READ_ONCE(c->capacity);
515 * we only increment the clock hand when 0.1% of the filesystem capacity has
516 * been read or written to; this determines if it's time to advance.
518 * XXX: we shouldn't really be going off of the capacity of devices in
519 * RW mode (that will be 0 when we're RO, yet we can still service reads)
522 timer->expire += capacity >> 10;
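/*
 * e.g. on a filesystem with 1 TB of usable capacity (2^31 512-byte sectors),
 * capacity >> 10 comes to 1 GB of IO per tick of the clock hand.
 */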
524 bch2_io_timer_add(&c->io_clock[clock->rw], timer);
527 static void bch2_prio_timer_init(struct bch_fs *c, int rw)
529 struct prio_clock *clock = &c->prio_clock[rw];
533 clock->rescale.fn = bch2_inc_clock_hand;
534 clock->rescale.expire = c->capacity >> 10;
535 mutex_init(&clock->lock);
538 /* Background allocator thread: */
541 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
542 * (marking them as invalidated on disk), then optionally issues discard
543 * commands to the newly free buckets, then puts them on the various freelists.
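 *
 * Very roughly, one pass of bch2_allocator_thread() below does:
 *
 *	find_reclaimable_buckets(c, ca);	 - fill free_inc with candidates
 *	bch2_invalidate_free_inc(c, ca, ...);	 - persist the new gens/prios
 *	bch2_journal_flush_seq(&c->journal, ..); - if the buckets need it
 *	discard_invalidated_buckets(c, ca);	 - discard, move to ca->free[]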
546 static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
549 if (expensive_debug_checks(c) &&
550 test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
555 for (j = 0; j < RESERVE_NR; j++)
556 fifo_for_each_entry(i, &ca->free[j], iter)
558 fifo_for_each_entry(i, &ca->free_inc, iter)
563 #define BUCKET_GC_GEN_MAX 96U
566 * wait_buckets_available - wait on reclaimable buckets
568 * If there aren't enough available buckets to fill up free_inc, wait until there are.
571 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
573 unsigned long gc_count = c->gc_count;
577 set_current_state(TASK_INTERRUPTIBLE);
578 if (kthread_should_stop()) {
583 if (gc_count != c->gc_count)
584 ca->inc_gen_really_needs_gc = 0;
586 if ((ssize_t) (dev_buckets_available(c, ca) -
587 ca->inc_gen_really_needs_gc) >=
588 (ssize_t) fifo_free(&ca->free_inc))
591 up_read(&c->gc_lock);
594 down_read(&c->gc_lock);
597 __set_current_state(TASK_RUNNING);
601 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
603 struct bucket_mark mark)
607 if (!is_available_bucket(mark))
610 gc_gen = bucket_gc_gen(ca, bucket);
612 if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
613 ca->inc_gen_needs_gc++;
615 if (gc_gen >= BUCKET_GC_GEN_MAX)
616 ca->inc_gen_really_needs_gc++;
618 return gc_gen < BUCKET_GC_GEN_MAX;
621 static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
624 struct bucket_mark m;
626 spin_lock(&c->freelist_lock);
627 if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
628 spin_unlock(&c->freelist_lock);
632 verify_not_on_freelist(c, ca, bucket);
633 BUG_ON(!fifo_push(&ca->free_inc, bucket));
634 spin_unlock(&c->freelist_lock);
637 bucket_io_clock_reset(c, ca, bucket, READ);
638 bucket_io_clock_reset(c, ca, bucket, WRITE);
640 if (m.cached_sectors) {
641 ca->allocator_invalidating_data = true;
642 } else if (m.journal_seq_valid) {
643 u64 journal_seq = atomic64_read(&c->journal.seq);
644 u64 bucket_seq = journal_seq;
646 bucket_seq &= ~((u64) U16_MAX);
647 bucket_seq |= m.journal_seq;
649 if (bucket_seq > journal_seq)
650 bucket_seq -= 1 << 16;
652 ca->allocator_journal_seq_flush =
653 max(ca->allocator_journal_seq_flush, bucket_seq);
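/*
 * Worked example of the reconstruction above: if the current journal seq
 * is 0x10005 and the bucket's 16 bit journal_seq is 0xfffe, then
 * (0x10005 & ~0xffff) | 0xfffe = 0x1fffe, which is in the future, so we
 * subtract 1 << 16 and get 0xfffe - the most recent full sequence number
 * whose low 16 bits match.
 */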
658 * Determines what order we're going to reuse buckets, smallest bucket_sort_key() first.
662 * - We take into account the read prio of the bucket, which gives us an
663 * indication of how hot the data is -- we scale the prio so that the prio
664 * farthest from the clock is worth 1/8th of the closest.
666 * - The number of sectors of cached data in the bucket, which gives us an
667 * indication of the cost in cache misses this eviction will cause.
669 * - If hotness * sectors used compares equal, we pick the bucket with the
670 * smallest bucket_gc_gen() - since incrementing the same bucket's generation
671 * number repeatedly forces us to run mark and sweep gc to avoid generation number wraparound.
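 *
 * A quick worked example of the first two terms: with the read clock hand at
 * 1000 and ca->min_prio[READ] at 200, a bucket last read at prio 800 gets
 * hotness (800 - 200) * 7 / (1000 - 200) = 5; if it holds 10 cached sectors,
 * data_wantness is (5 + 1) * 10 = 60 and the key comes out as
 * (60 << 9) | (needs_journal_commit << 8) | bucket_gc_gen().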
675 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
676 size_t b, struct bucket_mark m)
679 * Time since last read, scaled to [0, 8) where larger value indicates
680 * more recently read data:
682 unsigned long hotness =
683 (bucket(ca, b)->prio[READ] - ca->min_prio[READ]) * 7 /
684 (c->prio_clock[READ].hand - ca->min_prio[READ]);
686 /* How much we want to keep the data in this bucket: */
687 unsigned long data_wantness =
688 (hotness + 1) * bucket_sectors_used(m);
690 unsigned long needs_journal_commit =
691 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
693 return (data_wantness << 9) |
694 (needs_journal_commit << 8) |
695 bucket_gc_gen(ca, b);
698 static inline int bucket_alloc_cmp(alloc_heap *h,
699 struct alloc_heap_entry l,
700 struct alloc_heap_entry r)
702 return (l.key > r.key) - (l.key < r.key);
705 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
707 struct bucket_array *buckets;
708 struct alloc_heap_entry e;
711 ca->alloc_heap.used = 0;
713 mutex_lock(&c->prio_clock[READ].lock);
714 down_read(&ca->bucket_lock);
716 buckets = bucket_array(ca);
718 bch2_recalc_min_prio(c, ca, READ);
721 * Find buckets with lowest read priority, by building a maxheap sorted
722 * by read priority and repeatedly replacing the maximum element until
723 * all buckets have been visited.
725 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
726 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
728 if (!bch2_can_invalidate_bucket(ca, b, m))
731 e = (struct alloc_heap_entry) {
733 .key = bucket_sort_key(c, ca, b, m)
736 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
739 up_read(&ca->bucket_lock);
740 mutex_unlock(&c->prio_clock[READ].lock);
742 heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
745 * If we run out of buckets to invalidate, bch2_allocator_thread() will
746 * kick off btree gc and retry us.
748 while (!fifo_full(&ca->free_inc) &&
749 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp))
750 bch2_invalidate_one_bucket(c, ca, e.bucket);
753 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
755 struct bucket_array *buckets = bucket_array(ca);
756 struct bucket_mark m;
760 checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
762 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
763 ca->fifo_last_bucket >= ca->mi.nbuckets)
764 ca->fifo_last_bucket = ca->mi.first_bucket;
766 b = ca->fifo_last_bucket++;
768 m = READ_ONCE(buckets->b[b].mark);
770 if (bch2_can_invalidate_bucket(ca, b, m))
771 bch2_invalidate_one_bucket(c, ca, b);
775 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
777 struct bucket_array *buckets = bucket_array(ca);
778 struct bucket_mark m;
782 checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
784 size_t b = bch2_rand_range(ca->mi.nbuckets -
785 ca->mi.first_bucket) +
788 m = READ_ONCE(buckets->b[b].mark);
790 if (bch2_can_invalidate_bucket(ca, b, m))
791 bch2_invalidate_one_bucket(c, ca, b);
795 static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
797 ca->inc_gen_needs_gc = 0;
798 ca->inc_gen_really_needs_gc = 0;
800 switch (ca->mi.replacement) {
801 case CACHE_REPLACEMENT_LRU:
802 find_reclaimable_buckets_lru(c, ca);
804 case CACHE_REPLACEMENT_FIFO:
805 find_reclaimable_buckets_fifo(c, ca);
807 case CACHE_REPLACEMENT_RANDOM:
808 find_reclaimable_buckets_random(c, ca);
813 static int size_t_cmp(const void *_l, const void *_r)
815 const size_t *l = _l, *r = _r;
817 return (*l > *r) - (*l < *r);
820 static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
822 BUG_ON(ca->free_inc.front);
824 spin_lock(&c->freelist_lock);
825 sort(ca->free_inc.data,
827 sizeof(ca->free_inc.data[0]),
829 spin_unlock(&c->freelist_lock);
832 static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
833 u64 *journal_seq, size_t nr)
835 struct btree_iter iter;
838 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
839 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
842 * XXX: if ca->nr_invalidated != 0, just return if we'd block doing the
843 * btree update or journal_res_get
845 while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
846 size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);
848 ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq);
852 ca->nr_invalidated++;
855 bch2_btree_iter_unlock(&iter);
859 static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
864 * Don't remove from free_inc until after it's added to
865 * freelist, so gc can find it:
867 spin_lock(&c->freelist_lock);
868 for (i = 0; i < RESERVE_NR; i++)
869 if (fifo_push(&ca->free[i], bucket)) {
870 fifo_pop(&ca->free_inc, bucket);
871 --ca->nr_invalidated;
872 closure_wake_up(&c->freelist_wait);
873 spin_unlock(&c->freelist_lock);
876 spin_unlock(&c->freelist_lock);
881 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
886 set_current_state(TASK_INTERRUPTIBLE);
888 if (__push_invalidated_bucket(c, ca, bucket))
891 if ((current->flags & PF_KTHREAD) &&
892 kthread_should_stop()) {
901 __set_current_state(TASK_RUNNING);
906 * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
907 * then add it to the freelist, waiting until there's room if necessary:
909 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
911 while (ca->nr_invalidated) {
912 size_t bucket = fifo_peek(&ca->free_inc);
914 BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);
916 if (ca->mi.discard &&
917 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
918 blkdev_issue_discard(ca->disk_sb.bdev,
919 bucket_to_sector(ca, bucket),
920 ca->mi.bucket_size, GFP_NOIO, 0);
922 if (push_invalidated_bucket(c, ca, bucket))
930 * bch2_allocator_thread - move buckets from free_inc to reserves
932 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
933 * the reserves are depleted by bucket allocation. When we run out
934 * of free_inc, try to invalidate some buckets and write out the new prios and gens.
937 static int bch2_allocator_thread(void *arg)
939 struct bch_dev *ca = arg;
940 struct bch_fs *c = ca->fs;
948 ret = discard_invalidated_buckets(c, ca);
952 if (fifo_empty(&ca->free_inc))
956 ret = bch2_invalidate_free_inc(c, ca, &journal_seq, SIZE_MAX);
960 if (ca->allocator_invalidating_data)
961 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
962 else if (ca->allocator_journal_seq_flush)
963 ret = bch2_journal_flush_seq(&c->journal,
964 ca->allocator_journal_seq_flush);
967 * journal error - buckets haven't actually been
968 * invalidated, can't discard them:
974 /* Reset front/back so we can easily sort fifo entries later: */
975 ca->free_inc.front = ca->free_inc.back = 0;
976 ca->allocator_journal_seq_flush = 0;
977 ca->allocator_invalidating_data = false;
979 down_read(&c->gc_lock);
980 if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
981 up_read(&c->gc_lock);
987 * Find some buckets that we can invalidate, either
988 * they're completely unused, or only contain clean data
989 * that's been written back to the backing device or another tier.
993 find_reclaimable_buckets(c, ca);
994 trace_alloc_batch(ca, fifo_used(&ca->free_inc),
997 if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
998 (!fifo_full(&ca->free_inc) &&
999 ca->inc_gen_really_needs_gc >=
1000 fifo_free(&ca->free_inc))) &&
1002 atomic_inc(&c->kick_gc);
1003 wake_up_process(c->gc_thread);
1006 if (fifo_full(&ca->free_inc))
1009 if (wait_buckets_available(c, ca)) {
1010 up_read(&c->gc_lock);
1014 up_read(&c->gc_lock);
1016 sort_free_inc(c, ca);
1019 * free_inc is now full of newly-invalidated buckets: next,
1020 * write out the new bucket gens:
1028 * Open buckets represent a bucket that's currently being allocated from. They
1029 * serve two purposes:
1031 * - They track buckets that have been partially allocated, allowing for
1032 * sub-bucket sized allocations - they're used by the sector allocator below
1034 * - They provide a reference to the buckets they own that mark and sweep GC
1035 * can find, until the new allocation has a pointer to it inserted into the btree.
1038 * When allocating some space with the sector allocator, the allocation comes
1039 * with a reference to an open bucket - the caller is required to put that
1040 * reference _after_ doing the index update that makes its allocation reachable.
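 *
 * A rough sketch of the calling convention (details and error handling
 * omitted; the real call sites live in the IO paths):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);	   - returns with wp->lock held
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *	bch2_alloc_sectors_done(c, wp);		   - drops wp->lock
 *	... write the data, insert the new extent into the btree,
 *	then drop the open bucket references with bch2_open_bucket_put() ...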
1043 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
1045 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1047 spin_lock(&ob->lock);
1048 bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
1049 false, gc_pos_alloc(c, ob), 0);
1051 spin_unlock(&ob->lock);
1053 spin_lock(&c->freelist_lock);
1054 ob->freelist = c->open_buckets_freelist;
1055 c->open_buckets_freelist = ob - c->open_buckets;
1056 c->open_buckets_nr_free++;
1057 spin_unlock(&c->freelist_lock);
1059 closure_wake_up(&c->open_buckets_wait);
1062 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
1064 struct open_bucket *ob;
1066 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
1068 ob = c->open_buckets + c->open_buckets_freelist;
1069 c->open_buckets_freelist = ob->freelist;
1070 atomic_set(&ob->pin, 1);
1072 c->open_buckets_nr_free--;
1076 /* _only_ for allocating the journal and btree roots on a brand new fs: */
1077 int bch2_bucket_alloc_startup(struct bch_fs *c, struct bch_dev *ca)
1079 struct bucket_array *buckets;
1083 buckets = bucket_array(ca);
1085 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
1086 if (is_available_bucket(buckets->b[b].mark)) {
1087 bch2_mark_alloc_bucket(c, ca, b, true,
1088 gc_pos_alloc(c, NULL),
1089 BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
1090 BCH_BUCKET_MARK_GC_LOCK_HELD);
1091 set_bit(b, ca->buckets_dirty);
1100 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
1106 return BTREE_NODE_RESERVE / 2;
1108 return BTREE_NODE_RESERVE;
1113 * bch2_bucket_alloc - allocate a single bucket from a specific device
1115 * Returns index of the allocated open bucket on success, or a negative error code on failure
1117 int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
1118 enum alloc_reserve reserve,
1119 bool may_alloc_partial,
1122 struct bucket_array *buckets;
1123 struct open_bucket *ob;
1126 spin_lock(&c->freelist_lock);
1127 if (may_alloc_partial &&
1128 ca->open_buckets_partial_nr) {
1129 int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1130 c->open_buckets[ret].on_partial_list = false;
1131 spin_unlock(&c->freelist_lock);
1135 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
1137 closure_wait(&c->open_buckets_wait, cl);
1138 spin_unlock(&c->freelist_lock);
1139 trace_open_bucket_alloc_fail(ca, reserve);
1140 return OPEN_BUCKETS_EMPTY;
1143 if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
1148 if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1152 if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
1153 ca->free[RESERVE_BTREE].size &&
1154 fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1157 case RESERVE_MOVINGGC:
1158 if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
1165 if (unlikely(test_bit(BCH_FS_BRAND_NEW_FS, &c->flags)) &&
1166 (bucket = bch2_bucket_alloc_startup(c, ca)) >= 0)
1169 spin_unlock(&c->freelist_lock);
1171 trace_bucket_alloc_fail(ca, reserve);
1172 return FREELIST_EMPTY;
1174 verify_not_on_freelist(c, ca, bucket);
1176 ob = bch2_open_bucket_alloc(c);
1178 spin_lock(&ob->lock);
1179 lg_local_lock(&c->usage_lock);
1180 buckets = bucket_array(ca);
1183 ob->sectors_free = ca->mi.bucket_size;
1184 ob->ptr = (struct bch_extent_ptr) {
1185 .gen = buckets->b[bucket].mark.gen,
1186 .offset = bucket_to_sector(ca, bucket),
1190 bucket_io_clock_reset(c, ca, bucket, READ);
1191 bucket_io_clock_reset(c, ca, bucket, WRITE);
1193 lg_local_unlock(&c->usage_lock);
1194 spin_unlock(&ob->lock);
1196 spin_unlock(&c->freelist_lock);
1198 bch2_wake_allocator(ca);
1200 trace_bucket_alloc(ca, reserve);
1201 return ob - c->open_buckets;
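/*
 * Multi-device allocation, roughly: bch2_wp_alloc_list() sorts devices by tier
 * and then by wp->next_alloc[], and each successful allocation from a device
 * adds U64_MAX / (that device's free space) to its next_alloc counter (see
 * __bch2_bucket_alloc_set() below). Devices with more free space accumulate
 * next_alloc more slowly and so get picked more often, spreading writes
 * proportionally to free space; bch2_wp_rescale() halves the counters so they
 * don't saturate.
 */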
1204 struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
1205 struct write_point *wp,
1206 struct bch_devs_mask *devs)
1208 struct dev_alloc_list ret = { .nr = 0 };
1209 struct bch_dev *ca, *ca2;
1212 for_each_member_device_rcu(ca, c, i, devs) {
1213 for (j = 0; j < ret.nr; j++) {
1214 unsigned idx = ret.devs[j];
1216 ca2 = rcu_dereference(c->devs[idx]);
1220 if (ca->mi.tier < ca2->mi.tier)
1223 if (ca->mi.tier == ca2->mi.tier &&
1224 wp->next_alloc[i] < wp->next_alloc[idx])
1228 array_insert_item(ret.devs, ret.nr, j, i);
1234 void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
1235 struct write_point *wp)
1239 for (i = 0; i < ARRAY_SIZE(wp->next_alloc); i++)
1240 wp->next_alloc[i] >>= 1;
1243 static enum bucket_alloc_ret __bch2_bucket_alloc_set(struct bch_fs *c,
1244 struct write_point *wp,
1245 unsigned nr_replicas,
1246 enum alloc_reserve reserve,
1247 struct bch_devs_mask *devs,
1250 enum bucket_alloc_ret ret = NO_DEVICES;
1251 struct dev_alloc_list devs_sorted;
1255 BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));
1257 if (wp->nr_ptrs >= nr_replicas)
1258 return ALLOC_SUCCESS;
1261 devs_sorted = bch2_wp_alloc_list(c, wp, devs);
1263 for (i = 0; i < devs_sorted.nr; i++) {
1264 struct bch_dev *ca =
1265 rcu_dereference(c->devs[devs_sorted.devs[i]]);
1271 ob = bch2_bucket_alloc(c, ca, reserve,
1272 wp->type == BCH_DATA_USER, cl);
1275 if (ret == OPEN_BUCKETS_EMPTY)
1280 BUG_ON(ob <= 0 || ob > U8_MAX);
1281 BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));
1282 wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
1284 buckets_free = dev_buckets_free(c, ca);
1286 wp->next_alloc[ca->dev_idx] +=
1287 div64_u64(U64_MAX, buckets_free *
1288 ca->mi.bucket_size);
1290 wp->next_alloc[ca->dev_idx] = U64_MAX;
1291 bch2_wp_rescale(c, ca, wp);
1293 __clear_bit(ca->dev_idx, devs->d);
1295 if (wp->nr_ptrs == nr_replicas) {
1296 ret = ALLOC_SUCCESS;
1301 EBUG_ON(reserve == RESERVE_MOVINGGC &&
1302 ret != ALLOC_SUCCESS &&
1303 ret != OPEN_BUCKETS_EMPTY);
1308 static int bch2_bucket_alloc_set(struct bch_fs *c, struct write_point *wp,
1309 unsigned nr_replicas,
1310 enum alloc_reserve reserve,
1311 struct bch_devs_mask *devs,
1314 bool waiting = false;
1317 switch (__bch2_bucket_alloc_set(c, wp, nr_replicas,
1318 reserve, devs, cl)) {
1321 closure_wake_up(&c->freelist_wait);
1327 closure_wake_up(&c->freelist_wait);
1330 case FREELIST_EMPTY:
1337 /* Retry allocation after adding ourselves to the waitlist: */
1338 closure_wait(&c->freelist_wait, cl);
1341 case OPEN_BUCKETS_EMPTY:
1342 return cl ? -EAGAIN : -ENOSPC;
1349 /* Sector allocator */
1351 static void writepoint_drop_ptrs(struct bch_fs *c,
1352 struct write_point *wp,
1353 struct bch_devs_mask *devs,
1354 unsigned nr_ptrs_dislike)
1358 if (!nr_ptrs_dislike)
1361 for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1362 struct open_bucket *ob = wp->ptrs[i];
1363 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1365 if (nr_ptrs_dislike && !test_bit(ob->ptr.dev, devs->d)) {
1366 BUG_ON(ca->open_buckets_partial_nr >=
1367 ARRAY_SIZE(ca->open_buckets_partial));
1369 spin_lock(&c->freelist_lock);
1370 ob->on_partial_list = true;
1371 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
1372 ob - c->open_buckets;
1373 spin_unlock(&c->freelist_lock);
1375 closure_wake_up(&c->open_buckets_wait);
1376 closure_wake_up(&c->freelist_wait);
1378 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1384 static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
1386 #ifdef CONFIG_BCACHEFS_DEBUG
1387 struct open_bucket *ob;
1390 writepoint_for_each_ptr(wp, ob, i) {
1391 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1393 BUG_ON(ptr_stale(ca, &ob->ptr));
1398 static int open_bucket_add_buckets(struct bch_fs *c,
1399 struct bch_devs_mask *_devs,
1400 struct write_point *wp,
1401 struct bch_devs_list *devs_have,
1402 unsigned nr_replicas,
1403 enum alloc_reserve reserve,
1406 struct bch_devs_mask devs = c->rw_devs[wp->type];
1407 struct open_bucket *ob;
1410 if (wp->nr_ptrs >= nr_replicas)
1413 /* Don't allocate from devices we already have pointers to: */
1414 for (i = 0; i < devs_have->nr; i++)
1415 __clear_bit(devs_have->devs[i], devs.d);
1417 writepoint_for_each_ptr(wp, ob, i)
1418 __clear_bit(ob->ptr.dev, devs.d);
1421 bitmap_and(devs.d, devs.d, _devs->d, BCH_SB_MEMBERS_MAX);
1423 return bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);
1426 static struct write_point *__writepoint_find(struct hlist_head *head,
1427 unsigned long write_point)
1429 struct write_point *wp;
1431 hlist_for_each_entry_rcu(wp, head, node)
1432 if (wp->write_point == write_point)
1438 static struct hlist_head *writepoint_hash(struct bch_fs *c,
1439 unsigned long write_point)
1442 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1444 return &c->write_points_hash[hash];
1447 static struct write_point *writepoint_find(struct bch_fs *c,
1448 unsigned long write_point)
1450 struct write_point *wp, *oldest;
1451 struct hlist_head *head;
1453 if (!(write_point & 1UL)) {
1454 wp = (struct write_point *) write_point;
1455 mutex_lock(&wp->lock);
1459 head = writepoint_hash(c, write_point);
1461 wp = __writepoint_find(head, write_point);
1464 mutex_lock(&wp->lock);
1465 if (wp->write_point == write_point)
1467 mutex_unlock(&wp->lock);
1472 for (wp = c->write_points;
1473 wp < c->write_points + ARRAY_SIZE(c->write_points);
1475 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1478 mutex_lock(&oldest->lock);
1479 mutex_lock(&c->write_points_hash_lock);
1480 wp = __writepoint_find(head, write_point);
1481 if (wp && wp != oldest) {
1482 mutex_unlock(&c->write_points_hash_lock);
1483 mutex_unlock(&oldest->lock);
1488 hlist_del_rcu(&wp->node);
1489 wp->write_point = write_point;
1490 hlist_add_head_rcu(&wp->node, head);
1491 mutex_unlock(&c->write_points_hash_lock);
1493 wp->last_used = sched_clock();
1498 * Get us a write point we can allocate from, return with it locked:
1500 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1501 struct bch_devs_mask *devs,
1502 struct write_point_specifier write_point,
1503 struct bch_devs_list *devs_have,
1504 unsigned nr_replicas,
1505 unsigned nr_replicas_required,
1506 enum alloc_reserve reserve,
1510 struct write_point *wp;
1511 struct open_bucket *ob;
1512 unsigned i, nr_ptrs_dislike = 0, nr_ptrs_have = 0;
1515 BUG_ON(!nr_replicas || !nr_replicas_required);
1517 wp = writepoint_find(c, write_point.v);
1519 /* does the write point have ptrs we don't need? */
1520 writepoint_for_each_ptr(wp, ob, i)
1521 if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev))
1523 else if (devs && !test_bit(ob->ptr.dev, devs->d))
1526 ret = open_bucket_add_buckets(c, devs, wp, devs_have,
1527 nr_replicas + nr_ptrs_have + nr_ptrs_dislike,
1529 if (ret && ret != -EROFS)
1533 nr_ptrs_have + nr_ptrs_dislike + nr_replicas_required) {
1538 if ((int) wp->nr_ptrs - nr_ptrs_dislike < nr_replicas)
1539 nr_ptrs_dislike = clamp_t(int, wp->nr_ptrs - nr_replicas,
1540 0, nr_ptrs_dislike);
1542 /* Remove pointers we don't want to use: */
1543 writepoint_drop_ptrs(c, wp, devs, nr_ptrs_dislike);
1546 * Move pointers to devices we already have to end of open bucket
1547 * pointer list - note that removing pointers we don't want to use might
1548 * have changed nr_ptrs_have:
1551 i = nr_ptrs_have = 0;
1552 while (i < wp->nr_ptrs - nr_ptrs_have)
1553 if (bch2_dev_list_has_dev(*devs_have, wp->ptrs[i]->ptr.dev)) {
1555 swap(wp->ptrs[i], wp->ptrs[wp->nr_ptrs - nr_ptrs_have]);
1561 wp->nr_ptrs_can_use =
1562 min_t(unsigned, nr_replicas, wp->nr_ptrs - nr_ptrs_have);
1564 BUG_ON(wp->nr_ptrs_can_use < nr_replicas_required ||
1565 wp->nr_ptrs_can_use > wp->nr_ptrs);
1567 wp->sectors_free = UINT_MAX;
1569 for (i = 0; i < wp->nr_ptrs_can_use; i++)
1570 wp->sectors_free = min(wp->sectors_free,
1571 wp->ptrs[i]->sectors_free);
1573 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1575 verify_not_stale(c, wp);
1579 mutex_unlock(&wp->lock);
1580 return ERR_PTR(ret);
1584 * Append pointers to the space we just allocated to @e, and mark @sectors space
1585 * as allocated out of @wp's open buckets
1587 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1588 struct bkey_i_extent *e, unsigned sectors)
1592 BUG_ON(sectors > wp->sectors_free);
1593 wp->sectors_free -= sectors;
1595 for (i = 0; i < wp->nr_ptrs_can_use; i++) {
1596 struct open_bucket *ob = wp->ptrs[i];
1597 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1598 struct bch_extent_ptr tmp = ob->ptr;
1600 EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
1602 tmp.cached = bkey_extent_is_cached(&e->k);
1603 tmp.offset += ca->mi.bucket_size - ob->sectors_free;
1604 extent_ptr_append(e, tmp);
1606 BUG_ON(sectors > ob->sectors_free);
1607 ob->sectors_free -= sectors;
1612 * Finished with this write point: drop any open buckets that are now full
1613 * and unlock @wp
1615 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1619 for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1620 struct open_bucket *ob = wp->ptrs[i];
1622 if (!ob->sectors_free) {
1623 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1624 bch2_open_bucket_put(c, ob);
1628 mutex_unlock(&wp->lock);
1631 /* Startup/shutdown (ro/rw): */
1633 void bch2_recalc_capacity(struct bch_fs *c)
1635 struct bch_tier *fastest_tier = NULL, *slowest_tier = NULL, *tier;
1637 u64 total_capacity, capacity = 0, reserved_sectors = 0;
1638 unsigned long ra_pages = 0;
1641 lockdep_assert_held(&c->state_lock);
1643 for_each_online_member(ca, c, i) {
1644 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1646 ra_pages += bdi->ra_pages;
1649 bch2_set_ra_pages(c, ra_pages);
1651 /* Find fastest, slowest tiers with devices: */
1653 for (tier = c->tiers;
1654 tier < c->tiers + ARRAY_SIZE(c->tiers); tier++) {
1655 if (!dev_mask_nr(&tier->devs))
1658 fastest_tier = tier;
1659 slowest_tier = tier;
1662 c->fastest_tier = fastest_tier != slowest_tier ? fastest_tier : NULL;
1663 c->fastest_devs = fastest_tier != slowest_tier ? &fastest_tier->devs : NULL;
1669 * Capacity of the filesystem is the capacity of all the devices in the
1670 * slowest (highest) tier - we don't include lower tier devices.
1672 for_each_member_device_rcu(ca, c, i, &slowest_tier->devs) {
1676 * We need to reserve buckets (from the number
1677 * of currently available buckets) against
1678 * foreground writes so that mainly copygc can
1679 * make forward progress.
1681 * We need enough to refill the various reserves
1682 * from scratch - copygc will use its entire
1683 * reserve all at once, then run again when
1684 * its reserve is refilled (from the formerly
1685 * available buckets).
1687 * This reserve is just used when considering if
1688 * allocations for foreground writes must wait -
1689 * not -ENOSPC calculations.
1691 for (j = 0; j < RESERVE_NONE; j++)
1692 reserve += ca->free[j].size;
1694 reserve += ca->free_inc.size;
1696 reserve += ARRAY_SIZE(c->write_points);
1699 reserve += 1; /* tiering write point */
1700 reserve += 1; /* btree write point */
1702 reserved_sectors += bucket_to_sector(ca, reserve);
1704 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1705 ca->mi.first_bucket);
1708 total_capacity = capacity;
1710 capacity *= (100 - c->opts.gc_reserve_percent);
1711 capacity = div64_u64(capacity, 100);
1713 BUG_ON(reserved_sectors > total_capacity);
1715 capacity = min(capacity, total_capacity - reserved_sectors);
1717 c->capacity = capacity;
1720 bch2_io_timer_add(&c->io_clock[READ],
1721 &c->prio_clock[READ].rescale);
1722 bch2_io_timer_add(&c->io_clock[WRITE],
1723 &c->prio_clock[WRITE].rescale);
1725 bch2_io_timer_del(&c->io_clock[READ],
1726 &c->prio_clock[READ].rescale);
1727 bch2_io_timer_del(&c->io_clock[WRITE],
1728 &c->prio_clock[WRITE].rescale);
1731 /* Wake up in case someone was waiting for buckets */
1732 closure_wake_up(&c->freelist_wait);
1735 static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
1736 struct write_point *wp)
1738 struct bch_devs_mask not_self;
1740 bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);
1742 mutex_lock(&wp->lock);
1743 writepoint_drop_ptrs(c, wp, &not_self, wp->nr_ptrs);
1744 mutex_unlock(&wp->lock);
1747 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1749 struct open_bucket *ob;
1752 for (ob = c->open_buckets;
1753 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1755 spin_lock(&ob->lock);
1756 if (ob->valid && !ob->on_partial_list &&
1757 ob->ptr.dev == ca->dev_idx)
1759 spin_unlock(&ob->lock);
1765 /* device goes ro: */
1766 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1770 BUG_ON(ca->alloc_thread);
1772 /* First, remove device from allocation groups: */
1774 clear_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
1775 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1776 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1779 * Capacity is calculated based off of devices in allocation groups:
1781 bch2_recalc_capacity(c);
1783 /* Next, close write points that point to this device... */
1784 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1785 bch2_stop_write_point(c, ca, &c->write_points[i]);
1787 bch2_stop_write_point(c, ca, &ca->copygc_write_point);
1788 bch2_stop_write_point(c, ca, &c->tiers[ca->mi.tier].wp);
1789 bch2_stop_write_point(c, ca, &c->btree_write_point);
1791 mutex_lock(&c->btree_reserve_cache_lock);
1792 while (c->btree_reserve_cache_nr) {
1793 struct btree_alloc *a =
1794 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1796 bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
1798 mutex_unlock(&c->btree_reserve_cache_lock);
1801 * Wake up threads that were blocked on allocation, so they can notice
1802 * the device can no longer be removed and the capacity has changed:
1804 closure_wake_up(&c->freelist_wait);
1807 * journal_res_get() can block waiting for free space in the journal -
1808 * it needs to notice there may not be devices to allocate from anymore:
1810 wake_up(&c->journal.wait);
1812 /* Now wait for any in flight writes: */
1814 closure_wait_event(&c->open_buckets_wait,
1815 !bch2_dev_has_open_write_point(c, ca));
1818 /* device goes rw: */
1819 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1823 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1824 if (ca->mi.data_allowed & (1 << i))
1825 set_bit(ca->dev_idx, c->rw_devs[i].d);
1826 set_bit(ca->dev_idx, c->tiers[ca->mi.tier].devs.d);
1829 /* stop allocator thread: */
1830 void bch2_dev_allocator_stop(struct bch_dev *ca)
1832 struct task_struct *p = ca->alloc_thread;
1834 ca->alloc_thread = NULL;
1837 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1838 * the thread shutting down to avoid bch2_wake_allocator() racing:
1840 * XXX: it would be better to have the rcu barrier be asynchronous
1841 * instead of blocking us here
1851 /* start allocator thread: */
1852 int bch2_dev_allocator_start(struct bch_dev *ca)
1854 struct task_struct *p;
1857 * allocator thread already started?
1859 if (ca->alloc_thread)
1862 p = kthread_create(bch2_allocator_thread, ca, "bcache_allocator");
1867 ca->alloc_thread = p;
1872 static void allocator_start_issue_discards(struct bch_fs *c)
1878 for_each_rw_member(ca, c, dev_iter) {
1881 fifo_for_each_entry(bu, &ca->free_inc, i) {
1882 if (done == ca->nr_invalidated)
1885 blkdev_issue_discard(ca->disk_sb.bdev,
1886 bucket_to_sector(ca, bu),
1887 ca->mi.bucket_size, GFP_NOIO, 0);
1893 static int __bch2_fs_allocator_start(struct bch_fs *c)
1896 size_t bu, i, devs_have_enough = 0;
1898 u64 journal_seq = 0;
1899 bool invalidating_data = false;
1902 if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
1905 /* Scan for buckets that are already invalidated: */
1906 for_each_rw_member(ca, c, dev_iter) {
1907 struct btree_iter iter;
1908 struct bucket_mark m;
1911 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
1912 if (k.k->type != BCH_ALLOC)
1916 m = READ_ONCE(bucket(ca, bu)->mark);
1918 if (!is_available_bucket(m) || m.cached_sectors)
1921 bch2_mark_alloc_bucket(c, ca, bu, true,
1922 gc_pos_alloc(c, NULL),
1923 BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
1924 BCH_BUCKET_MARK_GC_LOCK_HELD);
1926 fifo_push(&ca->free_inc, bu);
1927 ca->nr_invalidated++;
1929 if (fifo_full(&ca->free_inc))
1932 bch2_btree_iter_unlock(&iter);
1935 /* did we find enough buckets? */
1936 for_each_rw_member(ca, c, dev_iter)
1937 devs_have_enough += (fifo_used(&ca->free_inc) >=
1938 ca->free[RESERVE_BTREE].size);
1940 if (devs_have_enough >= c->opts.metadata_replicas)
1943 /* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
1944 for_each_rw_member(ca, c, dev_iter)
1945 discard_invalidated_buckets(c, ca);
1947 for_each_rw_member(ca, c, dev_iter) {
1948 BUG_ON(!fifo_empty(&ca->free_inc));
1949 ca->free_inc.front = ca->free_inc.back = 0;
1951 find_reclaimable_buckets(c, ca);
1952 sort_free_inc(c, ca);
1954 invalidating_data |= ca->allocator_invalidating_data;
1956 fifo_for_each_entry(bu, &ca->free_inc, i)
1957 if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
1962 * We're moving buckets to freelists _before_ they've been marked as
1963 * invalidated on disk - we have to so that we can allocate new btree
1964 * nodes to mark them as invalidated on disk.
1966 * However, we can't _write_ to any of these buckets yet - they might
1967 * have cached data in them, which is live until they're marked as
1968 * invalidated on disk:
1970 if (invalidating_data)
1971 set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
1973 allocator_start_issue_discards(c);
1976 * XXX: it's possible for this to deadlock waiting on journal reclaim,
1977 * since we're holding btree writes. What then?
1980 for_each_rw_member(ca, c, dev_iter) {
1981 ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
1982 ca->free[RESERVE_BTREE].size);
1984 percpu_ref_put(&ca->io_ref);
1989 if (invalidating_data) {
1990 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
1995 if (invalidating_data)
1996 allocator_start_issue_discards(c);
1998 for_each_rw_member(ca, c, dev_iter)
1999 while (ca->nr_invalidated) {
2000 BUG_ON(!fifo_pop(&ca->free_inc, bu));
2001 ca->nr_invalidated--;
2004 set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
2006 /* now flush dirty btree nodes: */
2007 if (invalidating_data) {
2008 struct bucket_table *tbl;
2009 struct rhash_head *pos;
2012 clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
2015 for_each_cached_btree(b, c, tbl, i, pos)
2016 if (btree_node_dirty(b) && (!b->written || b->level)) {
2018 six_lock_read(&b->lock);
2019 bch2_btree_node_write(c, b, SIX_LOCK_read);
2020 six_unlock_read(&b->lock);
2029 int bch2_fs_allocator_start(struct bch_fs *c)
2035 down_read(&c->gc_lock);
2036 ret = __bch2_fs_allocator_start(c);
2037 up_read(&c->gc_lock);
2042 for_each_rw_member(ca, c, i) {
2043 ret = bch2_dev_allocator_start(ca);
2045 percpu_ref_put(&ca->io_ref);
2050 return bch2_alloc_write(c);
2053 void bch2_fs_allocator_init(struct bch_fs *c)
2055 struct open_bucket *ob;
2056 struct write_point *wp;
2059 mutex_init(&c->write_points_hash_lock);
2060 spin_lock_init(&c->freelist_lock);
2061 bch2_prio_timer_init(c, READ);
2062 bch2_prio_timer_init(c, WRITE);
2064 /* open bucket 0 is a sentinel NULL: */
2065 spin_lock_init(&c->open_buckets[0].lock);
2067 for (ob = c->open_buckets + 1;
2068 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
2069 spin_lock_init(&ob->lock);
2070 c->open_buckets_nr_free++;
2072 ob->freelist = c->open_buckets_freelist;
2073 c->open_buckets_freelist = ob - c->open_buckets;
2076 writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
2078 for (i = 0; i < ARRAY_SIZE(c->tiers); i++)
2079 writepoint_init(&c->tiers[i].wp, BCH_DATA_USER);
2081 for (wp = c->write_points;
2082 wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
2083 writepoint_init(wp, BCH_DATA_USER);
2085 wp->last_used = sched_clock();
2086 wp->write_point = (unsigned long) wp;
2087 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
2090 c->pd_controllers_update_seconds = 5;
2091 INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);