/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is a
 * smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * It's important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyway - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch2_bucket_alloc() allocates a single bucket from a specific device.
 *
 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
 * in a given filesystem.
 *
 * invalidate_buckets() drives all the processes described above. It's called
 * from bch2_bucket_alloc() and a few other places that need to make sure free
 * buckets are available.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, and then invalidate them and stick them on the free_inc list -
 * in either lru or fifo order.
 */
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "disk_groups.h"

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
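
/*
 * Illustrative sketch of the gen matching rule described in the header
 * comment: a btree pointer is only considered valid while its gen matches the
 * gen of the bucket it points into, so bumping a bucket's gen invalidates
 * every pointer into it. The real check is ptr_stale(), used further down in
 * this file; PTR_BUCKET() here is assumed from the bucket helpers:
 *
 *	if (PTR_BUCKET(ca, ptr)->mark.gen != ptr->gen)
 *		(bucket has since been reused; treat the pointer as stale)
 */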
static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}
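
/*
 * Worked example for the fragmentation calculation above (illustrative
 * numbers only): if USER + CACHED data own 100 buckets of 128 sectors each
 * (12800 sectors) but only 10240 of those sectors are live, then
 * fragmented = (12800 - 10240) << 9 = 1310720 bytes reclaimable by copygc.
 */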
/* Persistent alloc info: */

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned bytes = offsetof(struct bch_alloc, data);

	if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		bytes += 2;
	if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		bytes += 2;

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
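
/*
 * Example: with both time fields present, each stored as 2 bytes (matching
 * the get_alloc_field(&d, 2) calls below) and a 2 byte header before the
 * data[] member, bytes = 2 + 2 + 2 = 6, so DIV_ROUND_UP(6, 8) = 1 u64 of
 * bkey value. (The header size is an assumption about struct bch_alloc's
 * layout; illustrative only.)
 */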
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	switch (k.k->type) {
	case BCH_ALLOC: {
		struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

		if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
			return "incorrect value size";
		break;
	}
	default:
		return "invalid type";
	}

	return NULL;
}
void bch2_alloc_to_text(struct bch_fs *c, char *buf,
			size_t size, struct bkey_s_c k)

static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
{
	switch (bytes) {
	case 2:
		v = le16_to_cpup((void *) *p);
		break;
	case 4:
		v = le32_to_cpup((void *) *p);
		break;
	}
}

static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
{
	switch (bytes) {
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	}
}
static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a;
	struct bucket_mark new;

	if (k.k->type != BCH_ALLOC)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	lg_local_lock(&c->usage_lock);

	g = bucket(ca, a.k->p.offset);
	bucket_cmpxchg(g, new, ({

	if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		g->io_time[READ] = get_alloc_field(&d, 2);
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		g->io_time[WRITE] = get_alloc_field(&d, 2);

	lg_local_unlock(&c->usage_lock);
}
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);
}
static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq)
{
	struct bucket_mark m;
	__BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
	struct bkey_i_alloc *a;

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));

	do {
		ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
		if (ret)
			break;

		lg_local_lock(&c->usage_lock);
		g = bucket(ca, b);

		/* read mark under btree node lock: */
		m = READ_ONCE(g->mark);
		a = bkey_alloc_init(&alloc_key.k);
		a->k.p		= iter->pos;
		a->v.fields	= 0;
		a->v.gen	= m.gen;
		set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));

		d = a->v.data;
		if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
			put_alloc_field(&d, 2, g->io_time[READ]);
		if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
			put_alloc_field(&d, 2, g->io_time[WRITE]);
		lg_local_unlock(&c->usage_lock);

		ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
					   BTREE_INSERT_USE_RESERVE|
					   BTREE_INSERT_USE_ALLOC_RESERVE|
					   BTREE_INSERT_ENTRY(iter, &a->k_i));
		bch2_btree_iter_cond_resched(iter);
	} while (ret == -EINTR);

	return ret;
}
int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
{
	struct btree_iter iter;

	if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (pos.offset >= ca->mi.nbuckets)
		return 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
	bch2_btree_iter_unlock(&iter);
	return ret;
}
int bch2_alloc_write(struct bch_fs *c)
{
	int ret = 0;

	for_each_rw_member(ca, c, i) {
		struct btree_iter iter;
		unsigned long bucket;

		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
			ret = __bch2_alloc_write_key(c, ca, bucket, &iter, NULL);
			if (ret)
				break;

			clear_bit(bucket, ca->buckets_dirty);
		}
		up_read(&ca->bucket_lock);
		bch2_btree_iter_unlock(&iter);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}
/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}
static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}
static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced any further, rescale prios */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	/*
	 * we only increment when 0.1% of the filesystem capacity has been read
	 * or written to; this determines how often the clock hand advances
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += capacity >> 10;

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}
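
/*
 * Example: capacity >> 10 is capacity/1024, i.e. roughly 0.1% - so on a
 * filesystem with 2TB of usable capacity, the timer refires (and the clock
 * hand advances) after roughly every 2GB of reads or writes. (Illustrative
 * numbers only.)
 */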
static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->rescale.fn = bch2_inc_clock_hand;
	clock->rescale.expire = c->capacity >> 10;
	mutex_init(&clock->lock);
}
/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
				   size_t bucket)
{
	if (expensive_debug_checks(c) &&
	    test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
		size_t iter;
		long i;
		unsigned j;

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each_entry(i, &ca->free[j], iter)
				BUG_ON(i == bucket);
		fifo_for_each_entry(i, &ca->free_inc, iter)
			BUG_ON(i == bucket);
	}
}

#define BUCKET_GC_GEN_MAX	96U
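
/*
 * Example of how this bound is used: bucket_gc_gen() is the number of times a
 * bucket's gen has been incremented since the last btree GC saw it. In
 * bch2_can_invalidate_bucket() below, a bucket at gc gen >= 96/2 starts
 * counting towards inc_gen_needs_gc, and at >= 96 it is skipped entirely (and
 * counted in inc_gen_really_needs_gc) until gc runs - this is the wraparound
 * guard described in the header comment.
 */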
/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	if (!is_available_bucket(mark))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket)
{
	struct bucket_mark m;

	spin_lock(&c->freelist_lock);
	if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
		spin_unlock(&c->freelist_lock);
		return;
	}

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));
	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	if (m.cached_sectors) {
		ca->allocator_invalidating_data = true;
	} else if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq = journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		ca->allocator_journal_seq_flush =
			max(ca->allocator_journal_seq_flush, bucket_seq);
	}
}
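
/*
 * Worked example of the 16-bit sequence number widening above (illustrative
 * numbers): bucket_mark stores only the low 16 bits of the journal seq. If
 * the current seq is 0x123456 and m.journal_seq is 0xffff, then
 * (0x123456 & ~0xffff) | 0xffff = 0x12ffff, which is ahead of the current
 * seq, so we subtract 1 << 16 and get 0x11ffff - the most recent seq whose
 * low 16 bits match.
 */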
/*
 * Determines what order we're going to reuse buckets, smallest
 * bucket_sort_key() first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */
static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return (data_wantness << 9) |
	       (needs_journal_commit << 8) |
	       bucket_gc_gen(ca, b);
}
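
/*
 * Worked example (illustrative numbers): with max_last_io == 1024 and
 * last_io == 256, hotness = (1024 - 256) * 7 / 1024 = 5; with 100 cached
 * sectors, data_wantness = (5 + 1) * 100 = 600. Packing data_wantness above
 * needs_journal_commit above bucket_gc_gen() means buckets whose data we
 * want least sort smallest and get invalidated first.
 */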
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
	       (l.nr < r.nr) - (l.nr > r.nr) ?:
	       (l.bucket > r.bucket) - (l.bucket < r.bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);

	while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
		for (b = e.bucket; b < e.bucket + e.nr; b++) {
			if (fifo_full(&ca->free_inc))
				return;

			bch2_invalidate_one_bucket(c, ca, b);
		}
	}
}
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	for (checked = 0;
	     checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
	     checked++) {
		if (ca->fifo_last_bucket < ca->mi.first_bucket ||
		    ca->fifo_last_bucket >= ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket++;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m))
			bch2_invalidate_one_bucket(c, ca, b);

		cond_resched();
	}
}
static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m))
			bch2_invalidate_one_bucket(c, ca, b);

		cond_resched();
	}
}
static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	ca->inc_gen_needs_gc = 0;
	ca->inc_gen_really_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}
}
static int size_t_cmp(const void *_l, const void *_r)
{
	const size_t *l = _l, *r = _r;

	return (*l > *r) - (*l < *r);
}

static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
{
	BUG_ON(ca->free_inc.front);

	spin_lock(&c->freelist_lock);
	sort(ca->free_inc.data,
	     fifo_used(&ca->free_inc),
	     sizeof(ca->free_inc.data[0]),
	     size_t_cmp, NULL);
	spin_unlock(&c->freelist_lock);
}
static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
				    u64 *journal_seq, size_t nr)
{
	struct btree_iter iter;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/*
	 * XXX: if ca->nr_invalidated != 0, just return if we'd block doing the
	 * btree update or journal_res_get
	 */
	while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
		size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);

		ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq);
		if (ret)
			break;

		ca->nr_invalidated++;
	}

	bch2_btree_iter_unlock(&iter);
	return ret;
}
static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;

	/*
	 * Don't remove from free_inc until after it's added to
	 * freelist, so gc can find it:
	 */
	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket)) {
			fifo_pop(&ca->free_inc, bucket);
			--ca->nr_invalidated;
			closure_wake_up(&c->freelist_wait);
			spin_unlock(&c->freelist_lock);
			return true;
		}
	spin_unlock(&c->freelist_lock);

	return false;
}

static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (__push_invalidated_bucket(c, ca, bucket))
			break;

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
/*
 * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
 * then add it to the freelist, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (ca->nr_invalidated) {
		size_t bucket = fifo_peek(&ca->free_inc);

		BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}
/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;

	pr_debug("discarding %zu invalidated buckets",
		 ca->nr_invalidated);

	ret = discard_invalidated_buckets(c, ca);

	if (fifo_empty(&ca->free_inc))

	pr_debug("invalidating %zu buckets",
		 fifo_used(&ca->free_inc));

	ret = bch2_invalidate_free_inc(c, ca, &journal_seq, SIZE_MAX);
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);

	if (!ca->nr_invalidated) {
		bch_err(ca, "allocator thread unable to make forward progress!");

	if (ca->allocator_invalidating_data)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	else if (ca->allocator_journal_seq_flush)
		ret = bch2_journal_flush_seq(&c->journal,
					     ca->allocator_journal_seq_flush);

	/*
	 * journal error - buckets haven't actually been
	 * invalidated, can't discard them:
	 */
	if (ret) {
		bch_err(ca, "journal error: %i", ret);

	pr_debug("free_inc now empty");

	/* Reset front/back so we can easily sort fifo entries later: */
	ca->free_inc.front = ca->free_inc.back = 0;
	ca->allocator_journal_seq_flush = 0;
	ca->allocator_invalidating_data = false;
	down_read(&c->gc_lock);

	while (1) {
		size_t prev = fifo_used(&ca->free_inc);

		if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
			up_read(&c->gc_lock);
			bch_err(ca, "gc failure");
		}

		/*
		 * Find some buckets that we can invalidate, either
		 * they're completely unused, or only contain clean data
		 * that's been written back to the backing device or
		 * another cache tier
		 */

		pr_debug("scanning for reclaimable buckets");

		find_reclaimable_buckets(c, ca);

		pr_debug("found %zu buckets (free_inc %zu/%zu)",
			 fifo_used(&ca->free_inc) - prev,
			 fifo_used(&ca->free_inc), ca->free_inc.size);

		trace_alloc_batch(ca, fifo_used(&ca->free_inc),
				  ca->free_inc.size);

		if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
		     (!fifo_full(&ca->free_inc) &&
		      ca->inc_gen_really_needs_gc >=
		      fifo_free(&ca->free_inc))) &&
		    c->gc_thread) {
			atomic_inc(&c->kick_gc);
			wake_up_process(c->gc_thread);
		}

		if (fifo_full(&ca->free_inc))
			break;

		if (!fifo_empty(&ca->free_inc) &&
		    !fifo_full(&ca->free[RESERVE_MOVINGGC]))
			break;

		/*
		 * copygc may be waiting until either its reserve fills
		 * up, or we can't make forward progress:
		 */
		ca->allocator_blocked = true;
		closure_wake_up(&c->freelist_wait);

		ret = wait_buckets_available(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
		}
	}

	ca->allocator_blocked = false;
	up_read(&c->gc_lock);

	pr_debug("free_inc now %zu/%zu",
		 fifo_used(&ca->free_inc),
		 ca->free_inc.size);

	sort_free_inc(c, ca);

	/*
	 * free_inc is now full of newly-invalidated buckets: next,
	 * write out the new bucket gens:
	 */

	pr_debug("alloc thread stopping (ret %i)", ret);
/*
 * Open buckets represent a bucket that's currently being allocated from. They
 * serve two purposes:
 *
 *  - They track buckets that have been partially allocated, allowing for
 *    sub-bucket sized allocations - they're used by the sector allocator below
 *
 *  - They provide a reference to the buckets they own that mark and sweep GC
 *    can find, until the new allocation has a pointer to it inserted into the
 *    btree
 *
 * When allocating some space with the sector allocator, the allocation comes
 * with a reference to an open bucket - the caller is required to put that
 * reference _after_ doing the index update that makes its allocation reachable.
 */
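
/*
 * Sketch of the lifecycle this implies for a writer (hedged - the exact call
 * sequence lives in the write path, not in this file):
 *
 *	wp = bch2_alloc_sectors_start(c, ...);
 *	bch2_alloc_sectors_append_ptrs(c, wp, e, sectors);
 *	bch2_alloc_sectors_done(c, wp);
 *	...do the btree update that makes the extent reachable...
 *	...then drop the open bucket reference(s)...
 */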
void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	spin_lock(&ob->lock);
	bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
			       false, gc_pos_alloc(c, ob), 0);
	ob->valid = false;
	spin_unlock(&ob->lock);

	spin_lock(&c->freelist_lock);
	ob->freelist = c->open_buckets_freelist;
	c->open_buckets_freelist = ob - c->open_buckets;
	c->open_buckets_nr_free++;
	spin_unlock(&c->freelist_lock);

	closure_wake_up(&c->open_buckets_wait);
}

static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
{
	struct open_bucket *ob;

	BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);

	ob = c->open_buckets + c->open_buckets_freelist;
	c->open_buckets_freelist = ob->freelist;
	atomic_set(&ob->pin, 1);

	c->open_buckets_nr_free--;
	return ob;
}
/* _only_ for allocating the journal on a new device: */
long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
{
	struct bucket_array *buckets;

	buckets = bucket_array(ca);

	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
		if (is_available_bucket(buckets->b[b].mark))
			return b;

	return -1;
}

static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
{
	switch (reserve) {
	case RESERVE_ALLOC:
		return 0;
	case RESERVE_BTREE:
		return BTREE_NODE_RESERVE / 2;
	default:
		return BTREE_NODE_RESERVE;
	}
}
/**
 * bch2_bucket_alloc - allocate a single bucket from a specific device
 *
 * Returns index of the open bucket on success, negative bucket_alloc_ret on
 * failure
 */
int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
		      enum alloc_reserve reserve,
		      bool may_alloc_partial,
		      struct closure *cl)
{
	struct bucket_array *buckets;
	struct open_bucket *ob;
	size_t bucket;

	spin_lock(&c->freelist_lock);
	if (may_alloc_partial &&
	    ca->open_buckets_partial_nr) {
		int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		c->open_buckets[ret].on_partial_list = false;
		spin_unlock(&c->freelist_lock);
		return ret;
	}

	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
		if (cl)
			closure_wait(&c->open_buckets_wait, cl);
		spin_unlock(&c->freelist_lock);
		trace_open_bucket_alloc_fail(ca, reserve);
		return OPEN_BUCKETS_EMPTY;
	}

	if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
		goto out;

	switch (reserve) {
	case RESERVE_ALLOC:
		if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_BTREE:
		if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
		    ca->free[RESERVE_BTREE].size &&
		    fifo_pop(&ca->free[RESERVE_BTREE], bucket))
			goto out;
		break;
	case RESERVE_MOVINGGC:
		if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
			goto out;
		break;
	default:
		break;
	}

	if (cl)
		closure_wait(&c->freelist_wait, cl);

	spin_unlock(&c->freelist_lock);

	trace_bucket_alloc_fail(ca, reserve);
	return FREELIST_EMPTY;
out:
	verify_not_on_freelist(c, ca, bucket);

	ob = bch2_open_bucket_alloc(c);

	spin_lock(&ob->lock);
	lg_local_lock(&c->usage_lock);
	buckets = bucket_array(ca);

	ob->valid = true;
	ob->sectors_free = ca->mi.bucket_size;
	ob->ptr = (struct bch_extent_ptr) {
		.gen	= buckets->b[bucket].mark.gen,
		.offset	= bucket_to_sector(ca, bucket),
		.dev	= ca->dev_idx,
	};

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	lg_local_unlock(&c->usage_lock);
	spin_unlock(&ob->lock);

	spin_unlock(&c->freelist_lock);

	bch2_wake_allocator(ca);

	trace_bucket_alloc(ca, reserve);
	return ob - c->open_buckets;
}
static int __dev_alloc_cmp(struct write_point *wp,
			   unsigned l, unsigned r)
{
	return ((wp->next_alloc[l] > wp->next_alloc[r]) -
		(wp->next_alloc[l] < wp->next_alloc[r]));
}

#define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)

struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
					 struct write_point *wp,
					 struct bch_devs_mask *devs)
{
	struct dev_alloc_list ret = { .nr = 0 };

	for_each_member_device_rcu(ca, c, i, devs)
		ret.devs[ret.nr++] = i;

	bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
	return ret;
}
void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
		     struct write_point *wp)
{
	u64 *v = wp->next_alloc + ca->dev_idx;
	u64 free_space = dev_buckets_free(c, ca);
	u64 free_space_inv = free_space
		? div64_u64(1ULL << 48, free_space)
		: 1ULL << 48;
	u64 scale = *v / 4;

	if (*v + free_space_inv >= *v)
		*v += free_space_inv;
	else
		*v = U64_MAX;

	for (v = wp->next_alloc;
	     v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
		*v = *v < scale ? 0 : *v - scale;
}
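
/*
 * Example (illustrative numbers): a device with 2^20 free buckets gets
 * free_space_inv = 2^48 / 2^20 = 2^28 added to its next_alloc each time it's
 * picked, while one with 2^21 free buckets gets 2^27 - so bch2_wp_alloc_list(),
 * which sorts by next_alloc ascending, stripes new allocations across devices
 * in proportion to their free space.
 */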
static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
					struct write_point *wp,
					unsigned nr_replicas,
					enum alloc_reserve reserve,
					struct bch_devs_mask *devs,
					struct closure *cl)
{
	enum bucket_alloc_ret ret = NO_DEVICES;
	struct dev_alloc_list devs_sorted;
	struct bch_dev *ca;
	unsigned i, nr_ptrs_effective = 0;
	bool have_cache_dev = false;

	BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));

	for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;
	}

	if (nr_ptrs_effective >= nr_replicas)
		return ALLOC_SUCCESS;

	rcu_read_lock();
	devs_sorted = bch2_wp_alloc_list(c, wp, devs);

	for (i = 0; i < devs_sorted.nr; i++) {
		int ob;

		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability &&
		    (have_cache_dev ||
		     wp->type != BCH_DATA_USER))
			continue;

		ob = bch2_bucket_alloc(c, ca, reserve,
				       wp->type == BCH_DATA_USER, cl);
		if (ob < 0) {
			ret = ob;
			if (ret == OPEN_BUCKETS_EMPTY)
				break;
			continue;
		}

		BUG_ON(ob <= 0 || ob > U8_MAX);
		BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));

		wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;

		bch2_wp_rescale(c, ca, wp);

		nr_ptrs_effective += ca->mi.durability;
		have_cache_dev |= !ca->mi.durability;

		__clear_bit(ca->dev_idx, devs->d);

		if (nr_ptrs_effective >= nr_replicas) {
			ret = ALLOC_SUCCESS;
			break;
		}
	}
	rcu_read_unlock();

	EBUG_ON(reserve == RESERVE_MOVINGGC &&
		ret != ALLOC_SUCCESS &&
		ret != OPEN_BUCKETS_EMPTY);

	switch (ret) {
	case ALLOC_SUCCESS:
		return 0;
	case NO_DEVICES:
		return -EROFS;
	case FREELIST_EMPTY:
	case OPEN_BUCKETS_EMPTY:
		return cl ? -EAGAIN : -ENOSPC;
	default:
		BUG();
	}
}
/* Sector allocator */

static void writepoint_drop_ptr(struct bch_fs *c,
				struct write_point *wp,
				unsigned i)
{
	struct open_bucket *ob = wp->ptrs[i];
	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

	BUG_ON(ca->open_buckets_partial_nr >=
	       ARRAY_SIZE(ca->open_buckets_partial));

	if (wp->type == BCH_DATA_USER) {
		spin_lock(&c->freelist_lock);
		ob->on_partial_list = true;
		ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
			ob - c->open_buckets;
		spin_unlock(&c->freelist_lock);

		closure_wake_up(&c->open_buckets_wait);
		closure_wake_up(&c->freelist_wait);
	} else {
		bch2_open_bucket_put(c, ob);
	}

	array_remove_item(wp->ptrs, wp->nr_ptrs, i);

	if (i < wp->first_ptr)
		wp->first_ptr--;
}

static void writepoint_drop_ptrs(struct bch_fs *c,
				 struct write_point *wp,
				 u16 target, bool in_target)
{
	int i;

	for (i = wp->first_ptr - 1; i >= 0; --i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		if (dev_in_target(ca, target) == in_target)
			writepoint_drop_ptr(c, wp, i);
	}
}
static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct open_bucket *ob;
	unsigned i;

	writepoint_for_each_ptr_all(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);

		BUG_ON(ptr_stale(ca, &ob->ptr));
	}
#endif
}
static int open_bucket_add_buckets(struct bch_fs *c,
				   u16 target,
				   struct write_point *wp,
				   struct bch_devs_list *devs_have,
				   unsigned nr_replicas,
				   enum alloc_reserve reserve,
				   struct closure *cl)
{
	struct bch_devs_mask devs = c->rw_devs[wp->type];
	struct open_bucket *ob;
	unsigned i;

	/* Don't allocate from devices we already have pointers to: */
	for (i = 0; i < devs_have->nr; i++)
		__clear_bit(devs_have->devs[i], devs.d);

	writepoint_for_each_ptr_all(wp, ob, i)
		__clear_bit(ob->ptr.dev, devs.d);

	if (target) {
		const struct bch_devs_mask *t;

		rcu_read_lock();
		t = bch2_target_to_mask(c, target);
		if (t)
			bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
		rcu_read_unlock();
	}

	return bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);
}
static struct write_point *__writepoint_find(struct hlist_head *head,
					     unsigned long write_point)
{
	struct write_point *wp;

	hlist_for_each_entry_rcu(wp, head, node)
		if (wp->write_point == write_point)
			return wp;

	return NULL;
}

static struct hlist_head *writepoint_hash(struct bch_fs *c,
					  unsigned long write_point)
{
	unsigned hash =
		hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));

	return &c->write_points_hash[hash];
}
static struct write_point *writepoint_find(struct bch_fs *c,
					   unsigned long write_point)
{
	struct write_point *wp, *oldest;
	struct hlist_head *head;

	if (!(write_point & 1UL)) {
		wp = (struct write_point *) write_point;
		mutex_lock(&wp->lock);
		return wp;
	}

	head = writepoint_hash(c, write_point);
restart_find:
	wp = __writepoint_find(head, write_point);
	if (wp) {
lock_wp:
		mutex_lock(&wp->lock);
		if (wp->write_point == write_point)
			goto out;
		mutex_unlock(&wp->lock);
		goto restart_find;
	}

	oldest = NULL;
	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points);
	     wp++)
		if (!oldest || time_before64(wp->last_used, oldest->last_used))
			oldest = wp;

	mutex_lock(&oldest->lock);
	mutex_lock(&c->write_points_hash_lock);
	wp = __writepoint_find(head, write_point);
	if (wp && wp != oldest) {
		mutex_unlock(&c->write_points_hash_lock);
		mutex_unlock(&oldest->lock);
		goto lock_wp;
	}

	wp = oldest;
	hlist_del_rcu(&wp->node);
	wp->write_point = write_point;
	hlist_add_head_rcu(&wp->node, head);
	mutex_unlock(&c->write_points_hash_lock);
out:
	wp->last_used = sched_clock();
	return wp;
}
/*
 * Get us an open_bucket we can allocate from, return with it locked:
 */
struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
				unsigned target,
				struct write_point_specifier write_point,
				struct bch_devs_list *devs_have,
				unsigned nr_replicas,
				unsigned nr_replicas_required,
				enum alloc_reserve reserve,
				unsigned flags,
				struct closure *cl)
{
	struct write_point *wp;
	struct open_bucket *ob;
	struct bch_dev *ca;
	unsigned nr_ptrs_have, nr_ptrs_effective;
	int ret, i, cache_idx = -1;

	BUG_ON(!nr_replicas || !nr_replicas_required);

	wp = writepoint_find(c, write_point.v);

	wp->first_ptr = 0;

	/* does writepoint have ptrs we can't use? */
	writepoint_for_each_ptr(wp, ob, i)
		if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
			swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
			wp->first_ptr++;
		}

	nr_ptrs_have = wp->first_ptr;

	/* does writepoint have ptrs we don't want to use? */
	if (target)
		writepoint_for_each_ptr(wp, ob, i)
			if (!dev_idx_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
			}

	if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, cl);
	} else {
		ret = open_bucket_add_buckets(c, target, wp, devs_have,
					      nr_replicas, reserve, NULL);
		if (!ret)
			goto alloc_done;

		wp->first_ptr = nr_ptrs_have;

		ret = open_bucket_add_buckets(c, 0, wp, devs_have,
					      nr_replicas, reserve, cl);
	}

	if (ret && ret != -EROFS)
		goto err;
alloc_done:
	/* check for more than one cache: */
	for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
		ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);

		if (ca->mi.durability)
			continue;

		/*
		 * if we ended up with more than one cache device, prefer the
		 * one in the target we want:
		 */
		if (cache_idx >= 0) {
			if (!dev_in_target(ca, target)) {
				writepoint_drop_ptr(c, wp, i);
			} else {
				writepoint_drop_ptr(c, wp, cache_idx);
				cache_idx = i;
			}
		} else {
			cache_idx = i;
		}
	}

	/* we might have more effective replicas than required: */
	nr_ptrs_effective = 0;
	writepoint_for_each_ptr(wp, ob, i) {
		ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		nr_ptrs_effective += ca->mi.durability;
	}

	if (ret == -EROFS &&
	    nr_ptrs_effective >= nr_replicas_required)
		ret = 0;

	if (ret)
		goto err;

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
			    !dev_idx_in_target(c, ob->ptr.dev, target)) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	if (nr_ptrs_effective > nr_replicas) {
		writepoint_for_each_ptr(wp, ob, i) {
			ca = bch_dev_bkey_exists(c, ob->ptr.dev);

			if (ca->mi.durability &&
			    ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
				swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
				wp->first_ptr++;
				nr_ptrs_effective -= ca->mi.durability;
			}
		}
	}

	/* Remove pointers we don't want to use: */
	if (target)
		writepoint_drop_ptrs(c, wp, target, false);

	BUG_ON(wp->first_ptr >= wp->nr_ptrs);
	BUG_ON(nr_ptrs_effective < nr_replicas_required);

	wp->sectors_free = UINT_MAX;

	writepoint_for_each_ptr(wp, ob, i)
		wp->sectors_free = min(wp->sectors_free, ob->sectors_free);

	BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);

	verify_not_stale(c, wp);

	return wp;
err:
	mutex_unlock(&wp->lock);
	return ERR_PTR(ret);
}
/*
 * Append pointers to the space we just allocated to @e, and mark @sectors
 * space as allocated out of @wp's open buckets
 */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i_extent *e, unsigned sectors)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	writepoint_for_each_ptr(wp, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
		struct bch_extent_ptr tmp = ob->ptr;

		EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));

		tmp.cached = bkey_extent_is_cached(&e->k) ||
			(!ca->mi.durability && wp->type == BCH_DATA_USER);

		tmp.offset += ca->mi.bucket_size - ob->sectors_free;
		extent_ptr_append(e, tmp);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}
/*
 * Release the open buckets in @wp that have no space remaining, and unlock
 * the write point:
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
	int i;

	for (i = wp->nr_ptrs - 1; i >= 0; --i) {
		struct open_bucket *ob = wp->ptrs[i];

		if (!ob->sectors_free) {
			array_remove_item(wp->ptrs, wp->nr_ptrs, i);
			bch2_open_bucket_put(c, ob);
		}
	}

	mutex_unlock(&wp->lock);
}
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 total_capacity, capacity = 0, reserved_sectors = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		size_t reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			reserve += ca->free[j].size;

		reserve += ca->free_inc.size;

		reserve += ARRAY_SIZE(c->write_points);

		reserve += 1;	/* btree write point */

		reserved_sectors += bucket_to_sector(ca, reserve);

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);
	}

	total_capacity = capacity;

	capacity *= (100 - c->opts.gc_reserve_percent);
	capacity = div64_u64(capacity, 100);

	BUG_ON(reserved_sectors > total_capacity);

	capacity = min(capacity, total_capacity - reserved_sectors);

	c->capacity = capacity;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	}

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}
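
/*
 * Example of the capacity calculation above (illustrative numbers): with
 * total_capacity == 1000000 sectors and gc_reserve_percent == 20, capacity
 * becomes 800000 sectors, then is clamped to no more than total_capacity
 * minus the per-device reserved_sectors computed in the loop.
 */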
static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
				  struct write_point *wp)
{
	struct bch_devs_mask not_self;

	bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);

	mutex_lock(&wp->lock);
	wp->first_ptr = wp->nr_ptrs;
	writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
	mutex_unlock(&wp->lock);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_stop_write_point(c, ca, &c->write_points[i]);

	bch2_stop_write_point(c, ca, &ca->copygc_write_point);
	bch2_stop_write_point(c, ca, &c->rebalance_write_point);
	bch2_stop_write_point(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p = ca->alloc_thread;

	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p)
		kthread_stop(p);
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	ca->alloc_thread = p;
	return 0;
}
static void allocator_start_issue_discards(struct bch_fs *c)
{
	for_each_rw_member(ca, c, dev_iter) {
		unsigned done = 0;

		fifo_for_each_entry(bu, &ca->free_inc, i) {
			if (done == ca->nr_invalidated)
				break;

			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO, 0);
			done++;
		}
	}
}
static int __bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t bu, i;
	u64 journal_seq = 0;
	bool invalidating_data = false;
	int ret = 0;

	if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
		return -1;

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct btree_iter iter;
		struct bucket_mark m;
		struct bkey_s_c k;

		for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
			if (k.k->type != BCH_ALLOC)
				continue;

			bu = k.k->p.offset;
			m = READ_ONCE(bucket(ca, bu)->mark);

			if (!is_available_bucket(m) || m.cached_sectors)
				continue;

			bch2_mark_alloc_bucket(c, ca, bu, true,
					gc_pos_alloc(c, NULL),
					BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
					BCH_BUCKET_MARK_GC_LOCK_HELD);

			fifo_push(&ca->free_inc, bu);
			ca->nr_invalidated++;

			if (fifo_full(&ca->free_inc))
				break;
		}
		bch2_btree_iter_unlock(&iter);
	}

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
			percpu_ref_put(&ca->io_ref);
			goto not_enough;
		}

	return 0;
not_enough:
	pr_debug("did not find enough empty buckets; issuing discards");

	/* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
	for_each_rw_member(ca, c, dev_iter)
		discard_invalidated_buckets(c, ca);

	pr_debug("scanning for reclaimable buckets");

	for_each_rw_member(ca, c, dev_iter) {
		BUG_ON(!fifo_empty(&ca->free_inc));
		ca->free_inc.front = ca->free_inc.back = 0;

		find_reclaimable_buckets(c, ca);
		sort_free_inc(c, ca);

		invalidating_data |= ca->allocator_invalidating_data;

		fifo_for_each_entry(bu, &ca->free_inc, i)
			if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
				break;
	}

	pr_debug("done scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	if (invalidating_data) {
		pr_debug("invalidating existing data");
		set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	} else {
		pr_debug("issuing discards");
		allocator_start_issue_discards(c);
	}

	/*
	 * XXX: it's possible for this to deadlock waiting on journal reclaim,
	 * since we're holding btree writes. What then?
	 */

	for_each_rw_member(ca, c, dev_iter) {
		ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
					       ca->free[RESERVE_BTREE].size);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	if (invalidating_data) {
		pr_debug("flushing journal");

		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
		if (ret)
			return ret;

		pr_debug("issuing discards");
		allocator_start_issue_discards(c);
	}

	for_each_rw_member(ca, c, dev_iter)
		while (ca->nr_invalidated) {
			BUG_ON(!fifo_pop(&ca->free_inc, bu));
			ca->nr_invalidated--;
		}

	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

	/* now flush dirty btree nodes: */
	if (invalidating_data) {
		struct bucket_table *tbl;
		struct rhash_head *pos;
		struct btree *b;
		bool flush_updates;
		size_t nr_pending_updates;

		clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
again:
		pr_debug("flushing dirty btree nodes");
		cond_resched();

		flush_updates = false;
		nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);

		rcu_read_lock();
		for_each_cached_btree(b, c, tbl, i, pos)
			if (btree_node_dirty(b) && (!b->written || b->level)) {
				if (btree_node_may_write(b)) {
					rcu_read_unlock();
					six_lock_read(&b->lock);
					bch2_btree_node_write(c, b, SIX_LOCK_read);
					six_unlock_read(&b->lock);
					goto again;
				} else {
					flush_updates = true;
				}
			}
		rcu_read_unlock();

		/*
		 * This is ugly, but it's needed to flush btree node writes
		 * without spinning...
		 */
		if (flush_updates) {
			closure_wait_event(&c->btree_interior_update_wait,
				bch2_btree_interior_updates_nr_pending(c) <
				nr_pending_updates);
			goto again;
		}
	}

	return 0;
}
int bch2_fs_allocator_start(struct bch_fs *c)
{
	int ret;

	down_read(&c->gc_lock);
	ret = __bch2_fs_allocator_start(c);
	up_read(&c->gc_lock);

	if (ret)
		return ret;

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return bch2_alloc_write(c);
}
void bch2_fs_allocator_init(struct bch_fs *c)
{
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used = sched_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
	}

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}