2 * Primary bucket allocation code
4 * Copyright 2012 Google, Inc.
6 * Allocation in bcache is done in terms of buckets:
8 * Each bucket has associated an 8 bit gen; this gen corresponds to the gen in
9 * btree pointers - they must match for the pointer to be considered valid.
11 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
12 * bucket simply by incrementing its gen.
14 * The gens (along with the priorities; it's really the gens that are important, but
15 * the code is named as if it's the priorities) are written in an arbitrary list
16 * of buckets on disk, with a pointer to them in the journal header.
18 * When we invalidate a bucket, we have to write its new gen to disk and wait
19 * for that write to complete before we use it - otherwise after a crash we
20 * could have pointers that appeared to be good but pointed to data that had
21 * been overwritten.
23 * Since the gens and priorities are all stored contiguously on disk, we can
24 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
25 * call prio_write(), and when prio_write() finishes we pull buckets off the
26 * free_inc list and optionally discard them.
28 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
29 * priorities and gens were being written before we could allocate. c->free is a
30 * smaller freelist, and buckets on that list are always ready to be used.
32 * If we've got discards enabled, that happens when a bucket moves from the
33 * free_inc list to the free list.
35 * It's important to ensure that gens don't wrap around - with respect to
36 * either the oldest gen in the btree or the gen on disk. This is quite
37 * difficult to do in practice, but we explicitly guard against it anyway - if
38 * a bucket is in danger of wrapping around we simply skip invalidating it that
39 * time around, and we garbage collect or rewrite the priorities sooner than we
40 * would have otherwise.
42 * bch2_bucket_alloc() allocates a single bucket from a specific device.
44 * bch2_bucket_alloc_set() allocates one or more buckets from different devices
45 * in a given filesystem.
47 * invalidate_buckets() drives all the processes described above. It's called
48 * from bch2_bucket_alloc() and a few other places that need to make sure free
49 * buckets are ready.
51 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
52 * invalidated, and then invalidate them and stick them on the free_inc list -
53 * in either lru or fifo order.
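/*
 * Illustrative sketch (not part of the original file) of the gen rule
 * described above: an extent pointer is only considered valid while the gen
 * it carries matches the gen of the bucket it points into, so bumping a
 * bucket's gen invalidates every remaining pointer to it without touching
 * the btree. The helper name below is hypothetical:
 *
 *	static bool ptr_gen_valid(u8 bucket_gen, u8 ptr_gen)
 *	{
 *		return bucket_gen == ptr_gen;
 *	}
 */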
58 #include "btree_cache.h"
60 #include "btree_update.h"
61 #include "btree_update_interior.h"
67 #include "disk_groups.h"
72 #include "journal_io.h"
75 #include <linux/blkdev.h>
76 #include <linux/kthread.h>
77 #include <linux/math64.h>
78 #include <linux/random.h>
79 #include <linux/rculist.h>
80 #include <linux/rcupdate.h>
81 #include <linux/sched/task.h>
82 #include <linux/sort.h>
83 #include <trace/events/bcachefs.h>
85 static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
87 /* Ratelimiting/PD controllers */
89 static void pd_controllers_update(struct work_struct *work)
91 struct bch_fs *c = container_of(to_delayed_work(work),
93 pd_controllers_update);
97 for_each_member_device(ca, c, i) {
98 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
100 u64 free = bucket_to_sector(ca,
101 __dev_buckets_free(ca, stats)) << 9;
103 * Bytes of internal fragmentation, which can be
104 * reclaimed by copy GC
106 s64 fragmented = (bucket_to_sector(ca,
107 stats.buckets[BCH_DATA_USER] +
108 stats.buckets[BCH_DATA_CACHED]) -
109 (stats.sectors[BCH_DATA_USER] +
110 stats.sectors[BCH_DATA_CACHED])) << 9;
112 fragmented = max(0LL, fragmented);
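/*
 * Illustrative example (hypothetical numbers, not from the original file):
 * 100 USER/CACHED buckets of 128 sectors each give 12800 sectors of bucket
 * capacity; if only 9800 of those sectors are live, then
 * fragmented = (12800 - 9800) << 9 = ~1.5 MB of space copygc could reclaim,
 * which is what gets fed to the copygc PD controller below.
 */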
114 bch2_pd_controller_update(&ca->copygc_pd,
115 free, fragmented, -1);
118 schedule_delayed_work(&c->pd_controllers_update,
119 c->pd_controllers_update_seconds * HZ);
122 /* Persistent alloc info: */
124 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
126 unsigned bytes = offsetof(struct bch_alloc, data);
128 if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
130 if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
133 return DIV_ROUND_UP(bytes, sizeof(u64));
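/*
 * Example (illustrative): with both the read and write time fields present,
 * bytes is offsetof(struct bch_alloc, data) plus two 2-byte fields, and
 * DIV_ROUND_UP() rounds that up to whole u64s - the unit that bkey value
 * sizes are expressed in.
 */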
136 const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
138 if (k.k->p.inode >= c->sb.nr_devices ||
139 !c->devs[k.k->p.inode])
140 return "invalid device";
144 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
146 if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
147 return "incorrect value size";
151 return "invalid type";
157 void bch2_alloc_to_text(struct bch_fs *c, char *buf,
158 size_t size, struct bkey_s_c k)
168 static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
177 v = le16_to_cpup((void *) *p);
180 v = le32_to_cpup((void *) *p);
190 static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
197 *((__le16 *) *p) = cpu_to_le16(v);
200 *((__le32 *) *p) = cpu_to_le32(v);
209 static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
212 struct bkey_s_c_alloc a;
213 struct bucket_mark new;
217 if (k.k->type != BCH_ALLOC)
220 a = bkey_s_c_to_alloc(k);
221 ca = bch_dev_bkey_exists(c, a.k->p.inode);
223 if (a.k->p.offset >= ca->mi.nbuckets)
226 lg_local_lock(&c->usage_lock);
228 g = bucket(ca, a.k->p.offset);
229 bucket_cmpxchg(g, new, ({
235 if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
236 g->io_time[READ] = get_alloc_field(&d, 2);
237 if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
238 g->io_time[WRITE] = get_alloc_field(&d, 2);
240 lg_local_unlock(&c->usage_lock);
243 int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
245 struct journal_replay *r;
246 struct btree_iter iter;
252 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
253 bch2_alloc_read_key(c, k);
254 bch2_btree_iter_cond_resched(&iter);
257 ret = bch2_btree_iter_unlock(&iter);
261 list_for_each_entry(r, journal_replay_list, list) {
262 struct bkey_i *k, *n;
263 struct jset_entry *entry;
265 for_each_jset_key(k, n, entry, &r->j)
266 if (entry->btree_id == BTREE_ID_ALLOC)
267 bch2_alloc_read_key(c, bkey_i_to_s_c(k));
270 mutex_lock(&c->bucket_clock[READ].lock);
271 for_each_member_device(ca, c, i) {
272 down_read(&ca->bucket_lock);
273 bch2_recalc_oldest_io(c, ca, READ);
274 up_read(&ca->bucket_lock);
276 mutex_unlock(&c->bucket_clock[READ].lock);
278 mutex_lock(&c->bucket_clock[WRITE].lock);
279 for_each_member_device(ca, c, i) {
280 down_read(&ca->bucket_lock);
281 bch2_recalc_oldest_io(c, ca, WRITE);
282 up_read(&ca->bucket_lock);
284 mutex_unlock(&c->bucket_clock[WRITE].lock);
289 static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
290 size_t b, struct btree_iter *iter,
293 struct bucket_mark m;
294 __BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
296 struct bkey_i_alloc *a;
300 bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
303 ret = btree_iter_err(bch2_btree_iter_peek_slot(iter));
307 lg_local_lock(&c->usage_lock);
310 /* read mark under btree node lock: */
311 m = READ_ONCE(g->mark);
312 a = bkey_alloc_init(&alloc_key.k);
316 set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));
319 if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
320 put_alloc_field(&d, 2, g->io_time[READ]);
321 if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
322 put_alloc_field(&d, 2, g->io_time[WRITE]);
323 lg_local_unlock(&c->usage_lock);
325 ret = bch2_btree_insert_at(c, NULL, NULL, journal_seq,
328 BTREE_INSERT_USE_RESERVE|
329 BTREE_INSERT_USE_ALLOC_RESERVE|
331 BTREE_INSERT_ENTRY(iter, &a->k_i));
332 bch2_btree_iter_cond_resched(iter);
333 } while (ret == -EINTR);
338 int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
341 struct btree_iter iter;
344 if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])
347 ca = bch_dev_bkey_exists(c, pos.inode);
349 if (pos.offset >= ca->mi.nbuckets)
352 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
353 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
355 ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL);
356 bch2_btree_iter_unlock(&iter);
360 int bch2_alloc_write(struct bch_fs *c)
366 for_each_rw_member(ca, c, i) {
367 struct btree_iter iter;
368 unsigned long bucket;
370 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
371 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
373 down_read(&ca->bucket_lock);
374 for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
375 ret = __bch2_alloc_write_key(c, ca, bucket, &iter, NULL);
379 clear_bit(bucket, ca->buckets_dirty);
381 up_read(&ca->bucket_lock);
382 bch2_btree_iter_unlock(&iter);
385 percpu_ref_put(&ca->io_ref);
393 /* Bucket IO clocks: */
395 static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
397 struct bucket_clock *clock = &c->bucket_clock[rw];
398 struct bucket_array *buckets = bucket_array(ca);
403 lockdep_assert_held(&c->bucket_clock[rw].lock);
405 /* Recalculate max_last_io for this device: */
406 for_each_bucket(g, buckets)
407 max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
409 ca->max_last_bucket_io[rw] = max_last_io;
411 /* Recalculate global max_last_io: */
414 for_each_member_device(ca, c, i)
415 max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
417 clock->max_last_io = max_last_io;
420 static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
422 struct bucket_clock *clock = &c->bucket_clock[rw];
423 struct bucket_array *buckets;
428 trace_rescale_prios(c);
430 for_each_member_device(ca, c, i) {
431 down_read(&ca->bucket_lock);
432 buckets = bucket_array(ca);
434 for_each_bucket(g, buckets)
435 g->io_time[rw] = clock->hand -
436 bucket_last_io(c, g, rw) / 2;
438 bch2_recalc_oldest_io(c, ca, rw);
440 up_read(&ca->bucket_lock);
444 static void bch2_inc_clock_hand(struct io_timer *timer)
446 struct bucket_clock *clock = container_of(timer,
447 struct bucket_clock, rescale);
448 struct bch_fs *c = container_of(clock,
449 struct bch_fs, bucket_clock[clock->rw]);
454 mutex_lock(&clock->lock);
456 /* if clock cannot be advanced more, rescale prio */
457 if (clock->max_last_io >= U16_MAX - 2)
458 bch2_rescale_bucket_io_times(c, clock->rw);
460 BUG_ON(clock->max_last_io >= U16_MAX - 2);
462 for_each_member_device(ca, c, i)
463 ca->max_last_bucket_io[clock->rw]++;
464 clock->max_last_io++;
467 mutex_unlock(&clock->lock);
469 capacity = READ_ONCE(c->capacity);
475 * we only increment once 0.1% of the filesystem capacity has been read
476 * or written to - this determines when it's time to advance the clock again
478 * XXX: we shouldn't really be going off of the capacity of devices in
479 * RW mode (that will be 0 when we're RO, yet we can still service
480 * reads)
482 timer->expire += capacity >> 10;
484 bch2_io_timer_add(&c->io_clock[clock->rw], timer);
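/*
 * Example (illustrative): on a filesystem with ~1 TB of capacity,
 * capacity >> 10 is ~1 GB worth of sectors, so the hand advances (and bucket
 * "last IO" ages by one tick) once per ~1 GB read or written - i.e. per
 * ~0.1% of capacity, as the comment above describes.
 */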
487 static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
489 struct bucket_clock *clock = &c->bucket_clock[rw];
493 clock->rescale.fn = bch2_inc_clock_hand;
494 clock->rescale.expire = c->capacity >> 10;
495 mutex_init(&clock->lock);
498 /* Background allocator thread: */
501 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
502 * (marking them as invalidated on disk), then optionally issues discard
503 * commands to the newly free buckets, then puts them on the various freelists.
506 static void verify_not_on_freelist(struct bch_fs *c, struct bch_dev *ca,
509 if (expensive_debug_checks(c) &&
510 test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags)) {
515 for (j = 0; j < RESERVE_NR; j++)
516 fifo_for_each_entry(i, &ca->free[j], iter)
518 fifo_for_each_entry(i, &ca->free_inc, iter)
523 #define BUCKET_GC_GEN_MAX 96U
526 * wait_buckets_available - wait on reclaimable buckets
528 * If there aren't enough available buckets to fill up free_inc, wait until
529 * there are.
531 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
533 unsigned long gc_count = c->gc_count;
537 set_current_state(TASK_INTERRUPTIBLE);
538 if (kthread_should_stop()) {
543 if (gc_count != c->gc_count)
544 ca->inc_gen_really_needs_gc = 0;
546 if ((ssize_t) (dev_buckets_available(c, ca) -
547 ca->inc_gen_really_needs_gc) >=
548 (ssize_t) fifo_free(&ca->free_inc))
551 up_read(&c->gc_lock);
554 down_read(&c->gc_lock);
557 __set_current_state(TASK_RUNNING);
561 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
563 struct bucket_mark mark)
567 if (!is_available_bucket(mark))
570 gc_gen = bucket_gc_gen(ca, bucket);
572 if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
573 ca->inc_gen_needs_gc++;
575 if (gc_gen >= BUCKET_GC_GEN_MAX)
576 ca->inc_gen_really_needs_gc++;
578 return gc_gen < BUCKET_GC_GEN_MAX;
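/*
 * Example (illustrative): with BUCKET_GC_GEN_MAX = 96, a bucket whose gen has
 * been bumped 48 times since the last mark-and-sweep starts counting towards
 * inc_gen_needs_gc; once it hits 96 it also counts towards
 * inc_gen_really_needs_gc and is skipped entirely until gc brings
 * bucket_gc_gen() back down.
 */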
581 static void bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
584 struct bucket_mark m;
586 spin_lock(&c->freelist_lock);
587 if (!bch2_invalidate_bucket(c, ca, bucket, &m)) {
588 spin_unlock(&c->freelist_lock);
592 verify_not_on_freelist(c, ca, bucket);
593 BUG_ON(!fifo_push(&ca->free_inc, bucket));
594 spin_unlock(&c->freelist_lock);
597 bucket_io_clock_reset(c, ca, bucket, READ);
598 bucket_io_clock_reset(c, ca, bucket, WRITE);
600 if (m.cached_sectors) {
601 ca->allocator_invalidating_data = true;
602 } else if (m.journal_seq_valid) {
603 u64 journal_seq = atomic64_read(&c->journal.seq);
604 u64 bucket_seq = journal_seq;
606 bucket_seq &= ~((u64) U16_MAX);
607 bucket_seq |= m.journal_seq;
609 if (bucket_seq > journal_seq)
610 bucket_seq -= 1 << 16;
612 ca->allocator_journal_seq_flush =
613 max(ca->allocator_journal_seq_flush, bucket_seq);
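/*
 * Worked example (illustrative): if the journal is at sequence 0x12345 and
 * the bucket only recorded the low 16 bits 0xf000, splicing them together
 * gives 0x1f000; that's greater than 0x12345, so we subtract 1 << 16 and
 * recover 0xf000 as the sequence the bucket's data was dirtied at.
 */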
618 * Determines what order we're going to reuse buckets, smallest bucket_key()
619 * first.
622 * - We take into account the read prio of the bucket, which gives us an
623 * indication of how hot the data is -- we scale the prio so that the prio
624 * farthest from the clock is worth 1/8th of the closest.
626 * - The number of sectors of cached data in the bucket, which gives us an
627 * indication of the cost in cache misses this eviction will cause.
629 * - If hotness * sectors used compares equal, we pick the bucket with the
630 * smallest bucket_gc_gen() - since incrementing the same bucket's generation
631 * number repeatedly forces us to run mark and sweep gc to avoid generation
632 * number wraparound:
635 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
636 size_t b, struct bucket_mark m)
638 unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
639 unsigned max_last_io = ca->max_last_bucket_io[READ];
642 * Time since last read, scaled to [0, 8) where larger value indicates
643 * more recently read data:
645 unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
647 /* How much we want to keep the data in this bucket: */
648 unsigned long data_wantness =
649 (hotness + 1) * bucket_sectors_used(m);
651 unsigned long needs_journal_commit =
652 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
654 return (data_wantness << 9) |
655 (needs_journal_commit << 8) |
656 bucket_gc_gen(ca, b);
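/*
 * Worked example (hypothetical numbers): with max_last_io = 1000 and
 * last_io = 100, hotness = (1000 - 100) * 7 / 1000 = 6; a bucket holding 32
 * cached sectors then gets data_wantness = (6 + 1) * 32 = 224, and the final
 * key is (224 << 9) | (needs_journal_commit << 8) | bucket_gc_gen() - so
 * colder, emptier buckets sort first and are invalidated first.
 */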
659 static inline int bucket_alloc_cmp(alloc_heap *h,
660 struct alloc_heap_entry l,
661 struct alloc_heap_entry r)
663 return (l.key > r.key) - (l.key < r.key) ?:
664 (l.nr < r.nr) - (l.nr > r.nr) ?:
665 (l.bucket > r.bucket) - (l.bucket < r.bucket);
668 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
670 struct bucket_array *buckets;
671 struct alloc_heap_entry e = { 0 };
674 ca->alloc_heap.used = 0;
676 mutex_lock(&c->bucket_clock[READ].lock);
677 down_read(&ca->bucket_lock);
679 buckets = bucket_array(ca);
681 bch2_recalc_oldest_io(c, ca, READ);
684 * Find buckets with lowest read priority, by building a maxheap sorted
685 * by read priority and repeatedly replacing the maximum element until
686 * all buckets have been visited.
688 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
689 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
690 unsigned long key = bucket_sort_key(c, ca, b, m);
692 if (!bch2_can_invalidate_bucket(ca, b, m))
695 if (e.nr && e.bucket + e.nr == b && e.key == key) {
699 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
701 e = (struct alloc_heap_entry) {
712 heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
714 up_read(&ca->bucket_lock);
715 mutex_unlock(&c->bucket_clock[READ].lock);
717 heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
719 while (heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp)) {
723 if (fifo_full(&ca->free_inc))
726 bch2_invalidate_one_bucket(c, ca, b);
731 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
733 struct bucket_array *buckets = bucket_array(ca);
734 struct bucket_mark m;
738 checked < ca->mi.nbuckets && !fifo_full(&ca->free_inc);
740 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
741 ca->fifo_last_bucket >= ca->mi.nbuckets)
742 ca->fifo_last_bucket = ca->mi.first_bucket;
744 b = ca->fifo_last_bucket++;
746 m = READ_ONCE(buckets->b[b].mark);
748 if (bch2_can_invalidate_bucket(ca, b, m))
749 bch2_invalidate_one_bucket(c, ca, b);
755 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
757 struct bucket_array *buckets = bucket_array(ca);
758 struct bucket_mark m;
762 checked < ca->mi.nbuckets / 2 && !fifo_full(&ca->free_inc);
764 size_t b = bch2_rand_range(ca->mi.nbuckets -
765 ca->mi.first_bucket) +
768 m = READ_ONCE(buckets->b[b].mark);
770 if (bch2_can_invalidate_bucket(ca, b, m))
771 bch2_invalidate_one_bucket(c, ca, b);
777 static void find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
779 ca->inc_gen_needs_gc = 0;
780 ca->inc_gen_really_needs_gc = 0;
782 switch (ca->mi.replacement) {
783 case CACHE_REPLACEMENT_LRU:
784 find_reclaimable_buckets_lru(c, ca);
786 case CACHE_REPLACEMENT_FIFO:
787 find_reclaimable_buckets_fifo(c, ca);
789 case CACHE_REPLACEMENT_RANDOM:
790 find_reclaimable_buckets_random(c, ca);
795 static int size_t_cmp(const void *_l, const void *_r)
797 const size_t *l = _l, *r = _r;
799 return (*l > *r) - (*l < *r);
802 static void sort_free_inc(struct bch_fs *c, struct bch_dev *ca)
804 BUG_ON(ca->free_inc.front);
806 spin_lock(&c->freelist_lock);
807 sort(ca->free_inc.data,
809 sizeof(ca->free_inc.data[0]),
811 spin_unlock(&c->freelist_lock);
814 static int bch2_invalidate_free_inc(struct bch_fs *c, struct bch_dev *ca,
815 u64 *journal_seq, size_t nr)
817 struct btree_iter iter;
820 bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
821 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
824 * XXX: if ca->nr_invalidated != 0, just return if we'd block doing the
825 * btree update or journal_res_get
827 while (ca->nr_invalidated < min(nr, fifo_used(&ca->free_inc))) {
828 size_t b = fifo_idx_entry(&ca->free_inc, ca->nr_invalidated);
830 ret = __bch2_alloc_write_key(c, ca, b, &iter, journal_seq);
834 ca->nr_invalidated++;
837 bch2_btree_iter_unlock(&iter);
841 static bool __push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
846 * Don't remove from free_inc until after it's added to
847 * freelist, so gc can find it:
849 spin_lock(&c->freelist_lock);
850 for (i = 0; i < RESERVE_NR; i++)
851 if (fifo_push(&ca->free[i], bucket)) {
852 fifo_pop(&ca->free_inc, bucket);
853 --ca->nr_invalidated;
854 closure_wake_up(&c->freelist_wait);
855 spin_unlock(&c->freelist_lock);
858 spin_unlock(&c->freelist_lock);
863 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
868 set_current_state(TASK_INTERRUPTIBLE);
870 if (__push_invalidated_bucket(c, ca, bucket))
873 if ((current->flags & PF_KTHREAD) &&
874 kthread_should_stop()) {
883 __set_current_state(TASK_RUNNING);
888 * Given an invalidated, ready to use bucket: issue a discard to it if enabled,
889 * then add it to the freelist, waiting until there's room if necessary:
891 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
893 while (ca->nr_invalidated) {
894 size_t bucket = fifo_peek(&ca->free_inc);
896 BUG_ON(fifo_empty(&ca->free_inc) || !ca->nr_invalidated);
898 if (ca->mi.discard &&
899 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
900 blkdev_issue_discard(ca->disk_sb.bdev,
901 bucket_to_sector(ca, bucket),
902 ca->mi.bucket_size, GFP_NOIO, 0);
904 if (push_invalidated_bucket(c, ca, bucket))
912 * bch_allocator_thread - move buckets from free_inc to reserves
914 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
915 * the reserves are depleted by bucket allocation. When we run out
916 * of free_inc, try to invalidate some buckets and write out
917 * the new priorities and gens.
919 static int bch2_allocator_thread(void *arg)
921 struct bch_dev *ca = arg;
922 struct bch_fs *c = ca->fs;
932 pr_debug("discarding %zu invalidated buckets",
935 ret = discard_invalidated_buckets(c, ca);
939 if (fifo_empty(&ca->free_inc))
942 pr_debug("invalidating %zu buckets",
943 fifo_used(&ca->free_inc));
946 ret = bch2_invalidate_free_inc(c, ca, &journal_seq, SIZE_MAX);
948 bch_err(ca, "error invalidating buckets: %i", ret);
952 if (!ca->nr_invalidated) {
953 bch_err(ca, "allocator thread unable to make forward progress!");
957 if (ca->allocator_invalidating_data)
958 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
959 else if (ca->allocator_journal_seq_flush)
960 ret = bch2_journal_flush_seq(&c->journal,
961 ca->allocator_journal_seq_flush);
964 * journal error - buckets haven't actually been
965 * invalidated, can't discard them:
968 bch_err(ca, "journal error: %i", ret);
973 pr_debug("free_inc now empty");
975 /* Reset front/back so we can easily sort fifo entries later: */
976 ca->free_inc.front = ca->free_inc.back = 0;
977 ca->allocator_journal_seq_flush = 0;
978 ca->allocator_invalidating_data = false;
980 down_read(&c->gc_lock);
982 size_t prev = fifo_used(&ca->free_inc);
984 if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
985 up_read(&c->gc_lock);
986 bch_err(ca, "gc failure");
991 * Find some buckets that we can invalidate, either
992 * they're completely unused, or only contain clean data
993 * that's been written back to the backing device or
994 * another cache tier
997 pr_debug("scanning for reclaimable buckets");
999 find_reclaimable_buckets(c, ca);
1001 pr_debug("found %zu buckets (free_inc %zu/%zu)",
1002 fifo_used(&ca->free_inc) - prev,
1003 fifo_used(&ca->free_inc), ca->free_inc.size);
1005 trace_alloc_batch(ca, fifo_used(&ca->free_inc),
1008 if ((ca->inc_gen_needs_gc >= ca->free_inc.size ||
1009 (!fifo_full(&ca->free_inc) &&
1010 ca->inc_gen_really_needs_gc >=
1011 fifo_free(&ca->free_inc))) &&
1013 atomic_inc(&c->kick_gc);
1014 wake_up_process(c->gc_thread);
1017 if (fifo_full(&ca->free_inc))
1020 if (!fifo_empty(&ca->free_inc) &&
1021 !fifo_full(&ca->free[RESERVE_MOVINGGC]))
1025 * copygc may be waiting until either its reserve fills
1026 * up, or we can't make forward progress:
1028 ca->allocator_blocked = true;
1029 closure_wake_up(&c->freelist_wait);
1031 ret = wait_buckets_available(c, ca);
1033 up_read(&c->gc_lock);
1038 ca->allocator_blocked = false;
1039 up_read(&c->gc_lock);
1041 pr_debug("free_inc now %zu/%zu",
1042 fifo_used(&ca->free_inc),
1045 sort_free_inc(c, ca);
1048 * free_inc is now full of newly-invalidated buckets: next,
1049 * write out the new bucket gens:
1054 pr_debug("alloc thread stopping (ret %i)", ret);
1061 * Open buckets represent a bucket that's currently being allocated from. They
1062 * serve two purposes:
1064 * - They track buckets that have been partially allocated, allowing for
1065 * sub-bucket sized allocations - they're used by the sector allocator below
1067 * - They provide a reference to the buckets they own that mark and sweep GC
1068 * can find, until the new allocation has a pointer to it inserted into the
1069 * btree
1071 * When allocating some space with the sector allocator, the allocation comes
1072 * with a reference to an open bucket - the caller is required to put that
1073 * reference _after_ doing the index update that makes its allocation reachable.
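/*
 * Minimal sketch of the ordering requirement above (hypothetical caller, not
 * part of this file):
 *
 *	allocate space				-> take open_bucket reference
 *	write the data
 *	insert the extent key into the btree	-> allocation now reachable
 *	bch2_open_bucket_put()			-> only now drop the reference,
 *						   so GC can always find the bucket
 */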
1076 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
1078 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1080 spin_lock(&ob->lock);
1081 bch2_mark_alloc_bucket(c, ca, PTR_BUCKET_NR(ca, &ob->ptr),
1082 false, gc_pos_alloc(c, ob), 0);
1084 spin_unlock(&ob->lock);
1086 spin_lock(&c->freelist_lock);
1087 ob->freelist = c->open_buckets_freelist;
1088 c->open_buckets_freelist = ob - c->open_buckets;
1089 c->open_buckets_nr_free++;
1090 spin_unlock(&c->freelist_lock);
1092 closure_wake_up(&c->open_buckets_wait);
1095 static struct open_bucket *bch2_open_bucket_alloc(struct bch_fs *c)
1097 struct open_bucket *ob;
1099 BUG_ON(!c->open_buckets_freelist || !c->open_buckets_nr_free);
1101 ob = c->open_buckets + c->open_buckets_freelist;
1102 c->open_buckets_freelist = ob->freelist;
1103 atomic_set(&ob->pin, 1);
1105 c->open_buckets_nr_free--;
1109 /* _only_ for allocating the journal on a new device: */
1110 long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
1112 struct bucket_array *buckets;
1116 buckets = bucket_array(ca);
1118 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++)
1119 if (is_available_bucket(buckets->b[b].mark))
1127 static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
1133 return BTREE_NODE_RESERVE / 2;
1135 return BTREE_NODE_RESERVE;
1140 * bch2_bucket_alloc - allocate a single bucket from a specific device
1142 * Returns index of the allocated open bucket on success, negative error code on failure
1144 int bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
1145 enum alloc_reserve reserve,
1146 bool may_alloc_partial,
1149 struct bucket_array *buckets;
1150 struct open_bucket *ob;
1153 spin_lock(&c->freelist_lock);
1154 if (may_alloc_partial &&
1155 ca->open_buckets_partial_nr) {
1156 int ret = ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1157 c->open_buckets[ret].on_partial_list = false;
1158 spin_unlock(&c->freelist_lock);
1162 if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
1164 closure_wait(&c->open_buckets_wait, cl);
1165 spin_unlock(&c->freelist_lock);
1166 trace_open_bucket_alloc_fail(ca, reserve);
1167 return OPEN_BUCKETS_EMPTY;
1170 if (likely(fifo_pop(&ca->free[RESERVE_NONE], bucket)))
1175 if (fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1179 if (fifo_used(&ca->free[RESERVE_BTREE]) * 2 >=
1180 ca->free[RESERVE_BTREE].size &&
1181 fifo_pop(&ca->free[RESERVE_BTREE], bucket))
1184 case RESERVE_MOVINGGC:
1185 if (fifo_pop(&ca->free[RESERVE_MOVINGGC], bucket))
1193 closure_wait(&c->freelist_wait, cl);
1195 spin_unlock(&c->freelist_lock);
1197 trace_bucket_alloc_fail(ca, reserve);
1198 return FREELIST_EMPTY;
1200 verify_not_on_freelist(c, ca, bucket);
1202 ob = bch2_open_bucket_alloc(c);
1204 spin_lock(&ob->lock);
1205 lg_local_lock(&c->usage_lock);
1206 buckets = bucket_array(ca);
1209 ob->sectors_free = ca->mi.bucket_size;
1210 ob->ptr = (struct bch_extent_ptr) {
1211 .gen = buckets->b[bucket].mark.gen,
1212 .offset = bucket_to_sector(ca, bucket),
1216 bucket_io_clock_reset(c, ca, bucket, READ);
1217 bucket_io_clock_reset(c, ca, bucket, WRITE);
1219 lg_local_unlock(&c->usage_lock);
1220 spin_unlock(&ob->lock);
1222 spin_unlock(&c->freelist_lock);
1224 bch2_wake_allocator(ca);
1226 trace_bucket_alloc(ca, reserve);
1227 return ob - c->open_buckets;
1230 static int __dev_alloc_cmp(struct write_point *wp,
1231 unsigned l, unsigned r)
1233 return ((wp->next_alloc[l] > wp->next_alloc[r]) -
1234 (wp->next_alloc[l] < wp->next_alloc[r]));
1237 #define dev_alloc_cmp(l, r) __dev_alloc_cmp(wp, l, r)
1239 struct dev_alloc_list bch2_wp_alloc_list(struct bch_fs *c,
1240 struct write_point *wp,
1241 struct bch_devs_mask *devs)
1243 struct dev_alloc_list ret = { .nr = 0 };
1247 for_each_member_device_rcu(ca, c, i, devs)
1248 ret.devs[ret.nr++] = i;
1250 bubble_sort(ret.devs, ret.nr, dev_alloc_cmp);
1254 void bch2_wp_rescale(struct bch_fs *c, struct bch_dev *ca,
1255 struct write_point *wp)
1257 u64 *v = wp->next_alloc + ca->dev_idx;
1258 u64 free_space = dev_buckets_free(c, ca);
1259 u64 free_space_inv = free_space
1260 ? div64_u64(1ULL << 48, free_space)
1264 if (*v + free_space_inv >= *v)
1265 *v += free_space_inv;
1269 for (v = wp->next_alloc;
1270 v < wp->next_alloc + ARRAY_SIZE(wp->next_alloc); v++)
1271 *v = *v < scale ? 0 : *v - scale;
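/*
 * Illustrative example (hypothetical numbers): a device with 2^22 free
 * buckets gets free_space_inv = 2^48 / 2^22 = 2^26 added to its next_alloc
 * key, while a fuller device with only 2^20 free buckets gets 2^28 - so
 * emptier devices accumulate cost more slowly, keep smaller keys, and sort
 * earlier in bch2_wp_alloc_list(), biasing new allocations towards them.
 */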
1274 static enum bucket_alloc_ret bch2_bucket_alloc_set(struct bch_fs *c,
1275 struct write_point *wp,
1276 unsigned nr_replicas,
1277 enum alloc_reserve reserve,
1278 struct bch_devs_mask *devs,
1281 enum bucket_alloc_ret ret = NO_DEVICES;
1282 struct dev_alloc_list devs_sorted;
1284 unsigned i, nr_ptrs_effective = 0;
1285 bool have_cache_dev = false;
1287 BUG_ON(nr_replicas > ARRAY_SIZE(wp->ptrs));
1289 for (i = wp->first_ptr; i < wp->nr_ptrs; i++) {
1290 ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);
1292 nr_ptrs_effective += ca->mi.durability;
1293 have_cache_dev |= !ca->mi.durability;
1296 if (nr_ptrs_effective >= nr_replicas)
1297 return ALLOC_SUCCESS;
1300 devs_sorted = bch2_wp_alloc_list(c, wp, devs);
1302 for (i = 0; i < devs_sorted.nr; i++) {
1305 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1309 if (!ca->mi.durability &&
1311 wp->type != BCH_DATA_USER))
1314 ob = bch2_bucket_alloc(c, ca, reserve,
1315 wp->type == BCH_DATA_USER, cl);
1318 if (ret == OPEN_BUCKETS_EMPTY)
1323 BUG_ON(ob <= 0 || ob > U8_MAX);
1324 BUG_ON(wp->nr_ptrs >= ARRAY_SIZE(wp->ptrs));
1326 wp->ptrs[wp->nr_ptrs++] = c->open_buckets + ob;
1328 bch2_wp_rescale(c, ca, wp);
1330 nr_ptrs_effective += ca->mi.durability;
1331 have_cache_dev |= !ca->mi.durability;
1333 __clear_bit(ca->dev_idx, devs->d);
1335 if (nr_ptrs_effective >= nr_replicas) {
1336 ret = ALLOC_SUCCESS;
1342 EBUG_ON(reserve == RESERVE_MOVINGGC &&
1343 ret != ALLOC_SUCCESS &&
1344 ret != OPEN_BUCKETS_EMPTY);
1351 case FREELIST_EMPTY:
1352 case OPEN_BUCKETS_EMPTY:
1353 return cl ? -EAGAIN : -ENOSPC;
1359 /* Sector allocator */
1361 static void writepoint_drop_ptr(struct bch_fs *c,
1362 struct write_point *wp,
1365 struct open_bucket *ob = wp->ptrs[i];
1366 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1368 BUG_ON(ca->open_buckets_partial_nr >=
1369 ARRAY_SIZE(ca->open_buckets_partial));
1371 if (wp->type == BCH_DATA_USER) {
1372 spin_lock(&c->freelist_lock);
1373 ob->on_partial_list = true;
1374 ca->open_buckets_partial[ca->open_buckets_partial_nr++] =
1375 ob - c->open_buckets;
1376 spin_unlock(&c->freelist_lock);
1378 closure_wake_up(&c->open_buckets_wait);
1379 closure_wake_up(&c->freelist_wait);
1381 bch2_open_bucket_put(c, ob);
1384 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1386 if (i < wp->first_ptr)
1390 static void writepoint_drop_ptrs(struct bch_fs *c,
1391 struct write_point *wp,
1392 u16 target, bool in_target)
1396 for (i = wp->first_ptr - 1; i >= 0; --i) {
1397 struct bch_dev *ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);
1399 if (dev_in_target(ca, target) == in_target)
1400 writepoint_drop_ptr(c, wp, i);
1404 static void verify_not_stale(struct bch_fs *c, const struct write_point *wp)
1406 #ifdef CONFIG_BCACHEFS_DEBUG
1407 struct open_bucket *ob;
1410 writepoint_for_each_ptr_all(wp, ob, i) {
1411 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1413 BUG_ON(ptr_stale(ca, &ob->ptr));
1418 static int open_bucket_add_buckets(struct bch_fs *c,
1420 struct write_point *wp,
1421 struct bch_devs_list *devs_have,
1422 unsigned nr_replicas,
1423 enum alloc_reserve reserve,
1426 struct bch_devs_mask devs = c->rw_devs[wp->type];
1427 struct open_bucket *ob;
1430 /* Don't allocate from devices we already have pointers to: */
1431 for (i = 0; i < devs_have->nr; i++)
1432 __clear_bit(devs_have->devs[i], devs.d);
1434 writepoint_for_each_ptr_all(wp, ob, i)
1435 __clear_bit(ob->ptr.dev, devs.d);
1438 const struct bch_devs_mask *t;
1441 t = bch2_target_to_mask(c, target);
1443 bitmap_and(devs.d, devs.d, t->d, BCH_SB_MEMBERS_MAX);
1447 return bch2_bucket_alloc_set(c, wp, nr_replicas, reserve, &devs, cl);
1450 static struct write_point *__writepoint_find(struct hlist_head *head,
1451 unsigned long write_point)
1453 struct write_point *wp;
1455 hlist_for_each_entry_rcu(wp, head, node)
1456 if (wp->write_point == write_point)
1462 static struct hlist_head *writepoint_hash(struct bch_fs *c,
1463 unsigned long write_point)
1466 hash_long(write_point, ilog2(ARRAY_SIZE(c->write_points_hash)));
1468 return &c->write_points_hash[hash];
1471 static struct write_point *writepoint_find(struct bch_fs *c,
1472 unsigned long write_point)
1474 struct write_point *wp, *oldest;
1475 struct hlist_head *head;
1477 if (!(write_point & 1UL)) {
1478 wp = (struct write_point *) write_point;
1479 mutex_lock(&wp->lock);
1483 head = writepoint_hash(c, write_point);
1485 wp = __writepoint_find(head, write_point);
1488 mutex_lock(&wp->lock);
1489 if (wp->write_point == write_point)
1491 mutex_unlock(&wp->lock);
1496 for (wp = c->write_points;
1497 wp < c->write_points + ARRAY_SIZE(c->write_points);
1499 if (!oldest || time_before64(wp->last_used, oldest->last_used))
1502 mutex_lock(&oldest->lock);
1503 mutex_lock(&c->write_points_hash_lock);
1504 wp = __writepoint_find(head, write_point);
1505 if (wp && wp != oldest) {
1506 mutex_unlock(&c->write_points_hash_lock);
1507 mutex_unlock(&oldest->lock);
1512 hlist_del_rcu(&wp->node);
1513 wp->write_point = write_point;
1514 hlist_add_head_rcu(&wp->node, head);
1515 mutex_unlock(&c->write_points_hash_lock);
1517 wp->last_used = sched_clock();
1522 * Get us an open_bucket we can allocate from, return with it locked:
1524 struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
1526 struct write_point_specifier write_point,
1527 struct bch_devs_list *devs_have,
1528 unsigned nr_replicas,
1529 unsigned nr_replicas_required,
1530 enum alloc_reserve reserve,
1534 struct write_point *wp;
1535 struct open_bucket *ob;
1537 unsigned nr_ptrs_have, nr_ptrs_effective;
1538 int ret, i, cache_idx = -1;
1540 BUG_ON(!nr_replicas || !nr_replicas_required);
1542 wp = writepoint_find(c, write_point.v);
1546 /* does writepoint have ptrs we can't use? */
1547 writepoint_for_each_ptr(wp, ob, i)
1548 if (bch2_dev_list_has_dev(*devs_have, ob->ptr.dev)) {
1549 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1553 nr_ptrs_have = wp->first_ptr;
1555 /* does writepoint have ptrs we don't want to use? */
1557 writepoint_for_each_ptr(wp, ob, i)
1558 if (!dev_idx_in_target(c, ob->ptr.dev, target)) {
1559 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1563 if (flags & BCH_WRITE_ONLY_SPECIFIED_DEVS) {
1564 ret = open_bucket_add_buckets(c, target, wp, devs_have,
1565 nr_replicas, reserve, cl);
1567 ret = open_bucket_add_buckets(c, target, wp, devs_have,
1568 nr_replicas, reserve, NULL);
1572 wp->first_ptr = nr_ptrs_have;
1574 ret = open_bucket_add_buckets(c, 0, wp, devs_have,
1575 nr_replicas, reserve, cl);
1578 if (ret && ret != -EROFS)
1581 /* check for more than one cache: */
1582 for (i = wp->nr_ptrs - 1; i >= wp->first_ptr; --i) {
1583 ca = bch_dev_bkey_exists(c, wp->ptrs[i]->ptr.dev);
1585 if (ca->mi.durability)
1589 * if we ended up with more than one cache device, prefer the
1590 * one in the target we want:
1592 if (cache_idx >= 0) {
1593 if (!dev_in_target(ca, target)) {
1594 writepoint_drop_ptr(c, wp, i);
1596 writepoint_drop_ptr(c, wp, cache_idx);
1604 /* we might have more effective replicas than required: */
1605 nr_ptrs_effective = 0;
1606 writepoint_for_each_ptr(wp, ob, i) {
1607 ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1608 nr_ptrs_effective += ca->mi.durability;
1611 if (ret == -EROFS &&
1612 nr_ptrs_effective >= nr_replicas_required)
1618 if (nr_ptrs_effective > nr_replicas) {
1619 writepoint_for_each_ptr(wp, ob, i) {
1620 ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1622 if (ca->mi.durability &&
1623 ca->mi.durability <= nr_ptrs_effective - nr_replicas &&
1624 !dev_idx_in_target(c, ob->ptr.dev, target)) {
1625 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1627 nr_ptrs_effective -= ca->mi.durability;
1632 if (nr_ptrs_effective > nr_replicas) {
1633 writepoint_for_each_ptr(wp, ob, i) {
1634 ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1636 if (ca->mi.durability &&
1637 ca->mi.durability <= nr_ptrs_effective - nr_replicas) {
1638 swap(wp->ptrs[i], wp->ptrs[wp->first_ptr]);
1640 nr_ptrs_effective -= ca->mi.durability;
1645 /* Remove pointers we don't want to use: */
1647 writepoint_drop_ptrs(c, wp, target, false);
1649 BUG_ON(wp->first_ptr >= wp->nr_ptrs);
1650 BUG_ON(nr_ptrs_effective < nr_replicas_required);
1652 wp->sectors_free = UINT_MAX;
1654 writepoint_for_each_ptr(wp, ob, i)
1655 wp->sectors_free = min(wp->sectors_free, ob->sectors_free);
1657 BUG_ON(!wp->sectors_free || wp->sectors_free == UINT_MAX);
1659 verify_not_stale(c, wp);
1663 mutex_unlock(&wp->lock);
1664 return ERR_PTR(ret);
1668 * Append pointers to the space we just allocated to @e, and mark @sectors space
1669 * as allocated out of @wp's open buckets
1671 void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
1672 struct bkey_i_extent *e, unsigned sectors)
1674 struct open_bucket *ob;
1677 BUG_ON(sectors > wp->sectors_free);
1678 wp->sectors_free -= sectors;
1680 writepoint_for_each_ptr(wp, ob, i) {
1681 struct bch_dev *ca = bch_dev_bkey_exists(c, ob->ptr.dev);
1682 struct bch_extent_ptr tmp = ob->ptr;
1684 EBUG_ON(bch2_extent_has_device(extent_i_to_s_c(e), ob->ptr.dev));
1686 tmp.cached = bkey_extent_is_cached(&e->k) ||
1687 (!ca->mi.durability && wp->type == BCH_DATA_USER);
1689 tmp.offset += ca->mi.bucket_size - ob->sectors_free;
1690 extent_ptr_append(e, tmp);
1692 BUG_ON(sectors > ob->sectors_free);
1693 ob->sectors_free -= sectors;
1698 * Release the write point: drop our references to any open buckets that are
1699 * now full, and unlock the write point so other threads can allocate from it
1701 void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
1705 for (i = wp->nr_ptrs - 1; i >= 0; --i) {
1706 struct open_bucket *ob = wp->ptrs[i];
1708 if (!ob->sectors_free) {
1709 array_remove_item(wp->ptrs, wp->nr_ptrs, i);
1710 bch2_open_bucket_put(c, ob);
1714 mutex_unlock(&wp->lock);
1717 /* Startup/shutdown (ro/rw): */
1719 void bch2_recalc_capacity(struct bch_fs *c)
1722 u64 total_capacity, capacity = 0, reserved_sectors = 0;
1723 unsigned long ra_pages = 0;
1726 lockdep_assert_held(&c->state_lock);
1728 for_each_online_member(ca, c, i) {
1729 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1731 ra_pages += bdi->ra_pages;
1734 bch2_set_ra_pages(c, ra_pages);
1736 for_each_rw_member(ca, c, i) {
1740 * We need to reserve buckets (from the number
1741 * of currently available buckets) against
1742 * foreground writes so that mainly copygc can
1743 * make forward progress.
1745 * We need enough to refill the various reserves
1746 * from scratch - copygc will use its entire
1747 * reserve all at once, then run again once
1748 * its reserve is refilled (from the formerly
1749 * available buckets).
1751 * This reserve is just used when considering if
1752 * allocations for foreground writes must wait -
1753 * not -ENOSPC calculations.
1755 for (j = 0; j < RESERVE_NONE; j++)
1756 reserve += ca->free[j].size;
1758 reserve += ca->free_inc.size;
1760 reserve += ARRAY_SIZE(c->write_points);
1762 reserve += 1; /* btree write point */
1764 reserved_sectors += bucket_to_sector(ca, reserve);
1766 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1767 ca->mi.first_bucket);
1770 total_capacity = capacity;
1772 capacity *= (100 - c->opts.gc_reserve_percent);
1773 capacity = div64_u64(capacity, 100);
1775 BUG_ON(reserved_sectors > total_capacity);
1777 capacity = min(capacity, total_capacity - reserved_sectors);
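/*
 * Worked example (hypothetical numbers): with 1000 GB of raw bucket capacity,
 * gc_reserve_percent = 8 and 5 GB worth of reserved sectors, we get
 * min(1000 * 92 / 100, 1000 - 5) = min(920, 995) = 920 GB of usable capacity.
 */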
1779 c->capacity = capacity;
1782 bch2_io_timer_add(&c->io_clock[READ],
1783 &c->bucket_clock[READ].rescale);
1784 bch2_io_timer_add(&c->io_clock[WRITE],
1785 &c->bucket_clock[WRITE].rescale);
1787 bch2_io_timer_del(&c->io_clock[READ],
1788 &c->bucket_clock[READ].rescale);
1789 bch2_io_timer_del(&c->io_clock[WRITE],
1790 &c->bucket_clock[WRITE].rescale);
1793 /* Wake up in case someone was waiting for buckets */
1794 closure_wake_up(&c->freelist_wait);
1797 static void bch2_stop_write_point(struct bch_fs *c, struct bch_dev *ca,
1798 struct write_point *wp)
1800 struct bch_devs_mask not_self;
1802 bitmap_complement(not_self.d, ca->self.d, BCH_SB_MEMBERS_MAX);
1804 mutex_lock(&wp->lock);
1805 wp->first_ptr = wp->nr_ptrs;
1806 writepoint_drop_ptrs(c, wp, dev_to_target(ca->dev_idx), true);
1807 mutex_unlock(&wp->lock);
1810 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1812 struct open_bucket *ob;
1815 for (ob = c->open_buckets;
1816 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1818 spin_lock(&ob->lock);
1819 if (ob->valid && !ob->on_partial_list &&
1820 ob->ptr.dev == ca->dev_idx)
1822 spin_unlock(&ob->lock);
1828 /* device goes ro: */
1829 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1833 BUG_ON(ca->alloc_thread);
1835 /* First, remove device from allocation groups: */
1837 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1838 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1841 * Capacity is calculated based off of devices in allocation groups:
1843 bch2_recalc_capacity(c);
1845 /* Next, close write points that point to this device... */
1846 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1847 bch2_stop_write_point(c, ca, &c->write_points[i]);
1849 bch2_stop_write_point(c, ca, &ca->copygc_write_point);
1850 bch2_stop_write_point(c, ca, &c->rebalance_write_point);
1851 bch2_stop_write_point(c, ca, &c->btree_write_point);
1853 mutex_lock(&c->btree_reserve_cache_lock);
1854 while (c->btree_reserve_cache_nr) {
1855 struct btree_alloc *a =
1856 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1858 bch2_open_bucket_put_refs(c, &a->ob.nr, a->ob.refs);
1860 mutex_unlock(&c->btree_reserve_cache_lock);
1863 * Wake up threads that were blocked on allocation, so they can notice
1864 * the device can no longer be removed and the capacity has changed:
1866 closure_wake_up(&c->freelist_wait);
1869 * journal_res_get() can block waiting for free space in the journal -
1870 * it needs to notice there may not be devices to allocate from anymore:
1872 wake_up(&c->journal.wait);
1874 /* Now wait for any in flight writes: */
1876 closure_wait_event(&c->open_buckets_wait,
1877 !bch2_dev_has_open_write_point(c, ca));
1880 /* device goes rw: */
1881 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1885 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1886 if (ca->mi.data_allowed & (1 << i))
1887 set_bit(ca->dev_idx, c->rw_devs[i].d);
1890 /* stop allocator thread: */
1891 void bch2_dev_allocator_stop(struct bch_dev *ca)
1893 struct task_struct *p = ca->alloc_thread;
1895 ca->alloc_thread = NULL;
1898 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1899 * the thread shutting down to avoid bch2_wake_allocator() racing:
1901 * XXX: it would be better to have the rcu barrier be asynchronous
1902 * instead of blocking us here
1912 /* start allocator thread: */
1913 int bch2_dev_allocator_start(struct bch_dev *ca)
1915 struct task_struct *p;
1918 * allocator thread already started?
1920 if (ca->alloc_thread)
1923 p = kthread_create(bch2_allocator_thread, ca,
1924 "bch_alloc[%s]", ca->name);
1929 ca->alloc_thread = p;
1934 static void allocator_start_issue_discards(struct bch_fs *c)
1940 for_each_rw_member(ca, c, dev_iter) {
1943 fifo_for_each_entry(bu, &ca->free_inc, i) {
1944 if (done == ca->nr_invalidated)
1947 blkdev_issue_discard(ca->disk_sb.bdev,
1948 bucket_to_sector(ca, bu),
1949 ca->mi.bucket_size, GFP_NOIO, 0);
1955 static int __bch2_fs_allocator_start(struct bch_fs *c)
1960 u64 journal_seq = 0;
1961 bool invalidating_data = false;
1964 if (test_bit(BCH_FS_GC_FAILURE, &c->flags))
1967 /* Scan for buckets that are already invalidated: */
1968 for_each_rw_member(ca, c, dev_iter) {
1969 struct btree_iter iter;
1970 struct bucket_mark m;
1973 for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
1974 if (k.k->type != BCH_ALLOC)
1978 m = READ_ONCE(bucket(ca, bu)->mark);
1980 if (!is_available_bucket(m) || m.cached_sectors)
1983 bch2_mark_alloc_bucket(c, ca, bu, true,
1984 gc_pos_alloc(c, NULL),
1985 BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
1986 BCH_BUCKET_MARK_GC_LOCK_HELD);
1988 fifo_push(&ca->free_inc, bu);
1989 ca->nr_invalidated++;
1991 if (fifo_full(&ca->free_inc))
1994 bch2_btree_iter_unlock(&iter);
1997 /* did we find enough buckets? */
1998 for_each_rw_member(ca, c, dev_iter)
1999 if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
2000 percpu_ref_put(&ca->io_ref);
2006 pr_debug("did not find enough empty buckets; issuing discards");
2008 /* clear out free_inc - find_reclaimable_buckets() assumes it's empty */
2009 for_each_rw_member(ca, c, dev_iter)
2010 discard_invalidated_buckets(c, ca);
2012 pr_debug("scanning for reclaimable buckets");
2014 for_each_rw_member(ca, c, dev_iter) {
2015 BUG_ON(!fifo_empty(&ca->free_inc));
2016 ca->free_inc.front = ca->free_inc.back = 0;
2018 find_reclaimable_buckets(c, ca);
2019 sort_free_inc(c, ca);
2021 invalidating_data |= ca->allocator_invalidating_data;
2023 fifo_for_each_entry(bu, &ca->free_inc, i)
2024 if (!fifo_push(&ca->free[RESERVE_BTREE], bu))
2028 pr_debug("done scanning for reclaimable buckets");
2031 * We're moving buckets to freelists _before_ they've been marked as
2032 * invalidated on disk - we have to so that we can allocate new btree
2033 * nodes to mark them as invalidated on disk.
2035 * However, we can't _write_ to any of these buckets yet - they might
2036 * have cached data in them, which is live until they're marked as
2037 * invalidated on disk:
2039 if (invalidating_data) {
2040 pr_debug("invalidating existing data");
2041 set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
2043 pr_debug("issuing discards");
2044 allocator_start_issue_discards(c);
2048 * XXX: it's possible for this to deadlock waiting on journal reclaim,
2049 * since we're holding btree writes. What then?
2052 for_each_rw_member(ca, c, dev_iter) {
2053 ret = bch2_invalidate_free_inc(c, ca, &journal_seq,
2054 ca->free[RESERVE_BTREE].size);
2056 percpu_ref_put(&ca->io_ref);
2061 if (invalidating_data) {
2062 pr_debug("flushing journal");
2064 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
2068 pr_debug("issuing discards");
2069 allocator_start_issue_discards(c);
2072 for_each_rw_member(ca, c, dev_iter)
2073 while (ca->nr_invalidated) {
2074 BUG_ON(!fifo_pop(&ca->free_inc, bu));
2075 ca->nr_invalidated--;
2078 set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
2080 /* now flush dirty btree nodes: */
2081 if (invalidating_data) {
2082 struct bucket_table *tbl;
2083 struct rhash_head *pos;
2086 size_t nr_pending_updates;
2088 clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
2090 pr_debug("flushing dirty btree nodes");
2093 flush_updates = false;
2094 nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);
2098 for_each_cached_btree(b, c, tbl, i, pos)
2099 if (btree_node_dirty(b) && (!b->written || b->level)) {
2100 if (btree_node_may_write(b)) {
2102 six_lock_read(&b->lock);
2103 bch2_btree_node_write(c, b, SIX_LOCK_read);
2104 six_unlock_read(&b->lock);
2107 flush_updates = true;
2113 * This is ugly, but it's needed to flush btree node writes
2114 * without spinning...
2116 if (flush_updates) {
2117 closure_wait_event(&c->btree_interior_update_wait,
2118 bch2_btree_interior_updates_nr_pending(c) <
2119 nr_pending_updates);
2127 int bch2_fs_allocator_start(struct bch_fs *c)
2133 down_read(&c->gc_lock);
2134 ret = __bch2_fs_allocator_start(c);
2135 up_read(&c->gc_lock);
2140 for_each_rw_member(ca, c, i) {
2141 ret = bch2_dev_allocator_start(ca);
2143 percpu_ref_put(&ca->io_ref);
2148 return bch2_alloc_write(c);
2151 void bch2_fs_allocator_init(struct bch_fs *c)
2153 struct open_bucket *ob;
2154 struct write_point *wp;
2156 mutex_init(&c->write_points_hash_lock);
2157 spin_lock_init(&c->freelist_lock);
2158 bch2_bucket_clock_init(c, READ);
2159 bch2_bucket_clock_init(c, WRITE);
2161 /* open bucket 0 is a sentinel NULL: */
2162 spin_lock_init(&c->open_buckets[0].lock);
2164 for (ob = c->open_buckets + 1;
2165 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
2166 spin_lock_init(&ob->lock);
2167 c->open_buckets_nr_free++;
2169 ob->freelist = c->open_buckets_freelist;
2170 c->open_buckets_freelist = ob - c->open_buckets;
2173 writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
2174 writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);
2176 for (wp = c->write_points;
2177 wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
2178 writepoint_init(wp, BCH_DATA_USER);
2180 wp->last_used = sched_clock();
2181 wp->write_point = (unsigned long) wp;
2182 hlist_add_head_rcu(&wp->node, writepoint_hash(c, wp->write_point));
2185 c->pd_controllers_update_seconds = 5;
2186 INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);