#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "journal_io.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */
static void pd_controllers_update(struct work_struct *work)
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;

		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
/* Persistent alloc info: */

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
	unsigned bytes = offsetof(struct bch_alloc, data);

	if (a->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		bytes += 2;
	if (a->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		bytes += 2;

	return DIV_ROUND_UP(bytes, sizeof(u64));
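/*
 * Rough worked example (added commentary, not from the original source):
 * assuming each timestamp field is stored as 2 bytes, as the
 * get_alloc_field(&d, 2) calls below suggest, a key with both fields set
 * carries offsetof(struct bch_alloc, data) + 2 + 2 bytes of value, rounded
 * up to whole u64s:
 *
 *	struct bch_alloc a = {
 *		.fields = (1 << BCH_ALLOC_FIELD_READ_TIME)|
 *			  (1 << BCH_ALLOC_FIELD_WRITE_TIME),
 *	};
 *	unsigned u64s = bch_alloc_val_u64s(&a);
 */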
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (bch_alloc_val_u64s(a.v) != bkey_val_u64s(a.k))
		return "incorrect value size";

	return "invalid type";
int bch2_alloc_to_text(struct bch_fs *c, char *buf,
		       size_t size, struct bkey_s_c k)
static inline unsigned get_alloc_field(const u8 **p, unsigned bytes)
		v = le16_to_cpup((void *) *p);
		v = le32_to_cpup((void *) *p);

static inline void put_alloc_field(u8 **p, unsigned bytes, unsigned v)
		*((__le16 *) *p) = cpu_to_le16(v);
		*((__le32 *) *p) = cpu_to_le32(v);
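/*
 * Illustration (added, not part of the original file): both helpers treat the
 * key's value as a packed stream of little-endian fields and advance a cursor
 * pointer by `bytes` on each call. Assuming struct bch_alloc exposes its
 * packed fields as a data[] member, as offsetof(struct bch_alloc, data) above
 * suggests, reading the optional timestamps back out might look roughly like
 * (read_time/write_time are illustration-only names):
 *
 *	const u8 *d = a.v->data;
 *	if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
 *		read_time = get_alloc_field(&d, 2);
 *	if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
 *		write_time = get_alloc_field(&d, 2);
 */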
static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
	struct bkey_s_c_alloc a;
	struct bucket_mark new;

	if (k.k->type != BCH_ALLOC)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read_preempt_disable(&c->usage_lock);

	g = bucket(ca, a.k->p.offset);
	bucket_cmpxchg(g, new, ({

	if (a.v->fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		g->io_time[READ] = get_alloc_field(&d, 2);
	if (a.v->fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		g->io_time[WRITE] = get_alloc_field(&d, 2);

	percpu_up_read_preempt_enable(&c->usage_lock);
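/*
 * bch2_alloc_read() below appears to walk every key in BTREE_ID_ALLOC plus
 * any newer alloc keys still sitting in the journal replay list, feeding each
 * one to bch2_alloc_read_key() so the in-memory bucket marks and IO times are
 * primed before the allocator starts, then recalculates the oldest IO times
 * for both clocks under the per-device bucket locks.
 */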
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
	struct journal_replay *r;
	struct btree_iter iter;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);
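/*
 * __bch2_alloc_write_key() below is roughly the inverse of
 * bch2_alloc_read_key(): it snapshots the in-memory bucket mark and IO times
 * under usage_lock, packs them into a BCH_ALLOC key on the stack (alloc_key
 * is padded to the largest possible value size), and inserts it at
 * POS(dev, bucket) through the caller's iterator.
 */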
static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, unsigned flags)
	struct bucket_mark m;
	__BKEY_PADDED(k, DIV_ROUND_UP(sizeof(struct bch_alloc), 8)) alloc_key;
	struct bkey_i_alloc *a;

	percpu_down_read_preempt_disable(&c->usage_lock);

	m = READ_ONCE(g->mark);
	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = POS(ca->dev_idx, b);

	set_bkey_val_u64s(&a->k, bch_alloc_val_u64s(&a->v));

	if (a->v.fields & (1 << BCH_ALLOC_FIELD_READ_TIME))
		put_alloc_field(&d, 2, g->io_time[READ]);
	if (a->v.fields & (1 << BCH_ALLOC_FIELD_WRITE_TIME))
		put_alloc_field(&d, 2, g->io_time[WRITE]);
	percpu_up_read_preempt_enable(&c->usage_lock);

	bch2_btree_iter_cond_resched(iter);

	bch2_btree_iter_set_pos(iter, a->k.p);

	return bch2_btree_insert_at(c, NULL, journal_seq,
				    BTREE_INSERT_USE_RESERVE|
				    BTREE_INSERT_USE_ALLOC_RESERVE|
				    BTREE_INSERT_ENTRY(iter, &a->k_i));
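/*
 * Two callers of __bch2_alloc_write_key() follow: bch2_alloc_replay_key()
 * rewrites a single bucket's key during journal replay, and bch2_alloc_write()
 * walks each device's buckets_dirty bitmap, rewriting every bucket whose
 * in-memory state has changed since it was last persisted.
 */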
int bch2_alloc_replay_key(struct bch_fs *c, struct bpos pos)
	struct btree_iter iter;

	if (pos.inode >= c->sb.nr_devices || !c->devs[pos.inode])

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (pos.offset >= ca->mi.nbuckets)

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	ret = __bch2_alloc_write_key(c, ca, pos.offset, &iter, NULL, 0);
	bch2_btree_iter_unlock(&iter);
int bch2_alloc_write(struct bch_fs *c)
	for_each_rw_member(ca, c, i) {
		struct btree_iter iter;
		unsigned long bucket;

		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		for_each_set_bit(bucket, ca->buckets_dirty, ca->mi.nbuckets) {
			ret = __bch2_alloc_write_key(c, ca, bucket,

			clear_bit(bucket, ca->buckets_dirty);
		}
		up_read(&ca->bucket_lock);
		bch2_btree_iter_unlock(&iter);

		percpu_ref_put(&ca->io_ref);
/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
static void bch2_inc_clock_hand(struct io_timer *timer)
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	/*
	 * we only increment when 0.1% of the filesystem capacity has been read
	 * or written to; this determines when it's time to advance the clock
	 * hands
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += capacity >> 10;

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->rescale.fn = bch2_inc_clock_hand;
	clock->rescale.expire = c->capacity >> 10;
	mutex_init(&clock->lock);
/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

#define BUCKET_GC_GEN_MAX	96U
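/*
 * Added note: bucket_gc_gen() is, roughly, how far a bucket's generation
 * number has advanced since mark-and-sweep GC last visited it. Capping it at
 * BUCKET_GC_GEN_MAX keeps the small on-disk generation counter from wrapping
 * before GC can re-mark pointers; the inc_gen_needs_gc counters bumped in
 * bch2_can_invalidate_bucket() below are what eventually kick the gc thread.
 */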
/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
	unsigned long gc_count = c->gc_count;

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))

		up_read(&c->gc_lock);

		down_read(&c->gc_lock);

	__set_current_state(TASK_RUNNING);
static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
	if (!is_available_bucket(mark))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */
static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);
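/*
 * Illustrative note (added, not from the original source): the returned key
 * packs three fields so a plain numeric compare gives the desired eviction
 * order - data_wantness dominates, then whether the bucket still needs a
 * journal commit, then a scaled gc_gen as tie-break. For example, an empty,
 * journal-clean bucket with gc_gen 0 yields key 0 and is reused first, while
 * a hot bucket full of cached data sorts far later:
 *
 *	key = (data_wantness << 9) | (needs_journal_commit << 8) | (gc_gen / 16);
 */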
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
	return  (l.key > r.key) - (l.key < r.key) ?:
		(l.nr < r.nr) - (l.nr > r.nr) ?:
		(l.bucket > r.bucket) - (l.bucket < r.bucket);

static inline int bucket_idx_cmp(const void *_l, const void *_r)
	const struct alloc_heap_entry *l = _l, *r = _r;

	return (l->bucket > r->bucket) - (l->bucket < r->bucket);
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {

			heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

			e = (struct alloc_heap_entry) {

	heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
			if (heap_full(&ca->alloc_heap))
				break;
		}
	} while (ca->fifo_last_bucket != start);
static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	     checked < ca->mi.nbuckets / 2;
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			   ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
			if (heap_full(&ca->alloc_heap))
				break;
		}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
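/*
 * Added note: the random policy can pick the same bucket more than once, so
 * the heap is sorted by bucket index and duplicate entries get nr = 0; those
 * zero-length entries are effectively ignored when buckets are later handed
 * out via next_alloc_bucket().
 */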
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;
static inline long next_alloc_bucket(struct bch_dev *ca)
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		size_t b = top->bucket;

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp);
static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
	struct bucket_mark m;

	percpu_down_read_preempt_disable(&c->usage_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read_preempt_enable(&c->usage_lock);

	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq = journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		*flush_seq = max(*flush_seq, bucket_seq);
	}

	return m.cached_sectors != 0;
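/*
 * Worked example (added for clarity): bucket_mark only stores the low 16 bits
 * of the journal sequence number that must be flushed before the bucket can
 * safely be reused. Above, those 16 bits are spliced into the current
 * sequence and, if the result lands in the future, knocked back by one 16-bit
 * epoch. E.g. with journal seq 0x1_0005 and m.journal_seq 0xfff0:
 *
 *	bucket_seq = (0x10005 & ~0xffffULL) | 0xfff0;	// 0x1fff0, "in the future"
 *	bucket_seq -= 1 << 16;				// 0x0fff0, the real flush target
 */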
/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
	struct btree_iter iter;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	       !fifo_full(&ca->free_inc) &&
	       (b = next_alloc_bucket(ca)) >= 0) {
		bch2_invalidate_one_bucket(c, ca, b, &journal_seq);

		ret = __bch2_alloc_write_key(c, ca, b, &iter,
				must_flush ? &journal_seq : NULL,
				!fifo_empty(&ca->free_inc) ? BTREE_INSERT_NOWAIT : 0);
	}

	bch2_btree_iter_unlock(&iter);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))

	bch_err(ca, "error invalidating buckets: %i", ret);

	ret = bch2_journal_flush_seq(&c->journal, journal_seq);

	bch_err(ca, "journal error: %i", ret);
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++)
			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);
				closure_wake_up(&c->freelist_wait);
				spin_unlock(&c->freelist_lock);
			}
		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {

	__set_current_state(TASK_RUNNING);
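/*
 * Added note: the loop above appears to try each freelist in RESERVE_* index
 * order, so the more restricted reserves are topped up before buckets land on
 * the general freelist; if every fifo is full the thread sleeps in
 * TASK_INTERRUPTIBLE until an allocation makes room, unless it is being asked
 * to stop.
 */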
/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
/**
 * bch_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out the new
 * bucket gens.
 */
static int bch2_allocator_thread(void *arg)
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);

		up_read(&c->gc_lock);

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);

		pr_debug("free_inc now empty");

		if (test_bit(BCH_FS_GC_FAILURE, &c->flags)) {
			up_read(&c->gc_lock);
			bch_err(ca, "gc failure");

		/*
		 * Find some buckets that we can invalidate, either
		 * they're completely unused, or only contain clean data
		 * that's been written back to the backing device or
		 * another cache tier
		 */

		pr_debug("scanning for reclaimable buckets");

		nr = find_reclaimable_buckets(c, ca);

		pr_debug("found %zu buckets", nr);

		trace_alloc_batch(ca, nr, ca->alloc_heap.size);

		if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
		     ca->inc_gen_really_needs_gc) &&
			atomic_inc(&c->kick_gc);
			wake_up_process(c->gc_thread);

		/*
		 * If we found any buckets, we have to invalidate them
		 * before we scan for more - but if we didn't find very
		 * many we may want to wait on more buckets being
		 * available so we don't spin:
		 */
		    (nr < ALLOC_SCAN_BATCH(ca) &&
		     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
			ca->allocator_blocked = true;
			closure_wake_up(&c->freelist_wait);

			ret = wait_buckets_available(c, ca);
			up_read(&c->gc_lock);

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */

	pr_debug("alloc thread stopping (ret %i)", ret);
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += ca->free_inc.size;

		dev_reserve += ARRAY_SIZE(c->write_points);

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */
		dev_reserve += WRITE_POINT_COUNT;

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;
	bch2_io_timer_add(&c->io_clock[READ],
			  &c->bucket_clock[READ].rescale);
	bch2_io_timer_add(&c->io_clock[WRITE],
			  &c->bucket_clock[WRITE].rescale);

	bch2_io_timer_del(&c->io_clock[READ],
			  &c->bucket_clock[READ].rescale);
	bch2_io_timer_del(&c->io_clock[WRITE],
			  &c->bucket_clock[WRITE].rescale);

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
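/*
 * Worked example for the gc_reserve computation above (added, with made-up
 * numbers): with 1 TiB of raw bucket capacity (2^31 sectors) and a
 * gc_reserve_percent of, say, 8, gc_reserve = 2^31 * 8 / 100, roughly 171M
 * sectors; whichever is larger of that and the summed per-device copygc
 * reserves is withheld, so c->capacity is what -ENOSPC accounting actually
 * sees.
 */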
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)

		spin_unlock(&ob->lock);
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);

	rcu_assign_pointer(ca->alloc_thread, p);
static void flush_held_btree_writes(struct bch_fs *c)
	struct bucket_table *tbl;
	struct rhash_head *pos;
	size_t i, nr_pending_updates;

	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	pr_debug("flushing dirty btree nodes");

	flush_updates = false;
	nr_pending_updates = bch2_btree_interior_updates_nr_pending(c);

	for_each_cached_btree(b, c, tbl, i, pos)
		if (btree_node_dirty(b) && (!b->written || b->level)) {
			if (btree_node_may_write(b)) {
				btree_node_lock_type(c, b, SIX_LOCK_read);
				bch2_btree_node_write(c, b, SIX_LOCK_read);
				six_unlock_read(&b->lock);
			}

			flush_updates = true;

	if (c->btree_roots_dirty)
		bch2_journal_meta(&c->journal);

	/*
	 * This is ugly, but it's needed to flush btree node writes
	 * without spinning...
	 */
	if (flush_updates) {
		closure_wait_event(&c->btree_interior_update_wait,
				   bch2_btree_interior_updates_nr_pending(c) <
				   nr_pending_updates);
static void allocator_start_issue_discards(struct bch_fs *c)
	for_each_rw_member(ca, c, dev_iter)
		while (fifo_pop(&ca->free_inc, bu))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO, 0);
static int __bch2_fs_allocator_start(struct bch_fs *c)
	u64 journal_seq = 0;
	bool invalidating_data = false;

	if (test_bit(BCH_FS_GC_FAILURE, &c->flags))

	if (test_alloc_startup(c)) {
		invalidating_data = true;

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct btree_iter iter;
		struct bucket_mark m;

		for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0), 0, k) {
			if (k.k->type != BCH_ALLOC)
				continue;

			m = READ_ONCE(bucket(ca, bu)->mark);

			if (!is_available_bucket(m) || m.cached_sectors)
				continue;

			percpu_down_read_preempt_disable(&c->usage_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					gc_pos_alloc(c, NULL),
					BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE|
					BCH_BUCKET_MARK_GC_LOCK_HELD);
			percpu_up_read_preempt_enable(&c->usage_lock);

			fifo_push(&ca->free_inc, bu);

			if (fifo_full(&ca->free_inc))
				break;
		}
		bch2_btree_iter_unlock(&iter);
	}

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (fifo_used(&ca->free_inc) < ca->free[RESERVE_BTREE].size) {
			percpu_ref_put(&ca->io_ref);

	pr_debug("did not find enough empty buckets; issuing discards");

	/* clear out free_inc, we'll be using it again below: */
	for_each_rw_member(ca, c, dev_iter)
		discard_invalidated_buckets(c, ca);

	pr_debug("scanning for reclaimable buckets");

	for_each_rw_member(ca, c, dev_iter) {
		find_reclaimable_buckets(c, ca);

		while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
		       (bu = next_alloc_bucket(ca)) >= 0) {
			invalidating_data |=
				bch2_invalidate_one_bucket(c, ca, bu, &journal_seq);

			fifo_push(&ca->free[RESERVE_BTREE], bu);
			set_bit(bu, ca->buckets_dirty);
		}
	}

	pr_debug("done scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	if (invalidating_data) {
		pr_info("holding writes");
		pr_debug("invalidating existing data");
		set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	pr_debug("issuing discards");
	allocator_start_issue_discards(c);

	/*
	 * XXX: it's possible for this to deadlock waiting on journal reclaim,
	 * since we're holding btree writes. What then?
	 */
	ret = bch2_alloc_write(c);

	if (invalidating_data) {
		pr_debug("flushing journal");

		ret = bch2_journal_flush_seq(&c->journal, journal_seq);

		pr_debug("issuing discards");
		allocator_start_issue_discards(c);
	}

	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

	/* now flush dirty btree nodes: */
	if (invalidating_data)
		flush_held_btree_writes(c);
int bch2_fs_allocator_start(struct bch_fs *c)
	down_read(&c->gc_lock);
	ret = __bch2_fs_allocator_start(c);
	up_read(&c->gc_lock);

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		percpu_ref_put(&ca->io_ref);
	}

	return bch2_alloc_write(c);
void bch2_fs_allocator_init(struct bch_fs *c)
	struct open_bucket *ob;
	struct write_point *wp;

	mutex_init(&c->write_points_hash_lock);
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	/* open bucket 0 is a sentinel NULL: */
	spin_lock_init(&c->open_buckets[0].lock);

	for (ob = c->open_buckets + 1;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets); ob++) {
		spin_lock_init(&ob->lock);
		c->open_buckets_nr_free++;

		ob->freelist = c->open_buckets_freelist;
		c->open_buckets_freelist = ob - c->open_buckets;
	}

	writepoint_init(&c->btree_write_point, BCH_DATA_BTREE);
	writepoint_init(&c->rebalance_write_point, BCH_DATA_USER);

	for (wp = c->write_points;
	     wp < c->write_points + ARRAY_SIZE(c->write_points); wp++) {
		writepoint_init(wp, BCH_DATA_USER);

		wp->last_used = sched_clock();
		wp->write_point = (unsigned long) wp;
		hlist_add_head_rcu(&wp->node,
				   writepoint_hash(c, wp->write_point));
	}

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);