#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "journal_io.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
static const char * const bch2_alloc_field_names[] = {
#define x(name, bytes) #name,
	BCH_ALLOC_FIELDS()
#undef x
	NULL
};

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC:
		 */
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);
	}

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */
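/*
 * On disk, a struct bch_alloc is a generation number, a bitmap of which
 * optional fields are present, and then the present fields packed back to
 * back as little-endian integers; the helpers below walk that packed
 * representation in BCH_ALLOC_FIELDS() order, and fields whose bit is
 * clear read back as 0.
 */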
static inline u64 get_alloc_field(const struct bch_alloc *a,
				  const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}
static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
				   unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}
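/*
 * Encoding example (assuming the usual BCH_ALLOC_FIELDS() layout, with a
 * 16-bit read_time as field 0): packing a key whose only nonzero field is
 * read_time = 1000 sets just bit 0 of v.fields and emits a single __le16;
 * zero-valued fields occupy no bytes at all, which keeps alloc keys small.
 */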
struct bkey_alloc_unpacked bch2_alloc_unpack(const struct bch_alloc *a)
{
	struct bkey_alloc_unpacked ret = { .gen = a->gen };
	const void *d = a->data;
	unsigned idx = 0;

#define x(_name, _bits)	ret._name = get_alloc_field(a, &d, idx++);
	BCH_ALLOC_FIELDS()
#undef x
	return ret;
}

static void bch2_alloc_pack(struct bkey_i_alloc *dst,
			    const struct bkey_alloc_unpacked src)
{
	unsigned idx = 0;
	void *d = dst->v.data;

	dst->v.gen = src.gen;

#define x(_name, _bits)	put_alloc_field(dst, &d, idx++, src._name);
	BCH_ALLOC_FIELDS()
#undef x

	set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
}
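/*
 * bch2_alloc_pack() and bch2_alloc_unpack() iterate BCH_ALLOC_FIELDS() in
 * the same order, so unpacking a key and re-packing it is an identity,
 * modulo zero-valued fields being dropped from the encoding.
 */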
static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	const void *d = a.v->data;
	unsigned i;

	pr_buf(out, "gen %u", a.v->gen);

	for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
		if (a.v->fields & (1 << i))
			pr_buf(out, " %s %llu",
			       bch2_alloc_field_names[i],
			       get_alloc_field(a.v, &d, i));
}
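/*
 * Example output, with hypothetical values and only the nonzero fields
 * printed: "gen 4 read_time 13733 write_time 13751 cached_sectors 128"
 */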
static void __alloc_read_key(struct bucket *g, const struct bch_alloc *a)
{
	const void *d = a->data;
	unsigned idx = 0, data_type, dirty_sectors, cached_sectors;
	struct bucket_mark m;

	g->io_time[READ]	= get_alloc_field(a, &d, idx++);
	g->io_time[WRITE]	= get_alloc_field(a, &d, idx++);
	data_type		= get_alloc_field(a, &d, idx++);
	dirty_sectors		= get_alloc_field(a, &d, idx++);
	cached_sectors		= get_alloc_field(a, &d, idx++);
	g->oldest_gen		= get_alloc_field(a, &d, idx++);

	bucket_cmpxchg(g, m, ({
		m.gen			= a->gen;
		m.data_type		= data_type;
		m.dirty_sectors		= dirty_sectors;
		m.cached_sectors	= cached_sectors;
	}));
}
static void __alloc_write_key(struct bkey_i_alloc *a, struct bucket *g,
			      struct bucket_mark m)
{
	unsigned idx = 0;
	void *d = a->v.data;

	a->v.fields	= 0;
	a->v.gen	= m.gen;

	put_alloc_field(a, &d, idx++, g->io_time[READ]);
	put_alloc_field(a, &d, idx++, g->io_time[WRITE]);
	put_alloc_field(a, &d, idx++, m.data_type);
	put_alloc_field(a, &d, idx++, m.dirty_sectors);
	put_alloc_field(a, &d, idx++, m.cached_sectors);
	put_alloc_field(a, &d, idx++, g->oldest_gen);

	set_bkey_val_bytes(&a->k, (void *) d - (void *) &a->v);
}
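/*
 * Note: __alloc_read_key() and __alloc_write_key() must consume/emit
 * fields in exactly the same order, which is also the BCH_ALLOC_FIELDS()
 * order used by the pack/unpack helpers above.
 */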
static void bch2_alloc_read_key(struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bkey_s_c_alloc a;

	if (k.k->type != KEY_TYPE_alloc)
		return;

	a = bkey_s_c_to_alloc(k);
	ca = bch_dev_bkey_exists(c, a.k->p.inode);

	if (a.k->p.offset >= ca->mi.nbuckets)
		return;

	percpu_down_read_preempt_disable(&c->mark_lock);
	__alloc_read_key(bucket(ca, a.k->p.offset), a.v);
	percpu_up_read_preempt_enable(&c->mark_lock);
}
int bch2_alloc_read(struct bch_fs *c, struct list_head *journal_replay_list)
{
	struct journal_replay *r;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	unsigned i;
	int ret;

	for_each_btree_key(&iter, c, BTREE_ID_ALLOC, POS_MIN, 0, k) {
		bch2_alloc_read_key(c, k);
		bch2_btree_iter_cond_resched(&iter);
	}

	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret;

	list_for_each_entry(r, journal_replay_list, list) {
		struct bkey_i *k, *n;
		struct jset_entry *entry;

		for_each_jset_key(k, n, entry, &r->j)
			if (entry->btree_id == BTREE_ID_ALLOC)
				bch2_alloc_read_key(c, bkey_i_to_s_c(k));
	}

	for_each_member_device(ca, c, i)
		bch2_dev_usage_from_buckets(c, ca);

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	}
	mutex_unlock(&c->bucket_clock[WRITE].lock);

	return 0;
}
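/*
 * Alloc info is read from both the btree and the journal: keys still
 * sitting in the journal may be newer than what has been written back to
 * the alloc btree, and since they're applied second here, they win.
 */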
static int __bch2_alloc_write_key(struct bch_fs *c, struct bch_dev *ca,
				  size_t b, struct btree_iter *iter,
				  u64 *journal_seq, unsigned flags)
{
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bkey_i_alloc *a = bkey_alloc_init(&alloc_key.k);
	struct bucket *g;
	struct bucket_mark m, new;
	int ret;

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	a->k.p = POS(ca->dev_idx, b);

	bch2_btree_iter_set_pos(iter, a->k.p);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	percpu_down_read_preempt_disable(&c->mark_lock);
	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	if (!m.dirty) {
		percpu_up_read_preempt_enable(&c->mark_lock);
		return 0;
	}

	__alloc_write_key(a, g, m);
	percpu_up_read_preempt_enable(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	ret = bch2_btree_insert_at(c, NULL, journal_seq,
				   BTREE_INSERT_NOCHECK_RW|
				   BTREE_INSERT_NOFAIL|
				   BTREE_INSERT_USE_RESERVE|
				   BTREE_INSERT_USE_ALLOC_RESERVE|
				   flags,
				   BTREE_INSERT_ENTRY(iter, &a->k_i));
	if (ret)
		return ret;

	/*
	 * Clear the dirty bit only if the mark hasn't changed since we read
	 * it; if it has, the bucket stays dirty for the next write pass:
	 */
	new = m;
	new.dirty = false;
	atomic64_cmpxchg(&g->_mark.v, m.v.counter, new.v.counter);

	if (ca->buckets_written)
		set_bit(b, ca->buckets_written);

	return 0;
}
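/*
 * bch2_alloc_replay_key() is called during journal replay; buckets whose
 * alloc info has already been rewritten by bch2_alloc_write() are skipped,
 * and buckets_written may only be trusted while the btree node is locked -
 * hence the traverse before the test_bit() below.
 */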
int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	struct bch_dev *ca;
	struct btree_iter iter;
	int ret;

	if (k->k.p.inode >= c->sb.nr_devices ||
	    !c->devs[k->k.p.inode])
		return 0;

	ca = bch_dev_bkey_exists(c, k->k.p.inode);

	if (k->k.p.offset >= ca->mi.nbuckets)
		return 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, k->k.p,
			     BTREE_ITER_INTENT);

	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto err;

	/* check buckets_written with btree node locked: */

	ret = test_bit(k->k.p.offset, ca->buckets_written)
		? 0
		: bch2_btree_insert_at(c, NULL, NULL,
				       BTREE_INSERT_NOFAIL|
				       BTREE_INSERT_JOURNAL_REPLAY,
				       BTREE_INSERT_ENTRY(&iter, k));
err:
	bch2_btree_iter_unlock(&iter);
	return ret;
}
int bch2_alloc_write(struct bch_fs *c, bool nowait, bool *wrote)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	*wrote = false;

	for_each_rw_member(ca, c, i) {
		struct btree_iter iter;
		struct bucket_array *buckets;
		size_t b;

		bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS_MIN,
				     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (b = buckets->first_bucket;
		     b < buckets->nbuckets;
		     b++) {
			if (!buckets->b[b].mark.dirty)
				continue;

			ret = __bch2_alloc_write_key(c, ca, b, &iter, NULL,
						     nowait
						     ? BTREE_INSERT_NOWAIT
						     : 0);
			if (ret)
				break;

			*wrote = true;
		}
		up_read(&ca->bucket_lock);
		bch2_btree_iter_unlock(&iter);

		if (ret) {
			percpu_ref_put(&ca->io_ref);
			break;
		}
	}

	return ret;
}
/* Bucket IO clocks: */
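/*
 * Each bucket stores 16-bit io_time[READ] and io_time[WRITE] stamps taken
 * from the corresponding bucket_clock hand; bucket_last_io() is a bucket's
 * distance behind the hand. Because the stamps are only 16 bits, the code
 * below has to periodically rescale them before the hand wraps.
 */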
static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket *g;
	u16 max_last_io = 0;
	unsigned i;

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	max_last_io = 0;

	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;
}
static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;
	struct bch_dev *ca;
	struct bucket *g;
	unsigned i;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);
	}
}
static inline u64 bucket_clock_freq(u64 capacity)
{
	return max(capacity >> 10, 2028ULL);
}
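/*
 * E.g. on a 1 TiB filesystem (2^31 512-byte sectors), capacity >> 10 is
 * 2^21 sectors = 1 GiB: the clock hand advances roughly once per 0.1% of
 * capacity in IO, matching the comment in bch2_inc_clock_hand() below.
 */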
static void bch2_inc_clock_hand(struct io_timer *timer)
{
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
					struct bch_fs, bucket_clock[clock->rw]);
	struct bch_dev *ca;
	u64 capacity;
	unsigned i;

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;
	clock->hand++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	if (!capacity)
		return;

	/*
	 * We only advance the timer once ~0.1% of the filesystem capacity
	 * has been read or written:
	 *
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service
	 * reads)
	 */
	timer->expire += bucket_clock_freq(capacity);

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
}
static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
{
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->rw		= rw;
	clock->rescale.fn	= bch2_inc_clock_hand;
	clock->rescale.expire	= bucket_clock_freq(c->capacity);
	mutex_init(&clock->lock);
}
/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */

#define BUCKET_GC_GEN_MAX	96U
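/*
 * bucket_gc_gen() is the number of times a bucket's generation has been
 * bumped since mark and sweep GC last updated its oldest_gen; capping it
 * at BUCKET_GC_GEN_MAX ensures generation numbers can't wrap relative to
 * stale cached pointers that GC hasn't invalidated yet.
 */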
/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}
static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       size_t bucket,
				       struct bucket_mark mark)
{
	u8 gc_gen;

	if (!is_available_bucket(mark))
		return false;

	if (ca->buckets_nouse &&
	    test_bit(bucket, ca->buckets_nouse))
		return false;

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation
 *   number wraparound.
 */
static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
{
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	/*
	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	 */
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);
}
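/*
 * Worked example (hypothetical numbers): with max_last_io = 1024, a bucket
 * read 128 ticks ago gets hotness (1024 - 128) * 7 / 1024 = 6; if it holds
 * 100 cached sectors, data_wantness = (6 + 1) * 100 = 700, so it's reused
 * long after a cold, empty bucket whose key is just its scaled gc gen.
 */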
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return (l.key > r.key) - (l.key < r.key) ?:
	       (l.nr < r.nr)  - (l.nr > r.nr)  ?:
	       (l.bucket > r.bucket) - (l.bucket < r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return (l->bucket > r->bucket) - (l->bucket < r->bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	size_t b, i, nr = 0;

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
					-bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				-bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);
}
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				return;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}
static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}
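/*
 * Dispatch on the device's configured replacement policy, then return the
 * total number of buckets now sitting in alloc_heap:
 */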
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}
static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
	}

	return -1;
}
/*
 * returns sequence number of most recent journal entry that updated this
 * bucket:
 */
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		/* the bucket only stores the low 16 bits of the seq: */
		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		return bucket_seq;
	} else {
		return 0;
	}
}
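/*
 * Example (hypothetical values): if the journal is at seq 0x10005 and the
 * bucket stored the low 16 bits 0xfffe, splicing gives 0x1fffe - in the
 * future - so we subtract 1 << 16 and recover seq 0xfffe.
 */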
static int bch2_invalidate_one_bucket2(struct bch_fs *c, struct bch_dev *ca,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
{
#if 0
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
#else
	/* hack: */
	__BKEY_PADDED(k, 8) alloc_key;
#endif
	struct bkey_i_alloc *a;
	struct bkey_alloc_unpacked u;
	struct bucket_mark m;
	struct bkey_s_c k;
	bool invalidating_cached_data;
	size_t b;
	int ret;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read_preempt_disable(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	verify_not_on_freelist(c, ca, b);

	BUG_ON(!fifo_push(&ca->free_inc, b));

	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
	m = bucket(ca, b)->mark;

	spin_unlock(&c->freelist_lock);
	percpu_up_read_preempt_enable(&c->mark_lock);

	bch2_btree_iter_cond_resched(iter);

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
	k = bch2_btree_iter_peek_slot(iter);
	ret = btree_iter_err(k);
	if (ret)
		return ret;

	if (k.k && k.k->type == KEY_TYPE_alloc)
		u = bch2_alloc_unpack(bkey_s_c_to_alloc(k).v);
	else
		memset(&u, 0, sizeof(u));

	invalidating_cached_data = u.cached_sectors != 0;

	//BUG_ON(u.dirty_sectors);

	u.dirty_sectors	 = 0;
	u.cached_sectors = 0;
	u.read_time	 = c->bucket_clock[READ].hand;
	u.write_time	 = c->bucket_clock[WRITE].hand;
	u.gen++;

	a = bkey_alloc_init(&alloc_key.k);
	a->k.p = iter->pos;
	bch2_alloc_pack(a, u);

	ret = bch2_btree_insert_at(c, NULL,
				   invalidating_cached_data ? journal_seq : NULL,
				   BTREE_INSERT_ATOMIC|
				   BTREE_INSERT_NOCHECK_RW|
				   BTREE_INSERT_NOFAIL|
				   BTREE_INSERT_USE_RESERVE|
				   BTREE_INSERT_USE_ALLOC_RESERVE|
				   flags,
				   BTREE_INSERT_ENTRY(iter, &a->k_i));
	if (ret == -EINTR)
		goto retry;

	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/*
		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		 * this bucket again:
		 */
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read_preempt_disable(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read_preempt_enable(&c->mark_lock);
	}

	return ret;
}
static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				       size_t bucket, u64 *flush_seq)
{
	struct bucket_mark m;

	percpu_down_read_preempt_disable(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	bch2_invalidate_bucket(c, ca, bucket, &m);

	verify_not_on_freelist(c, ca, bucket);
	BUG_ON(!fifo_push(&ca->free_inc, bucket));

	spin_unlock(&c->freelist_lock);

	bucket_io_clock_reset(c, ca, bucket, READ);
	bucket_io_clock_reset(c, ca, bucket, WRITE);

	percpu_up_read_preempt_enable(&c->mark_lock);

	*flush_seq = max(*flush_seq, bucket_journal_seq(c, m));

	return m.cached_sectors != 0;
}
/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_iter iter;
	u64 journal_seq = 0;
	int ret = 0;

	bch2_btree_iter_init(&iter, c, BTREE_ID_ALLOC, POS(ca->dev_idx, 0),
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used)
		ret = bch2_invalidate_one_bucket2(c, ca, &iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_btree_iter_unlock(&iter);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}
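/*
 * push_invalidated_bucket() blocks until the bucket fits on one of the
 * freelists; a nonzero return means the allocator thread was asked to stop
 * while waiting.
 */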
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++)
			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_blocked_full = false;

				spin_unlock(&c->freelist_lock);
				goto out;
			}

		if (!ca->allocator_blocked_full) {
			ca->allocator_blocked_full = true;
			closure_wake_up(&c->freelist_wait);
		}

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}
/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}
/**
 * bch_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		cond_resched();

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
			goto stop;
		}

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);
			continue;
		}

		pr_debug("free_inc now empty");

		do {
			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			if (!nr ||
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
				ca->allocator_blocked = true;
				closure_wake_up(&c->freelist_wait);

				ret = wait_buckets_available(c, ca);
				if (ret) {
					up_read(&c->gc_lock);
					goto stop;
				}
			}
		} while (!nr);

		ca->allocator_blocked = false;
		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	return 0;
}
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	if (c->capacity) {
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	} else {
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
	}

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
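/*
 * Reserve math example (hypothetical device): with four 8-entry freelists
 * and 2048-sector buckets, dev_reserve is (4 * 8 + 3) * 2048 = 71680
 * sectors, and twice that is withheld from the reported capacity so that
 * copygc can always make forward progress.
 */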
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait, ca->allocator_blocked_full);
}
/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}
/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}
static bool flush_held_btree_writes(struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	bool nodes_unwritten;
	size_t i;
again:
	cond_resched();
	nodes_unwritten = false;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (btree_node_need_write(b)) {
			if (btree_node_may_write(b)) {
				rcu_read_unlock();
				btree_node_lock_type(c, b, SIX_LOCK_read);
				bch2_btree_node_write(c, b, SIX_LOCK_read);
				six_unlock_read(&b->lock);
				goto again;
			} else {
				nodes_unwritten = true;
			}
		}
	rcu_read_unlock();

	if (c->btree_roots_dirty) {
		bch2_journal_meta(&c->journal);
		goto again;
	}

	return !nodes_unwritten &&
	       !bch2_btree_interior_updates_nr_pending(c);
}
static void allocator_start_issue_discards(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	size_t bu;

	for_each_rw_member(ca, c, dev_iter)
		while (fifo_pop(&ca->free_inc, bu))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bu),
					     ca->mi.bucket_size, GFP_NOIO, 0);
}
static int resize_free_inc(struct bch_dev *ca)
{
	alloc_fifo free_inc;

	if (!fifo_full(&ca->free_inc))
		return 0;

	if (!init_fifo(&free_inc,
		       ca->free_inc.size * 2,
		       GFP_KERNEL))
		return -ENOMEM;

	fifo_move(&free_inc, &ca->free_inc);
	swap(free_inc, ca->free_inc);
	free_fifo(&free_inc);
	return 0;
}
static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	bool ret = true;

	if (test_alloc_startup(c))
		return false;

	down_read(&c->gc_lock);

	/* Scan for buckets that are already invalidated: */
	for_each_rw_member(ca, c, dev_iter) {
		struct bucket_array *buckets;
		struct bucket_mark m;
		long bu;

		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for (bu = buckets->first_bucket;
		     bu < buckets->nbuckets; bu++) {
			m = READ_ONCE(buckets->b[bu].mark);

			if (!buckets->b[bu].gen_valid ||
			    !is_available_bucket(m) ||
			    m.cached_sectors ||
			    (ca->buckets_nouse &&
			     test_bit(bu, ca->buckets_nouse)))
				continue;

			percpu_down_read_preempt_disable(&c->mark_lock);
			bch2_mark_alloc_bucket(c, ca, bu, true,
					       gc_pos_alloc(c, NULL), 0);
			percpu_up_read_preempt_enable(&c->mark_lock);

			fifo_push(&ca->free_inc, bu);

			discard_invalidated_buckets(c, ca);

			if (fifo_full(&ca->free[RESERVE_BTREE]))
				break;
		}
		up_read(&ca->bucket_lock);
	}

	up_read(&c->gc_lock);

	/* did we find enough buckets? */
	for_each_rw_member(ca, c, dev_iter)
		if (!fifo_full(&ca->free[RESERVE_BTREE]))
			ret = false;

	return ret;
}
static int __bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned dev_iter;
	u64 journal_seq = 0;
	bool wrote;
	long bu;
	int ret = 0;

	pr_debug("not enough empty buckets; scanning for reclaimable buckets");

	/*
	 * We're moving buckets to freelists _before_ they've been marked as
	 * invalidated on disk - we have to so that we can allocate new btree
	 * nodes to mark them as invalidated on disk.
	 *
	 * However, we can't _write_ to any of these buckets yet - they might
	 * have cached data in them, which is live until they're marked as
	 * invalidated on disk:
	 */
	set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);

	down_read(&c->gc_lock);
	do {
		wrote = false;

		for_each_rw_member(ca, c, dev_iter) {
			find_reclaimable_buckets(c, ca);

			while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
			       (bu = next_alloc_bucket(ca)) >= 0) {
				ret = resize_free_inc(ca);
				if (ret) {
					percpu_ref_put(&ca->io_ref);
					up_read(&c->gc_lock);
					goto err;
				}

				bch2_invalidate_one_bucket(c, ca, bu,
							   &journal_seq);

				fifo_push(&ca->free[RESERVE_BTREE], bu);
			}
		}

		pr_debug("done scanning for reclaimable buckets");

		/*
		 * XXX: it's possible for this to deadlock waiting on journal reclaim,
		 * since we're holding btree writes. What then?
		 */
		ret = bch2_alloc_write(c, true, &wrote);

		/*
		 * If bch2_alloc_write() did anything, it may have used some
		 * buckets, and we need the RESERVE_BTREE freelist full - so we
		 * need to loop and scan again.
		 *
		 * And if it errored, it may have been because there weren't
		 * enough buckets, so just scan and loop again as long as it
		 * made some progress:
		 */
	} while (wrote);
	up_read(&c->gc_lock);

	if (ret)
		goto err;

	pr_debug("flushing journal");

	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	pr_debug("issuing discards");
	allocator_start_issue_discards(c);
err:
	clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
	closure_wait_event(&c->btree_interior_update_wait,
			   flush_held_btree_writes(c));

	return ret;
}
int bch2_fs_allocator_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	ret = bch2_fs_allocator_start_fast(c) ? 0 :
		__bch2_fs_allocator_start(c);
	if (ret)
		return ret;

	set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);

	for_each_rw_member(ca, c, i) {
		ret = bch2_dev_allocator_start(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	set_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags);
	return 0;
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}