1 // SPDX-License-Identifier: GPL-2.0
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
18 #include <linux/kthread.h>
19 #include <linux/math64.h>
20 #include <linux/random.h>
21 #include <linux/rculist.h>
22 #include <linux/rcupdate.h>
23 #include <linux/sched/task.h>
24 #include <linux/sort.h>
25 #include <trace/events/bcachefs.h>
27 static const char * const bch2_alloc_field_names[] = {
28 #define x(name, bytes) #name,
34 static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
36 /* Ratelimiting/PD controllers */
38 static void pd_controllers_update(struct work_struct *work)
40 struct bch_fs *c = container_of(to_delayed_work(work),
42 pd_controllers_update);
44 s64 free = 0, fragmented = 0;
47 for_each_member_device(ca, c, i) {
48 struct bch_dev_usage stats = bch2_dev_usage_read(ca);
50 free += bucket_to_sector(ca,
51 __dev_buckets_free(ca, stats)) << 9;
53 * Bytes of internal fragmentation, which can be
54 * reclaimed by copy GC
56 fragmented += max_t(s64, 0, (bucket_to_sector(ca,
57 stats.buckets[BCH_DATA_user] +
58 stats.buckets[BCH_DATA_cached]) -
59 (stats.sectors[BCH_DATA_user] +
60 stats.sectors[BCH_DATA_cached])) << 9);
63 bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
64 schedule_delayed_work(&c->pd_controllers_update,
65 c->pd_controllers_update_seconds * HZ);
68 /* Persistent alloc info: */
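/*
 * Alloc keys pack their fields compactly: a->fields is a bitmask of which
 * fields are present, and each present field is stored in a->data as a
 * little-endian integer of BCH_ALLOC_FIELD_BYTES[field] bytes, in field
 * order. get_alloc_field()/put_alloc_field() below walk that encoding.
 */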
70 static inline u64 get_alloc_field(const struct bch_alloc *a,
71 const void **p, unsigned field)
73 unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
76 if (!(a->fields & (1 << field)))
81 v = *((const u8 *) *p);
100 static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
101 unsigned field, u64 v)
103 unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
108 a->v.fields |= 1 << field;
115 *((__le16 *) *p) = cpu_to_le16(v);
118 *((__le32 *) *p) = cpu_to_le32(v);
121 *((__le64 *) *p) = cpu_to_le64(v);
130 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
132 struct bkey_alloc_unpacked ret = { .gen = 0 };
134 if (k.k->type == KEY_TYPE_alloc) {
135 const struct bch_alloc *a = bkey_s_c_to_alloc(k).v;
136 const void *d = a->data;
141 #define x(_name, _bits) ret._name = get_alloc_field(a, &d, idx++);
148 void bch2_alloc_pack(struct bkey_i_alloc *dst,
149 const struct bkey_alloc_unpacked src)
152 void *d = dst->v.data;
156 dst->v.gen = src.gen;
158 #define x(_name, _bits) put_alloc_field(dst, &d, idx++, src._name);
162 bytes = (void *) d - (void *) &dst->v;
163 set_bkey_val_bytes(&dst->k, bytes);
164 memset_u64s_tail(&dst->v, 0, bytes);
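/*
 * Callers further down (e.g. bch2_bucket_io_time_reset()) rebuild an unpacked
 * value from the in-memory bucket, modify it, and re-pack it with
 * bch2_alloc_pack() when rewriting a bucket's alloc key.
 */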
167 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
169 unsigned i, bytes = offsetof(struct bch_alloc, data);
171 for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
172 if (a->fields & (1 << i))
173 bytes += BCH_ALLOC_FIELD_BYTES[i];
175 return DIV_ROUND_UP(bytes, sizeof(u64));
178 const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
180 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
182 if (k.k->p.inode >= c->sb.nr_devices ||
183 !c->devs[k.k->p.inode])
184 return "invalid device";
186 /* allow for unknown fields */
187 if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
188 return "incorrect value size";
193 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
196 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
197 const void *d = a.v->data;
200 pr_buf(out, "gen %u", a.v->gen);
202 for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
203 if (a.v->fields & (1 << i))
204 pr_buf(out, " %s %llu",
205 bch2_alloc_field_names[i],
206 get_alloc_field(a.v, &d, i));
209 static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
210 unsigned level, struct bkey_s_c k)
214 struct bkey_alloc_unpacked u;
216 if (level || k.k->type != KEY_TYPE_alloc)
219 ca = bch_dev_bkey_exists(c, k.k->p.inode);
220 g = __bucket(ca, k.k->p.offset, 0);
221 u = bch2_alloc_unpack(k);
223 g->_mark.gen = u.gen;
224 g->_mark.data_type = u.data_type;
225 g->_mark.dirty_sectors = u.dirty_sectors;
226 g->_mark.cached_sectors = u.cached_sectors;
227 g->io_time[READ] = u.read_time;
228 g->io_time[WRITE] = u.write_time;
229 g->oldest_gen = u.oldest_gen;
235 int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
241 down_read(&c->gc_lock);
242 ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
243 NULL, bch2_alloc_read_fn);
244 up_read(&c->gc_lock);
247 bch_err(c, "error reading alloc info: %i", ret);
251 percpu_down_write(&c->mark_lock);
252 bch2_dev_usage_from_buckets(c);
253 percpu_up_write(&c->mark_lock);
255 mutex_lock(&c->bucket_clock[READ].lock);
256 for_each_member_device(ca, c, i) {
257 down_read(&ca->bucket_lock);
258 bch2_recalc_oldest_io(c, ca, READ);
259 up_read(&ca->bucket_lock);
261 mutex_unlock(&c->bucket_clock[READ].lock);
263 mutex_lock(&c->bucket_clock[WRITE].lock);
264 for_each_member_device(ca, c, i) {
265 down_read(&ca->bucket_lock);
266 bch2_recalc_oldest_io(c, ca, WRITE);
267 up_read(&ca->bucket_lock);
269 mutex_unlock(&c->bucket_clock[WRITE].lock);
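/*
 * Write out a single bucket's alloc key: flush any dirty key cache entry for
 * this position, read the current btree key, and if the in-memory bucket mark
 * differs, pack and commit an updated key (with BTREE_TRIGGER_NORUN, i.e.
 * without rerunning triggers):
 */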
274 static int bch2_alloc_write_key(struct btree_trans *trans,
275 struct btree_iter *iter,
278 struct bch_fs *c = trans->c;
281 struct bucket_array *ba;
283 struct bucket_mark m;
284 struct bkey_alloc_unpacked old_u, new_u;
285 __BKEY_PADDED(k, 8) alloc_key; /* hack: */
286 struct bkey_i_alloc *a;
289 bch2_trans_begin(trans);
291 ret = bch2_btree_key_cache_flush(trans,
292 BTREE_ID_ALLOC, iter->pos);
296 k = bch2_btree_iter_peek_slot(iter);
301 old_u = bch2_alloc_unpack(k);
303 percpu_down_read(&c->mark_lock);
304 ca = bch_dev_bkey_exists(c, iter->pos.inode);
305 ba = bucket_array(ca);
307 g = &ba->b[iter->pos.offset];
308 m = READ_ONCE(g->mark);
309 new_u = alloc_mem_to_key(g, m);
310 percpu_up_read(&c->mark_lock);
312 if (!bkey_alloc_unpacked_cmp(old_u, new_u))
315 a = bkey_alloc_init(&alloc_key.k);
317 bch2_alloc_pack(a, new_u);
319 bch2_trans_update(trans, iter, &a->k_i,
320 BTREE_TRIGGER_NORUN);
321 ret = bch2_trans_commit(trans, NULL, NULL,
323 BTREE_INSERT_USE_RESERVE|
331 int bch2_dev_alloc_write(struct bch_fs *c, struct bch_dev *ca, unsigned flags)
333 struct btree_trans trans;
334 struct btree_iter *iter;
335 u64 first_bucket, nbuckets;
338 percpu_down_read(&c->mark_lock);
339 first_bucket = bucket_array(ca)->first_bucket;
340 nbuckets = bucket_array(ca)->nbuckets;
341 percpu_up_read(&c->mark_lock);
343 BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
345 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
347 iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
348 POS(ca->dev_idx, first_bucket),
349 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
351 while (iter->pos.offset < nbuckets) {
352 bch2_trans_cond_resched(&trans);
354 ret = bch2_alloc_write_key(&trans, iter, flags);
357 bch2_btree_iter_next_slot(iter);
360 bch2_trans_exit(&trans);
365 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
371 for_each_rw_member(ca, c, i) {
372 bch2_dev_alloc_write(c, ca, flags);
374 percpu_ref_put(&ca->io_ref);
382 /* Bucket IO clocks: */
384 static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
386 struct bucket_clock *clock = &c->bucket_clock[rw];
387 struct bucket_array *buckets = bucket_array(ca);
392 lockdep_assert_held(&c->bucket_clock[rw].lock);
394 /* Recalculate max_last_io for this device: */
395 for_each_bucket(g, buckets)
396 max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
398 ca->max_last_bucket_io[rw] = max_last_io;
400 /* Recalculate global max_last_io: */
403 for_each_member_device(ca, c, i)
404 max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
406 clock->max_last_io = max_last_io;
409 static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
411 struct bucket_clock *clock = &c->bucket_clock[rw];
412 struct bucket_array *buckets;
417 trace_rescale_prios(c);
419 for_each_member_device(ca, c, i) {
420 down_read(&ca->bucket_lock);
421 buckets = bucket_array(ca);
423 for_each_bucket(g, buckets)
424 g->io_time[rw] = clock->hand -
425 bucket_last_io(c, g, rw) / 2;
427 bch2_recalc_oldest_io(c, ca, rw);
429 up_read(&ca->bucket_lock);
433 static inline u64 bucket_clock_freq(u64 capacity)
435 return max(capacity >> 10, 2028ULL);
438 static void bch2_inc_clock_hand(struct io_timer *timer)
440 struct bucket_clock *clock = container_of(timer,
441 struct bucket_clock, rescale);
442 struct bch_fs *c = container_of(clock,
443 struct bch_fs, bucket_clock[clock->rw]);
448 mutex_lock(&clock->lock);
450 /* if clock cannot be advanced more, rescale prio */
451 if (clock->max_last_io >= U16_MAX - 2)
452 bch2_rescale_bucket_io_times(c, clock->rw);
454 BUG_ON(clock->max_last_io >= U16_MAX - 2);
456 for_each_member_device(ca, c, i)
457 ca->max_last_bucket_io[clock->rw]++;
458 clock->max_last_io++;
461 mutex_unlock(&clock->lock);
463 capacity = READ_ONCE(c->capacity);
469 * we only increment when 0.1% of the filesystem capacity has been read
470 * or written to; this determines when it's time to advance the clock hand
472 * XXX: we shouldn't really be going off of the capacity of devices in
473 * RW mode (that will be 0 when we're RO, yet we can still service reads)
476 timer->expire += bucket_clock_freq(capacity);
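/*
 * Worked example (assuming capacity and the io_clock are in 512-byte
 * sectors): for a ~1 TB filesystem, capacity is roughly 2 * 10^9 sectors, so
 * bucket_clock_freq() returns ~2 * 10^6 sectors, i.e. the hand advances about
 * once per 1 GB of IO, matching the ~0.1% of capacity mentioned above.
 */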
478 bch2_io_timer_add(&c->io_clock[clock->rw], timer);
481 static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
483 struct bucket_clock *clock = &c->bucket_clock[rw];
487 clock->rescale.fn = bch2_inc_clock_hand;
488 clock->rescale.expire = bucket_clock_freq(c->capacity);
489 mutex_init(&clock->lock);
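/*
 * Record an IO to a bucket by writing the current bucket clock hand into the
 * bucket's alloc key (via the btree key cache); a no-op if the stored time
 * already matches the hand:
 */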
492 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
493 size_t bucket_nr, int rw)
495 struct bch_fs *c = trans->c;
496 struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
497 struct btree_iter *iter;
499 struct bkey_i_alloc *a;
500 struct bkey_alloc_unpacked u;
504 iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, POS(dev, bucket_nr),
506 BTREE_ITER_CACHED_NOFILL|
509 return PTR_ERR(iter);
511 a = bch2_trans_kmalloc(trans, BKEY_ALLOC_U64s_MAX * 8);
512 ret = PTR_ERR_OR_ZERO(a);
516 percpu_down_read(&c->mark_lock);
517 g = bucket(ca, bucket_nr);
518 u = alloc_mem_to_key(g, READ_ONCE(g->mark));
519 percpu_up_read(&c->mark_lock);
521 bkey_alloc_init(&a->k_i);
524 time = rw == READ ? &u.read_time : &u.write_time;
525 if (*time == c->bucket_clock[rw].hand)
528 *time = c->bucket_clock[rw].hand;
530 bch2_alloc_pack(a, u);
532 ret = bch2_trans_update(trans, iter, &a->k_i, 0) ?:
533 bch2_trans_commit(trans, NULL, NULL, 0);
535 bch2_trans_iter_put(trans, iter);
539 /* Background allocator thread: */
542 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
543 * (marking them as invalidated on disk), then optionally issues discard
544 * commands to the newly free buckets, then puts them on the various freelists.
547 #define BUCKET_GC_GEN_MAX 96U
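/*
 * bucket_gc_gen() is (roughly) how far a bucket's gen has advanced past the
 * oldest gen that gc knows about; as it approaches BUCKET_GC_GEN_MAX the
 * allocator asks for a gc run (via ca->inc_gen_needs_gc /
 * inc_gen_really_needs_gc below) rather than risk generation wraparound.
 */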
550 * wait_buckets_available - wait on reclaimable buckets
552 * If there aren't enough available buckets to fill up free_inc, wait until there are.
555 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
557 unsigned long gc_count = c->gc_count;
561 ca->allocator_state = ALLOCATOR_BLOCKED;
562 closure_wake_up(&c->freelist_wait);
565 set_current_state(TASK_INTERRUPTIBLE);
566 if (kthread_should_stop()) {
571 if (gc_count != c->gc_count)
572 ca->inc_gen_really_needs_gc = 0;
574 available = max_t(s64, 0, dev_buckets_available(ca) -
575 ca->inc_gen_really_needs_gc);
577 if (available > fifo_free(&ca->free_inc) ||
579 (!fifo_full(&ca->free[RESERVE_BTREE]) ||
580 !fifo_full(&ca->free[RESERVE_MOVINGGC]))))
583 up_read(&c->gc_lock);
586 down_read(&c->gc_lock);
589 __set_current_state(TASK_RUNNING);
590 ca->allocator_state = ALLOCATOR_RUNNING;
591 closure_wake_up(&c->freelist_wait);
596 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
598 struct bucket_mark mark)
602 if (!is_available_bucket(mark))
605 if (ca->buckets_nouse &&
606 test_bit(bucket, ca->buckets_nouse))
609 gc_gen = bucket_gc_gen(ca, bucket);
611 if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
612 ca->inc_gen_needs_gc++;
614 if (gc_gen >= BUCKET_GC_GEN_MAX)
615 ca->inc_gen_really_needs_gc++;
617 return gc_gen < BUCKET_GC_GEN_MAX;
621 * Determines what order we're going to reuse buckets: smallest bucket_sort_key()
625 * - We take into account the read prio of the bucket, which gives us an
626 * indication of how hot the data is -- we scale the prio so that the prio
627 * farthest from the clock is worth 1/8th of the closest.
629 * - The number of sectors of cached data in the bucket, which gives us an
630 * indication of the cost in cache misses this eviction will cause.
632 * - If hotness * sectors used compares equal, we pick the bucket with the
633 * smallest bucket_gc_gen() - since incrementing the same bucket's generation
634 * number repeatedly forces us to run mark and sweep gc to avoid generation number wraparound.
638 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
639 size_t b, struct bucket_mark m)
641 unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
642 unsigned max_last_io = ca->max_last_bucket_io[READ];
645 * Time since last read, scaled to [0, 8) where larger value indicates
646 * more recently read data:
648 unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
650 /* How much we want to keep the data in this bucket: */
651 unsigned long data_wantness =
652 (hotness + 1) * bucket_sectors_used(m);
654 unsigned long needs_journal_commit =
655 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
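/*
 * Compose the key so that buckets we most want to reuse sort smallest:
 * wantness (hotness * cached sectors) in the high bits, then whether the
 * bucket still needs a journal commit, with a scaled-down bucket_gc_gen()
 * as the tiebreak:
 */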
657 return (data_wantness << 9) |
658 (needs_journal_commit << 8) |
659 (bucket_gc_gen(ca, b) / 16);
662 static inline int bucket_alloc_cmp(alloc_heap *h,
663 struct alloc_heap_entry l,
664 struct alloc_heap_entry r)
666 return cmp_int(l.key, r.key) ?:
667 cmp_int(r.nr, l.nr) ?:
668 cmp_int(l.bucket, r.bucket);
671 static inline int bucket_idx_cmp(const void *_l, const void *_r)
673 const struct alloc_heap_entry *l = _l, *r = _r;
675 return cmp_int(l->bucket, r->bucket);
678 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
680 struct bucket_array *buckets;
681 struct alloc_heap_entry e = { 0 };
684 ca->alloc_heap.used = 0;
686 mutex_lock(&c->bucket_clock[READ].lock);
687 down_read(&ca->bucket_lock);
689 buckets = bucket_array(ca);
691 bch2_recalc_oldest_io(c, ca, READ);
694 * Find buckets with lowest read priority, by building a maxheap sorted
695 * by read priority and repeatedly replacing the maximum element until
696 * all buckets have been visited.
698 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
699 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
700 unsigned long key = bucket_sort_key(c, ca, b, m);
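/*
 * Contiguous buckets with identical sort keys get coalesced into a single
 * heap entry (e.bucket, e.nr):
 */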
702 if (!bch2_can_invalidate_bucket(ca, b, m))
705 if (e.nr && e.bucket + e.nr == b && e.key == key) {
709 heap_add_or_replace(&ca->alloc_heap, e,
710 -bucket_alloc_cmp, NULL);
712 e = (struct alloc_heap_entry) {
723 heap_add_or_replace(&ca->alloc_heap, e,
724 -bucket_alloc_cmp, NULL);
726 for (i = 0; i < ca->alloc_heap.used; i++)
727 nr += ca->alloc_heap.data[i].nr;
729 while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
730 nr -= ca->alloc_heap.data[0].nr;
731 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
734 up_read(&ca->bucket_lock);
735 mutex_unlock(&c->bucket_clock[READ].lock);
738 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
740 struct bucket_array *buckets = bucket_array(ca);
741 struct bucket_mark m;
744 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
745 ca->fifo_last_bucket >= ca->mi.nbuckets)
746 ca->fifo_last_bucket = ca->mi.first_bucket;
748 start = ca->fifo_last_bucket;
751 ca->fifo_last_bucket++;
752 if (ca->fifo_last_bucket == ca->mi.nbuckets)
753 ca->fifo_last_bucket = ca->mi.first_bucket;
755 b = ca->fifo_last_bucket;
756 m = READ_ONCE(buckets->b[b].mark);
758 if (bch2_can_invalidate_bucket(ca, b, m)) {
759 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
761 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
762 if (heap_full(&ca->alloc_heap))
767 } while (ca->fifo_last_bucket != start);
770 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
772 struct bucket_array *buckets = bucket_array(ca);
773 struct bucket_mark m;
777 checked < ca->mi.nbuckets / 2;
779 size_t b = bch2_rand_range(ca->mi.nbuckets -
780 ca->mi.first_bucket) +
783 m = READ_ONCE(buckets->b[b].mark);
785 if (bch2_can_invalidate_bucket(ca, b, m)) {
786 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
788 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
789 if (heap_full(&ca->alloc_heap))
796 sort(ca->alloc_heap.data,
798 sizeof(ca->alloc_heap.data[0]),
799 bucket_idx_cmp, NULL);
801 /* remove duplicates: */
802 for (i = 0; i + 1 < ca->alloc_heap.used; i++)
803 if (ca->alloc_heap.data[i].bucket ==
804 ca->alloc_heap.data[i + 1].bucket)
805 ca->alloc_heap.data[i].nr = 0;
808 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
812 ca->inc_gen_needs_gc = 0;
814 switch (ca->mi.replacement) {
815 case CACHE_REPLACEMENT_LRU:
816 find_reclaimable_buckets_lru(c, ca);
818 case CACHE_REPLACEMENT_FIFO:
819 find_reclaimable_buckets_fifo(c, ca);
821 case CACHE_REPLACEMENT_RANDOM:
822 find_reclaimable_buckets_random(c, ca);
826 heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
828 for (i = 0; i < ca->alloc_heap.used; i++)
829 nr += ca->alloc_heap.data[i].nr;
834 static inline long next_alloc_bucket(struct bch_dev *ca)
836 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
838 while (ca->alloc_heap.used) {
840 size_t b = top->bucket;
847 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
854 * returns sequence number of most recent journal entry that updated this bucket
857 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
859 if (m.journal_seq_valid) {
860 u64 journal_seq = atomic64_read(&c->journal.seq);
861 u64 bucket_seq = journal_seq;
863 bucket_seq &= ~((u64) U16_MAX);
864 bucket_seq |= m.journal_seq;
866 if (bucket_seq > journal_seq)
867 bucket_seq -= 1 << 16;
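/*
 * Example (hypothetical numbers): journal_seq = 0x12345 and
 * m.journal_seq = 0xfff0 gives bucket_seq = 0x1fff0, which is in the future,
 * so we subtract 1 << 16 and return 0xfff0: the most recent seq with those
 * low 16 bits that isn't ahead of the journal.
 */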
875 static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
877 struct btree_iter *iter,
878 u64 *journal_seq, unsigned flags)
881 __BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
884 __BKEY_PADDED(k, 8) alloc_key;
886 struct bch_fs *c = trans->c;
887 struct bkey_i_alloc *a;
888 struct bkey_alloc_unpacked u;
890 struct bucket_mark m;
891 bool invalidating_cached_data;
895 BUG_ON(!ca->alloc_heap.used ||
896 !ca->alloc_heap.data[0].nr);
897 b = ca->alloc_heap.data[0].bucket;
899 /* first, put on free_inc and mark as owned by allocator: */
900 percpu_down_read(&c->mark_lock);
901 spin_lock(&c->freelist_lock);
903 verify_not_on_freelist(c, ca, b);
905 BUG_ON(!fifo_push(&ca->free_inc, b));
908 m = READ_ONCE(g->mark);
910 invalidating_cached_data = m.cached_sectors != 0;
913 * If we're not invalidating cached data, we only increment the bucket
914 * gen in memory here; the incremented gen will be updated in the btree
915 * by bch2_trans_mark_pointer():
918 if (!invalidating_cached_data)
919 bch2_invalidate_bucket(c, ca, b, &m);
921 bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
923 spin_unlock(&c->freelist_lock);
924 percpu_up_read(&c->mark_lock);
926 if (!invalidating_cached_data)
930 * If the read-only path is trying to shut down, we can't be generating new btree updates:
933 if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
938 BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
940 bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
942 ret = bch2_btree_iter_traverse(iter);
946 percpu_down_read(&c->mark_lock);
947 g = bucket(ca, iter->pos.offset);
948 m = READ_ONCE(g->mark);
949 u = alloc_mem_to_key(g, m);
951 percpu_up_read(&c->mark_lock);
953 invalidating_cached_data = u.cached_sectors != 0;
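/* Reset the unpacked key to reflect a freshly invalidated, empty bucket: */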
958 u.cached_sectors = 0;
959 u.read_time = c->bucket_clock[READ].hand;
960 u.write_time = c->bucket_clock[WRITE].hand;
962 a = bkey_alloc_init(&alloc_key.k);
964 bch2_alloc_pack(a, u);
966 bch2_trans_update(trans, iter, &a->k_i,
967 BTREE_TRIGGER_BUCKET_INVALIDATE);
971 * when using deferred btree updates, we have journal reclaim doing
972 * btree updates and thus requiring the allocator to make forward
973 * progress, and here the allocator is requiring space in the journal -
974 * so we need a journal pre-reservation:
976 ret = bch2_trans_commit(trans, NULL,
977 invalidating_cached_data ? journal_seq : NULL,
978 BTREE_INSERT_NOUNLOCK|
979 BTREE_INSERT_NOCHECK_RW|
981 BTREE_INSERT_USE_RESERVE|
982 BTREE_INSERT_USE_ALLOC_RESERVE|
988 /* remove from alloc_heap: */
989 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
995 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
998 * Make sure we flush the last journal entry that updated this
999 * bucket (i.e. deleting the last reference) before writing to
1000 * this bucket again:
1002 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
1006 /* remove from free_inc: */
1007 percpu_down_read(&c->mark_lock);
1008 spin_lock(&c->freelist_lock);
1010 bch2_mark_alloc_bucket(c, ca, b, false,
1011 gc_pos_alloc(c, NULL), 0);
1013 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
1016 spin_unlock(&c->freelist_lock);
1017 percpu_up_read(&c->mark_lock);
1020 return ret < 0 ? ret : 0;
1024 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
1026 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
1028 struct btree_trans trans;
1029 struct btree_iter *iter;
1030 u64 journal_seq = 0;
1033 bch2_trans_init(&trans, c, 0, 0);
1035 iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
1036 POS(ca->dev_idx, 0),
1038 BTREE_ITER_CACHED_NOFILL|
1041 /* Only use nowait if we've already invalidated at least one bucket: */
1043 !fifo_full(&ca->free_inc) &&
1044 ca->alloc_heap.used)
1045 ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
1046 BTREE_INSERT_GC_LOCK_HELD|
1047 (!fifo_empty(&ca->free_inc)
1048 ? BTREE_INSERT_NOWAIT : 0));
1050 bch2_trans_exit(&trans);
1052 /* If we used NOWAIT, don't return the error: */
1053 if (!fifo_empty(&ca->free_inc))
1056 bch_err(ca, "error invalidating buckets: %i", ret);
1061 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
1063 bch_err(ca, "journal error: %i", ret);
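/*
 * Move one bucket from free_inc onto whichever reserve freelist has room,
 * sleeping until space opens up; stops waiting early if the allocator thread
 * is being asked to shut down:
 */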
1070 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
1076 set_current_state(TASK_INTERRUPTIBLE);
1078 spin_lock(&c->freelist_lock);
1079 for (i = 0; i < RESERVE_NR; i++) {
1082 * Don't strand buckets on the copygc freelist until
1083 * after recovery is finished:
1085 if (!test_bit(BCH_FS_STARTED, &c->flags) &&
1086 i == RESERVE_MOVINGGC)
1089 if (fifo_push(&ca->free[i], bucket)) {
1090 fifo_pop(&ca->free_inc, bucket);
1092 closure_wake_up(&c->freelist_wait);
1093 ca->allocator_state = ALLOCATOR_RUNNING;
1095 spin_unlock(&c->freelist_lock);
1100 if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
1101 ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
1102 closure_wake_up(&c->freelist_wait);
1105 spin_unlock(&c->freelist_lock);
1107 if ((current->flags & PF_KTHREAD) &&
1108 kthread_should_stop()) {
1117 __set_current_state(TASK_RUNNING);
1122 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
1123 * freelists, waiting until there's room if necessary:
1125 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
1127 while (!fifo_empty(&ca->free_inc)) {
1128 size_t bucket = fifo_peek(&ca->free_inc);
1130 if (ca->mi.discard &&
1131 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1132 blkdev_issue_discard(ca->disk_sb.bdev,
1133 bucket_to_sector(ca, bucket),
1134 ca->mi.bucket_size, GFP_NOIO, 0);
1136 if (push_invalidated_bucket(c, ca, bucket))
1144 * bch_allocator_thread - move buckets from free_inc to reserves
1146 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
1147 * the reserves are depleted by bucket allocation. When we run out
1148 * of free_inc, try to invalidate some buckets and write out the new bucket gens.
1151 static int bch2_allocator_thread(void *arg)
1153 struct bch_dev *ca = arg;
1154 struct bch_fs *c = ca->fs;
1159 ca->allocator_state = ALLOCATOR_RUNNING;
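/*
 * Each pass: discard and push out the buckets invalidated on the previous
 * pass, invalidate the current batch from alloc_heap into free_inc, then
 * scan for more reclaimable buckets, blocking if none are available yet.
 */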
1163 if (kthread_should_stop())
1166 pr_debug("discarding %zu invalidated buckets",
1167 fifo_used(&ca->free_inc));
1169 ret = discard_invalidated_buckets(c, ca);
1173 down_read(&c->gc_lock);
1175 ret = bch2_invalidate_buckets(c, ca);
1177 up_read(&c->gc_lock);
1181 if (!fifo_empty(&ca->free_inc)) {
1182 up_read(&c->gc_lock);
1186 pr_debug("free_inc now empty");
1190 * Find some buckets that we can invalidate, either
1191 * they're completely unused, or only contain clean data
1192 * that's been written back to the backing device or
1193 * another cache tier
1196 pr_debug("scanning for reclaimable buckets");
1198 nr = find_reclaimable_buckets(c, ca);
1200 pr_debug("found %zu buckets", nr);
1202 trace_alloc_batch(ca, nr, ca->alloc_heap.size);
1204 if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1205 ca->inc_gen_really_needs_gc) &&
1207 atomic_inc(&c->kick_gc);
1208 wake_up_process(c->gc_thread);
1212 * If we found any buckets, we have to invalidate them
1213 * before we scan for more - but if we didn't find very
1214 * many we may want to wait on more buckets being
1215 * available so we don't spin:
1218 (nr < ALLOC_SCAN_BATCH(ca) &&
1219 !fifo_empty(&ca->free[RESERVE_NONE]))) {
1220 ret = wait_buckets_available(c, ca);
1222 up_read(&c->gc_lock);
1228 up_read(&c->gc_lock);
1230 pr_debug("%zu buckets to invalidate", nr);
1233 * alloc_heap is now full of newly-invalidated buckets: next,
1234 * write out the new bucket gens:
1239 pr_debug("alloc thread stopping (ret %i)", ret);
1240 ca->allocator_state = ALLOCATOR_STOPPED;
1241 closure_wake_up(&c->freelist_wait);
1245 /* Startup/shutdown (ro/rw): */
1247 void bch2_recalc_capacity(struct bch_fs *c)
1250 u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
1251 unsigned bucket_size_max = 0;
1252 unsigned long ra_pages = 0;
1255 lockdep_assert_held(&c->state_lock);
1257 for_each_online_member(ca, c, i) {
1258 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1260 ra_pages += bdi->ra_pages;
1263 bch2_set_ra_pages(c, ra_pages);
1265 for_each_rw_member(ca, c, i) {
1266 u64 dev_reserve = 0;
1269 * We need to reserve buckets (from the number
1270 * of currently available buckets) against
1271 * foreground writes so that mainly copygc can
1272 * make forward progress.
1274 * We need enough to refill the various reserves
1275 * from scratch - copygc will use its entire
1276 * reserve all at once, then run again when
1277 * its reserve is refilled (from the formerly
1278 * available buckets).
1280 * This reserve is just used when considering if
1281 * allocations for foreground writes must wait -
1282 * not -ENOSPC calculations.
1284 for (j = 0; j < RESERVE_NONE; j++)
1285 dev_reserve += ca->free[j].size;
1287 dev_reserve += 1; /* btree write point */
1288 dev_reserve += 1; /* copygc write point */
1289 dev_reserve += 1; /* rebalance write point */
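/* dev_reserve has been counted in buckets so far; convert it to sectors: */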
1291 dev_reserve *= ca->mi.bucket_size;
1293 copygc_threshold += dev_reserve;
1295 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1296 ca->mi.first_bucket);
1298 reserved_sectors += dev_reserve * 2;
1300 bucket_size_max = max_t(unsigned, bucket_size_max,
1301 ca->mi.bucket_size);
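/*
 * The reserve kept back for gc/copygc is either an explicit byte count
 * (converted here to 512-byte sectors) or a percentage of raw capacity:
 */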
1304 gc_reserve = c->opts.gc_reserve_bytes
1305 ? c->opts.gc_reserve_bytes >> 9
1306 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1308 reserved_sectors = max(gc_reserve, reserved_sectors);
1310 reserved_sectors = min(reserved_sectors, capacity);
1312 c->copygc_threshold = copygc_threshold;
1313 c->capacity = capacity - reserved_sectors;
1315 c->bucket_size_max = bucket_size_max;
1317 /* Wake up in case someone was waiting for buckets */
1318 closure_wake_up(&c->freelist_wait);
1321 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1323 struct open_bucket *ob;
1326 for (ob = c->open_buckets;
1327 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1329 spin_lock(&ob->lock);
1330 if (ob->valid && !ob->on_partial_list &&
1331 ob->ptr.dev == ca->dev_idx)
1333 spin_unlock(&ob->lock);
1339 /* device goes ro: */
1340 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1344 BUG_ON(ca->alloc_thread);
1346 /* First, remove device from allocation groups: */
1348 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1349 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1352 * Capacity is calculated based on the devices in allocation groups:
1354 bch2_recalc_capacity(c);
1356 /* Next, close write points that point to this device... */
1357 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1358 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1360 bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1361 bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1362 bch2_writepoint_stop(c, ca, &c->btree_write_point);
1364 mutex_lock(&c->btree_reserve_cache_lock);
1365 while (c->btree_reserve_cache_nr) {
1366 struct btree_alloc *a =
1367 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1369 bch2_open_buckets_put(c, &a->ob);
1371 mutex_unlock(&c->btree_reserve_cache_lock);
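/* Release this device's partially filled open buckets: */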
1374 struct open_bucket *ob;
1376 spin_lock(&c->freelist_lock);
1377 if (!ca->open_buckets_partial_nr) {
1378 spin_unlock(&c->freelist_lock);
1381 ob = c->open_buckets +
1382 ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1383 ob->on_partial_list = false;
1384 spin_unlock(&c->freelist_lock);
1386 bch2_open_bucket_put(c, ob);
1389 bch2_ec_stop_dev(c, ca);
1392 * Wake up threads that were blocked on allocation, so they can notice
1393 * the device can no longer be removed and the capacity has changed:
1395 closure_wake_up(&c->freelist_wait);
1398 * journal_res_get() can block waiting for free space in the journal -
1399 * it needs to notice there may not be devices to allocate from anymore:
1401 wake_up(&c->journal.wait);
1403 /* Now wait for any in flight writes: */
1405 closure_wait_event(&c->open_buckets_wait,
1406 !bch2_dev_has_open_write_point(c, ca));
1409 /* device goes rw: */
1410 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1414 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1415 if (ca->mi.data_allowed & (1 << i))
1416 set_bit(ca->dev_idx, c->rw_devs[i].d);
1419 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1421 if (ca->alloc_thread)
1422 closure_wait_event(&c->freelist_wait,
1423 ca->allocator_state != ALLOCATOR_RUNNING);
1426 /* stop allocator thread: */
1427 void bch2_dev_allocator_stop(struct bch_dev *ca)
1429 struct task_struct *p;
1431 p = rcu_dereference_protected(ca->alloc_thread, 1);
1432 ca->alloc_thread = NULL;
1435 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1436 * the thread shutting down to avoid bch2_wake_allocator() racing:
1438 * XXX: it would be better to have the rcu barrier be asynchronous
1439 * instead of blocking us here
1449 /* start allocator thread: */
1450 int bch2_dev_allocator_start(struct bch_dev *ca)
1452 struct task_struct *p;
1455 * allocator thread already started?
1457 if (ca->alloc_thread)
1460 p = kthread_create(bch2_allocator_thread, ca,
1461 "bch_alloc[%s]", ca->name);
1466 rcu_assign_pointer(ca->alloc_thread, p);
1471 void bch2_fs_allocator_background_init(struct bch_fs *c)
1473 spin_lock_init(&c->freelist_lock);
1474 bch2_bucket_clock_init(c, READ);
1475 bch2_bucket_clock_init(c, WRITE);
1477 c->pd_controllers_update_seconds = 5;
1478 INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);