1 // SPDX-License-Identifier: GPL-2.0
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
7 #include "btree_update.h"
8 #include "btree_update_interior.h"
17 #include <linux/kthread.h>
18 #include <linux/math64.h>
19 #include <linux/random.h>
20 #include <linux/rculist.h>
21 #include <linux/rcupdate.h>
22 #include <linux/sched/task.h>
23 #include <linux/sort.h>
24 #include <trace/events/bcachefs.h>
26 static const char * const bch2_alloc_field_names[] = {
27 #define x(name, bytes) #name,
33 static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);
35 /* Ratelimiting/PD controllers */
37 static void pd_controllers_update(struct work_struct *work)
39 struct bch_fs *c = container_of(to_delayed_work(work),
41 pd_controllers_update);
45 for_each_member_device(ca, c, i) {
46 struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
48 u64 free = bucket_to_sector(ca,
49 __dev_buckets_free(ca, stats)) << 9;
51 * Bytes of internal fragmentation, which can be
52 * reclaimed by copy GC
54 s64 fragmented = (bucket_to_sector(ca,
55 stats.buckets[BCH_DATA_USER] +
56 stats.buckets[BCH_DATA_CACHED]) -
57 (stats.sectors[BCH_DATA_USER] +
58 stats.sectors[BCH_DATA_CACHED])) << 9;
60 fragmented = max(0LL, fragmented);
62 bch2_pd_controller_update(&ca->copygc_pd,
63 free, fragmented, -1);
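/*
 * Worked example (hypothetical numbers, a sketch only): with 256-sector
 * buckets, 1200 buckets holding USER+CACHED data span 1200 * 256 = 307200
 * sectors of bucket space.  If only 250000 of those sectors are live, then
 * fragmented = (307200 - 250000) << 9 = 29286400 bytes (~28 MiB) that copygc
 * could reclaim - that figure, along with the free space, is what drives the
 * PD controller above.
 */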
66 schedule_delayed_work(&c->pd_controllers_update,
67 c->pd_controllers_update_seconds * HZ);
70 /* Persistent alloc info: */
72 static inline u64 get_alloc_field(const struct bch_alloc *a,
73 const void **p, unsigned field)
75 unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
78 if (!(a->fields & (1 << field)))
83 v = *((const u8 *) *p);
102 static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
103 unsigned field, u64 v)
105 unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];
110 a->v.fields |= 1 << field;
117 *((__le16 *) *p) = cpu_to_le16(v);
120 *((__le32 *) *p) = cpu_to_le32(v);
123 *((__le64 *) *p) = cpu_to_le64(v);
132 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
134 struct bkey_alloc_unpacked ret = { .gen = 0 };
136 if (k.k->type == KEY_TYPE_alloc) {
137 const struct bch_alloc *a = bkey_s_c_to_alloc(k).v;
138 const void *d = a->data;
143 #define x(_name, _bits) ret._name = get_alloc_field(a, &d, idx++);
150 void bch2_alloc_pack(struct bkey_i_alloc *dst,
151 const struct bkey_alloc_unpacked src)
154 void *d = dst->v.data;
157 dst->v.gen = src.gen;
159 #define x(_name, _bits) put_alloc_field(dst, &d, idx++, src._name);
163 set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
166 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
168 unsigned i, bytes = offsetof(struct bch_alloc, data);
170 for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
171 if (a->fields & (1 << i))
172 bytes += BCH_ALLOC_FIELD_BYTES[i];
174 return DIV_ROUND_UP(bytes, sizeof(u64));
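/*
 * Worked example (a sketch; the real sizes come from BCH_ALLOC_FIELDS()):
 * the value is a bitmap of present fields followed by each present field
 * stored little-endian at its declared width.  Assuming the fixed header
 * (fields bitmap + gen) is 2 bytes and two fields of 2 and 4 bytes are set,
 * the value is 2 + 2 + 4 = 8 bytes and bch_alloc_val_u64s() returns
 * DIV_ROUND_UP(8, 8) = 1.
 */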
177 const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
179 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
181 if (k.k->p.inode >= c->sb.nr_devices ||
182 !c->devs[k.k->p.inode])
183 return "invalid device";
185 /* allow for unknown fields */
186 if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
187 return "incorrect value size";
192 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
195 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
196 const void *d = a.v->data;
199 pr_buf(out, "gen %u", a.v->gen);
201 for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
202 if (a.v->fields & (1 << i))
203 pr_buf(out, " %s %llu",
204 bch2_alloc_field_names[i],
205 get_alloc_field(a.v, &d, i));
208 int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
210 struct btree_trans trans;
211 struct btree_iter *iter;
214 struct journal_key *j;
218 bch2_trans_init(&trans, c, 0, 0);
220 for_each_btree_key(&trans, iter, BTREE_ID_ALLOC, POS_MIN, 0, k, ret)
221 bch2_mark_key(c, k, 0, 0, NULL, 0,
222 BCH_BUCKET_MARK_ALLOC_READ|
223 BCH_BUCKET_MARK_NOATOMIC);
225 ret = bch2_trans_exit(&trans) ?: ret;
227 bch_err(c, "error reading alloc info: %i", ret);
231 for_each_journal_key(*journal_keys, j)
232 if (j->btree_id == BTREE_ID_ALLOC)
233 bch2_mark_key(c, bkey_i_to_s_c(j->k),
235 BCH_BUCKET_MARK_ALLOC_READ|
236 BCH_BUCKET_MARK_NOATOMIC);
238 percpu_down_write(&c->mark_lock);
239 bch2_dev_usage_from_buckets(c);
240 percpu_up_write(&c->mark_lock);
242 mutex_lock(&c->bucket_clock[READ].lock);
243 for_each_member_device(ca, c, i) {
244 down_read(&ca->bucket_lock);
245 bch2_recalc_oldest_io(c, ca, READ);
246 up_read(&ca->bucket_lock);
248 mutex_unlock(&c->bucket_clock[READ].lock);
250 mutex_lock(&c->bucket_clock[WRITE].lock);
251 for_each_member_device(ca, c, i) {
252 down_read(&ca->bucket_lock);
253 bch2_recalc_oldest_io(c, ca, WRITE);
254 up_read(&ca->bucket_lock);
256 mutex_unlock(&c->bucket_clock[WRITE].lock);
261 int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
263 struct btree_trans trans;
264 struct btree_iter *iter;
268 if (k->k.p.inode >= c->sb.nr_devices ||
269 !c->devs[k->k.p.inode])
272 ca = bch_dev_bkey_exists(c, k->k.p.inode);
274 if (k->k.p.offset >= ca->mi.nbuckets)
277 bch2_trans_init(&trans, c, 0, 0);
279 iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, k->k.p,
282 ret = bch2_btree_iter_traverse(iter);
286 /* check buckets_written with btree node locked: */
287 if (test_bit(k->k.p.offset, ca->buckets_written)) {
292 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
294 ret = bch2_trans_commit(&trans, NULL, NULL,
296 BTREE_INSERT_LAZY_RW|
297 BTREE_INSERT_JOURNAL_REPLAY|
298 BTREE_INSERT_NOMARK);
300 bch2_trans_exit(&trans);
304 int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
306 struct btree_trans trans;
307 struct btree_iter *iter;
308 struct bucket_array *buckets;
311 struct bucket_mark m, new;
312 struct bkey_alloc_unpacked old_u, new_u;
313 __BKEY_PADDED(k, 8) alloc_key; /* hack: */
314 struct bkey_i_alloc *a;
320 BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
322 bch2_trans_init(&trans, c, 0, 0);
324 iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
325 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
327 for_each_rw_member(ca, c, i) {
328 down_read(&ca->bucket_lock);
330 buckets = bucket_array(ca);
332 for (b = buckets->first_bucket;
333 b < buckets->nbuckets;
335 if (!buckets->b[b].mark.dirty)
338 bch2_btree_iter_set_pos(iter, POS(i, b));
339 k = bch2_btree_iter_peek_slot(iter);
344 old_u = bch2_alloc_unpack(k);
346 percpu_down_read(&c->mark_lock);
348 m = READ_ONCE(g->mark);
349 new_u = alloc_mem_to_key(g, m);
350 percpu_up_read(&c->mark_lock);
355 if ((flags & BTREE_INSERT_LAZY_RW) &&
356 percpu_ref_is_zero(&c->writes)) {
357 up_read(&ca->bucket_lock);
358 bch2_trans_unlock(&trans);
360 ret = bch2_fs_read_write_early(c);
361 down_read(&ca->bucket_lock);
368 a = bkey_alloc_init(&alloc_key.k);
370 bch2_alloc_pack(a, new_u);
372 bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
373 ret = bch2_trans_commit(&trans, NULL, NULL,
378 if (ret && !test_bit(BCH_FS_EMERGENCY_RO, &c->flags)) {
379 bch_err(c, "error %i writing alloc info", ret);
380 printk(KERN_CONT "dev %llu bucket %llu\n",
381 iter->pos.inode, iter->pos.offset);
382 printk(KERN_CONT "gen %u -> %u\n", old_u.gen, new_u.gen);
383 #define x(_name, _bits) printk(KERN_CONT #_name " %u -> %u\n", old_u._name, new_u._name);
392 atomic64_cmpxchg(&g->_mark.v, m.v.counter, new.v.counter);
394 if (ca->buckets_written)
395 set_bit(b, ca->buckets_written);
397 bch2_trans_cond_resched(&trans);
400 up_read(&ca->bucket_lock);
403 percpu_ref_put(&ca->io_ref);
408 bch2_trans_exit(&trans);
413 /* Bucket IO clocks: */
415 static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
417 struct bucket_clock *clock = &c->bucket_clock[rw];
418 struct bucket_array *buckets = bucket_array(ca);
423 lockdep_assert_held(&c->bucket_clock[rw].lock);
425 /* Recalculate max_last_io for this device: */
426 for_each_bucket(g, buckets)
427 max_last_io = max(max_last_io, bucket_last_io(c, g, rw));
429 ca->max_last_bucket_io[rw] = max_last_io;
431 /* Recalculate global max_last_io: */
434 for_each_member_device(ca, c, i)
435 max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);
437 clock->max_last_io = max_last_io;
440 static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
442 struct bucket_clock *clock = &c->bucket_clock[rw];
443 struct bucket_array *buckets;
448 trace_rescale_prios(c);
450 for_each_member_device(ca, c, i) {
451 down_read(&ca->bucket_lock);
452 buckets = bucket_array(ca);
454 for_each_bucket(g, buckets)
455 g->io_time[rw] = clock->hand -
456 bucket_last_io(c, g, rw) / 2;
458 bch2_recalc_oldest_io(c, ca, rw);
460 up_read(&ca->bucket_lock);
464 static inline u64 bucket_clock_freq(u64 capacity)
466 return max(capacity >> 10, 2028ULL);
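/*
 * Worked example (assuming 512-byte sectors): for a 1 TiB filesystem,
 * capacity is 2^31 sectors and capacity >> 10 is 2^21 sectors, so the clock
 * hand advances once per ~1 GiB of IO - roughly every 0.1% of capacity read
 * or written - with a floor of 2028 sectors (~1 MiB) for tiny filesystems.
 */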
469 static void bch2_inc_clock_hand(struct io_timer *timer)
471 struct bucket_clock *clock = container_of(timer,
472 struct bucket_clock, rescale);
473 struct bch_fs *c = container_of(clock,
474 struct bch_fs, bucket_clock[clock->rw]);
479 mutex_lock(&clock->lock);
481 /* if clock cannot be advanced more, rescale prio */
482 if (clock->max_last_io >= U16_MAX - 2)
483 bch2_rescale_bucket_io_times(c, clock->rw);
485 BUG_ON(clock->max_last_io >= U16_MAX - 2);
487 for_each_member_device(ca, c, i)
488 ca->max_last_bucket_io[clock->rw]++;
489 clock->max_last_io++;
492 mutex_unlock(&clock->lock);
494 capacity = READ_ONCE(c->capacity);
500 * we only increment when 0.1% of the filesystem capacity has been read
501 * or written to; this determines when it's time to advance the clock hand
503 * XXX: we shouldn't really be going off of the capacity of devices in
504 * RW mode (that will be 0 when we're RO, yet we can still service reads)
507 timer->expire += bucket_clock_freq(capacity);
509 bch2_io_timer_add(&c->io_clock[clock->rw], timer);
512 static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
514 struct bucket_clock *clock = &c->bucket_clock[rw];
518 clock->rescale.fn = bch2_inc_clock_hand;
519 clock->rescale.expire = bucket_clock_freq(c->capacity);
520 mutex_init(&clock->lock);
523 /* Background allocator thread: */
526 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
527 * (marking them as invalidated on disk), then optionally issues discard
528 * commands to the newly free buckets, then puts them on the various freelists.
531 #define BUCKET_GC_GEN_MAX 96U
534 * wait_buckets_available - wait on reclaimable buckets
536 * If there aren't enough available buckets to fill up free_inc, wait until there are.
539 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
541 unsigned long gc_count = c->gc_count;
544 ca->allocator_state = ALLOCATOR_BLOCKED;
545 closure_wake_up(&c->freelist_wait);
548 set_current_state(TASK_INTERRUPTIBLE);
549 if (kthread_should_stop()) {
554 if (gc_count != c->gc_count)
555 ca->inc_gen_really_needs_gc = 0;
557 if ((ssize_t) (dev_buckets_available(c, ca) -
558 ca->inc_gen_really_needs_gc) >=
559 (ssize_t) fifo_free(&ca->free_inc))
562 up_read(&c->gc_lock);
565 down_read(&c->gc_lock);
568 __set_current_state(TASK_RUNNING);
569 ca->allocator_state = ALLOCATOR_RUNNING;
570 closure_wake_up(&c->freelist_wait);
575 static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
577 struct bucket_mark mark)
581 if (!is_available_bucket(mark))
584 if (ca->buckets_nouse &&
585 test_bit(bucket, ca->buckets_nouse))
588 gc_gen = bucket_gc_gen(ca, bucket);
590 if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
591 ca->inc_gen_needs_gc++;
593 if (gc_gen >= BUCKET_GC_GEN_MAX)
594 ca->inc_gen_really_needs_gc++;
596 return gc_gen < BUCKET_GC_GEN_MAX;
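/*
 * Example with BUCKET_GC_GEN_MAX = 96: once bucket_gc_gen() reaches 48 the
 * bucket counts toward inc_gen_needs_gc; at 96 it counts toward
 * inc_gen_really_needs_gc and cannot be invalidated again until
 * mark-and-sweep GC has run and brought its gc gen back down.
 */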
600 * Determines what order we're going to reuse buckets, smallest bucket_key() first.
604 * - We take into account the read prio of the bucket, which gives us an
605 * indication of how hot the data is -- we scale the prio so that the prio
606 * farthest from the clock is worth 1/8th of the closest.
608 * - The number of sectors of cached data in the bucket, which gives us an
609 * indication of the cost in cache misses this eviction will cause.
611 * - If hotness * sectors used compares equal, we pick the bucket with the
612 * smallest bucket_gc_gen() - since incrementing the same bucket's generation
613 * number repeatedly forces us to run mark and sweep gc to avoid generation number wraparound.
617 static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
618 size_t b, struct bucket_mark m)
620 unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
621 unsigned max_last_io = ca->max_last_bucket_io[READ];
624 * Time since last read, scaled to [0, 8) where larger value indicates
625 * more recently read data:
627 unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;
629 /* How much we want to keep the data in this bucket: */
630 unsigned long data_wantness =
631 (hotness + 1) * bucket_sectors_used(m);
633 unsigned long needs_journal_commit =
634 bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);
636 return (data_wantness << 9) |
637 (needs_journal_commit << 8) |
638 (bucket_gc_gen(ca, b) / 16);
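/*
 * Worked example (hypothetical numbers): with max_last_io = 1024, a bucket
 * read recently (last_io = 128) gets hotness = (1024 - 128) * 7 / 1024 = 6,
 * while one not read since the oldest IO (last_io = 1024) gets hotness = 0.
 * With 100 used sectors each, their keys are (6 + 1) * 100 << 9 = 358400 vs
 * (0 + 1) * 100 << 9 = 51200 (plus the journal-commit bit and gc gen in the
 * low bits), so the cold bucket sorts first and gets reused first.
 */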
641 static inline int bucket_alloc_cmp(alloc_heap *h,
642 struct alloc_heap_entry l,
643 struct alloc_heap_entry r)
645 return cmp_int(l.key, r.key) ?:
646 cmp_int(r.nr, l.nr) ?:
647 cmp_int(l.bucket, r.bucket);
650 static inline int bucket_idx_cmp(const void *_l, const void *_r)
652 const struct alloc_heap_entry *l = _l, *r = _r;
654 return cmp_int(l->bucket, r->bucket);
657 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
659 struct bucket_array *buckets;
660 struct alloc_heap_entry e = { 0 };
663 ca->alloc_heap.used = 0;
665 mutex_lock(&c->bucket_clock[READ].lock);
666 down_read(&ca->bucket_lock);
668 buckets = bucket_array(ca);
670 bch2_recalc_oldest_io(c, ca, READ);
673 * Find buckets with lowest read priority, by building a maxheap sorted
674 * by read priority and repeatedly replacing the maximum element until
675 * all buckets have been visited.
677 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
678 struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
679 unsigned long key = bucket_sort_key(c, ca, b, m);
681 if (!bch2_can_invalidate_bucket(ca, b, m))
684 if (e.nr && e.bucket + e.nr == b && e.key == key) {
688 heap_add_or_replace(&ca->alloc_heap, e,
689 -bucket_alloc_cmp, NULL);
691 e = (struct alloc_heap_entry) {
702 heap_add_or_replace(&ca->alloc_heap, e,
703 -bucket_alloc_cmp, NULL);
705 for (i = 0; i < ca->alloc_heap.used; i++)
706 nr += ca->alloc_heap.data[i].nr;
708 while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
709 nr -= ca->alloc_heap.data[0].nr;
710 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
713 up_read(&ca->bucket_lock);
714 mutex_unlock(&c->bucket_clock[READ].lock);
717 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
719 struct bucket_array *buckets = bucket_array(ca);
720 struct bucket_mark m;
723 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
724 ca->fifo_last_bucket >= ca->mi.nbuckets)
725 ca->fifo_last_bucket = ca->mi.first_bucket;
727 start = ca->fifo_last_bucket;
730 ca->fifo_last_bucket++;
731 if (ca->fifo_last_bucket == ca->mi.nbuckets)
732 ca->fifo_last_bucket = ca->mi.first_bucket;
734 b = ca->fifo_last_bucket;
735 m = READ_ONCE(buckets->b[b].mark);
737 if (bch2_can_invalidate_bucket(ca, b, m)) {
738 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
740 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
741 if (heap_full(&ca->alloc_heap))
746 } while (ca->fifo_last_bucket != start);
749 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
751 struct bucket_array *buckets = bucket_array(ca);
752 struct bucket_mark m;
756 checked < ca->mi.nbuckets / 2;
758 size_t b = bch2_rand_range(ca->mi.nbuckets -
759 ca->mi.first_bucket) +
762 m = READ_ONCE(buckets->b[b].mark);
764 if (bch2_can_invalidate_bucket(ca, b, m)) {
765 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
767 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
768 if (heap_full(&ca->alloc_heap))
775 sort(ca->alloc_heap.data,
777 sizeof(ca->alloc_heap.data[0]),
778 bucket_idx_cmp, NULL);
780 /* remove duplicates: */
781 for (i = 0; i + 1 < ca->alloc_heap.used; i++)
782 if (ca->alloc_heap.data[i].bucket ==
783 ca->alloc_heap.data[i + 1].bucket)
784 ca->alloc_heap.data[i].nr = 0;
787 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
791 ca->inc_gen_needs_gc = 0;
793 switch (ca->mi.replacement) {
794 case CACHE_REPLACEMENT_LRU:
795 find_reclaimable_buckets_lru(c, ca);
797 case CACHE_REPLACEMENT_FIFO:
798 find_reclaimable_buckets_fifo(c, ca);
800 case CACHE_REPLACEMENT_RANDOM:
801 find_reclaimable_buckets_random(c, ca);
805 heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
807 for (i = 0; i < ca->alloc_heap.used; i++)
808 nr += ca->alloc_heap.data[i].nr;
813 static inline long next_alloc_bucket(struct bch_dev *ca)
815 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
817 while (ca->alloc_heap.used) {
819 size_t b = top->bucket;
826 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
833 * returns sequence number of most recent journal entry that updated this bucket
836 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
838 if (m.journal_seq_valid) {
839 u64 journal_seq = atomic64_read(&c->journal.seq);
840 u64 bucket_seq = journal_seq;
842 bucket_seq &= ~((u64) U16_MAX);
843 bucket_seq |= m.journal_seq;
845 if (bucket_seq > journal_seq)
846 bucket_seq -= 1 << 16;
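/*
 * Only the low 16 bits of the journal sequence number are stored in the
 * bucket mark; the above splices them onto the current sequence number.
 * E.g. if journal.seq = 0x21234 and m.journal_seq = 0x9000, then
 * 0x20000 | 0x9000 = 0x29000 would be in the future, so the update must have
 * been in the previous 64K window and we return 0x29000 - 0x10000 = 0x19000.
 */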
854 static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
856 struct btree_iter *iter,
857 u64 *journal_seq, unsigned flags)
860 __BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
863 __BKEY_PADDED(k, 8) alloc_key;
865 struct bch_fs *c = trans->c;
866 struct bkey_i_alloc *a;
867 struct bkey_alloc_unpacked u;
869 struct bucket_mark m;
871 bool invalidating_cached_data;
875 BUG_ON(!ca->alloc_heap.used ||
876 !ca->alloc_heap.data[0].nr);
877 b = ca->alloc_heap.data[0].bucket;
879 /* first, put on free_inc and mark as owned by allocator: */
880 percpu_down_read(&c->mark_lock);
881 spin_lock(&c->freelist_lock);
883 verify_not_on_freelist(c, ca, b);
885 BUG_ON(!fifo_push(&ca->free_inc, b));
887 bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);
889 spin_unlock(&c->freelist_lock);
890 percpu_up_read(&c->mark_lock);
892 BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);
894 bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
896 k = bch2_btree_iter_peek_slot(iter);
902 * The allocator has to start before journal replay is finished - thus,
903 * we have to trust the in memory bucket @m, not the version in the btree:
906 percpu_down_read(&c->mark_lock);
908 m = READ_ONCE(g->mark);
909 u = alloc_mem_to_key(g, m);
910 percpu_up_read(&c->mark_lock);
912 invalidating_cached_data = m.cached_sectors != 0;
917 u.cached_sectors = 0;
918 u.read_time = c->bucket_clock[READ].hand;
919 u.write_time = c->bucket_clock[WRITE].hand;
921 a = bkey_alloc_init(&alloc_key.k);
923 bch2_alloc_pack(a, u);
925 bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
929 * when using deferred btree updates, we have journal reclaim doing
930 * btree updates and thus requiring the allocator to make forward
931 * progress, and here the allocator is requiring space in the journal -
932 * so we need a journal pre-reservation:
934 ret = bch2_trans_commit(trans, NULL,
935 invalidating_cached_data ? journal_seq : NULL,
937 BTREE_INSERT_NOUNLOCK|
938 BTREE_INSERT_NOCHECK_RW|
940 BTREE_INSERT_USE_RESERVE|
941 BTREE_INSERT_USE_ALLOC_RESERVE|
942 BTREE_INSERT_BUCKET_INVALIDATE|
948 /* remove from alloc_heap: */
949 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
955 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
957 /* with btree still locked: */
958 if (ca->buckets_written)
959 set_bit(b, ca->buckets_written);
962 * Make sure we flush the last journal entry that updated this
963 * bucket (i.e. deleting the last reference) before writing to the bucket again:
966 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
970 /* remove from free_inc: */
971 percpu_down_read(&c->mark_lock);
972 spin_lock(&c->freelist_lock);
974 bch2_mark_alloc_bucket(c, ca, b, false,
975 gc_pos_alloc(c, NULL), 0);
977 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
980 spin_unlock(&c->freelist_lock);
981 percpu_up_read(&c->mark_lock);
987 static bool bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
988 size_t bucket, u64 *flush_seq)
990 struct bucket_mark m;
992 percpu_down_read(&c->mark_lock);
993 spin_lock(&c->freelist_lock);
995 bch2_invalidate_bucket(c, ca, bucket, &m);
997 verify_not_on_freelist(c, ca, bucket);
998 BUG_ON(!fifo_push(&ca->free_inc, bucket));
1000 spin_unlock(&c->freelist_lock);
1002 bucket_io_clock_reset(c, ca, bucket, READ);
1003 bucket_io_clock_reset(c, ca, bucket, WRITE);
1005 percpu_up_read(&c->mark_lock);
1007 *flush_seq = max(*flush_seq, bucket_journal_seq(c, m));
1009 return m.cached_sectors != 0;
1013 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
1015 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
1017 struct btree_trans trans;
1018 struct btree_iter *iter;
1019 u64 journal_seq = 0;
1022 bch2_trans_init(&trans, c, 0, 0);
1024 iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
1025 POS(ca->dev_idx, 0),
1026 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
1028 /* Only use nowait if we've already invalidated at least one bucket: */
1030 !fifo_full(&ca->free_inc) &&
1031 ca->alloc_heap.used)
1032 ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
1033 BTREE_INSERT_GC_LOCK_HELD|
1034 (!fifo_empty(&ca->free_inc)
1035 ? BTREE_INSERT_NOWAIT : 0));
1037 bch2_trans_exit(&trans);
1039 /* If we used NOWAIT, don't return the error: */
1040 if (!fifo_empty(&ca->free_inc))
1043 bch_err(ca, "error invalidating buckets: %i", ret);
1048 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
1050 bch_err(ca, "journal error: %i", ret);
1057 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
1063 set_current_state(TASK_INTERRUPTIBLE);
1065 spin_lock(&c->freelist_lock);
1066 for (i = 0; i < RESERVE_NR; i++)
1067 if (fifo_push(&ca->free[i], bucket)) {
1068 fifo_pop(&ca->free_inc, bucket);
1070 closure_wake_up(&c->freelist_wait);
1071 ca->allocator_state = ALLOCATOR_RUNNING;
1073 spin_unlock(&c->freelist_lock);
1077 if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
1078 ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
1079 closure_wake_up(&c->freelist_wait);
1082 spin_unlock(&c->freelist_lock);
1084 if ((current->flags & PF_KTHREAD) &&
1085 kthread_should_stop()) {
1094 __set_current_state(TASK_RUNNING);
1099 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
1100 * freelists, waiting until there's room if necessary:
1102 static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
1104 while (!fifo_empty(&ca->free_inc)) {
1105 size_t bucket = fifo_peek(&ca->free_inc);
1107 if (ca->mi.discard &&
1108 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1109 blkdev_issue_discard(ca->disk_sb.bdev,
1110 bucket_to_sector(ca, bucket),
1111 ca->mi.bucket_size, GFP_NOIO, 0);
1113 if (push_invalidated_bucket(c, ca, bucket))
1121 * bch_allocator_thread - move buckets from free_inc to reserves
1123 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
1124 * the reserves are depleted by bucket allocation. When we run out
1125 * of free_inc, try to invalidate some buckets and write out the new bucket gens.
1128 static int bch2_allocator_thread(void *arg)
1130 struct bch_dev *ca = arg;
1131 struct bch_fs *c = ca->fs;
1136 ca->allocator_state = ALLOCATOR_RUNNING;
1141 pr_debug("discarding %zu invalidated buckets",
1142 fifo_used(&ca->free_inc));
1144 ret = discard_invalidated_buckets(c, ca);
1148 down_read(&c->gc_lock);
1150 ret = bch2_invalidate_buckets(c, ca);
1152 up_read(&c->gc_lock);
1156 if (!fifo_empty(&ca->free_inc)) {
1157 up_read(&c->gc_lock);
1161 pr_debug("free_inc now empty");
1165 * Find some buckets that we can invalidate: either
1166 * they're completely unused, or they only contain clean data
1167 * that's been written back to the backing device or
1168 * another cache tier
1171 pr_debug("scanning for reclaimable buckets");
1173 nr = find_reclaimable_buckets(c, ca);
1175 pr_debug("found %zu buckets", nr);
1177 trace_alloc_batch(ca, nr, ca->alloc_heap.size);
1179 if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1180 ca->inc_gen_really_needs_gc) &&
1182 atomic_inc(&c->kick_gc);
1183 wake_up_process(c->gc_thread);
1187 * If we found any buckets, we have to invalidate them
1188 * before we scan for more - but if we didn't find very
1189 * many we may want to wait on more buckets being
1190 * available so we don't spin:
1193 (nr < ALLOC_SCAN_BATCH(ca) &&
1194 !fifo_full(&ca->free[RESERVE_MOVINGGC]))) {
1195 ret = wait_buckets_available(c, ca);
1197 up_read(&c->gc_lock);
1203 up_read(&c->gc_lock);
1205 pr_debug("%zu buckets to invalidate", nr);
1208 * alloc_heap is now full of buckets ready to be invalidated: next,
1209 * invalidate them and write out the new bucket gens:
1214 pr_debug("alloc thread stopping (ret %i)", ret);
1215 ca->allocator_state = ALLOCATOR_STOPPED;
1216 closure_wake_up(&c->freelist_wait);
1220 /* Startup/shutdown (ro/rw): */
1222 void bch2_recalc_capacity(struct bch_fs *c)
1225 u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1226 unsigned bucket_size_max = 0;
1227 unsigned long ra_pages = 0;
1230 lockdep_assert_held(&c->state_lock);
1232 for_each_online_member(ca, c, i) {
1233 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;
1235 ra_pages += bdi->ra_pages;
1238 bch2_set_ra_pages(c, ra_pages);
1240 for_each_rw_member(ca, c, i) {
1241 u64 dev_reserve = 0;
1244 * We need to reserve buckets (from the number
1245 * of currently available buckets) against
1246 * foreground writes so that mainly copygc can
1247 * make forward progress.
1249 * We need enough to refill the various reserves
1250 * from scratch - copygc will use its entire
1251 * reserve all at once, then run again when
1252 * its reserve is refilled (from the formerly
1253 * available buckets).
1255 * This reserve is just used when considering if
1256 * allocations for foreground writes must wait -
1257 * not -ENOSPC calculations.
1259 for (j = 0; j < RESERVE_NONE; j++)
1260 dev_reserve += ca->free[j].size;
1262 dev_reserve += 1; /* btree write point */
1263 dev_reserve += 1; /* copygc write point */
1264 dev_reserve += 1; /* rebalance write point */
1266 dev_reserve *= ca->mi.bucket_size;
1268 ca->copygc_threshold = dev_reserve;
1270 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1271 ca->mi.first_bucket);
1273 reserved_sectors += dev_reserve * 2;
1275 bucket_size_max = max_t(unsigned, bucket_size_max,
1276 ca->mi.bucket_size);
1279 gc_reserve = c->opts.gc_reserve_bytes
1280 ? c->opts.gc_reserve_bytes >> 9
1281 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1283 reserved_sectors = max(gc_reserve, reserved_sectors);
1285 reserved_sectors = min(reserved_sectors, capacity);
1287 c->capacity = capacity - reserved_sectors;
1289 c->bucket_size_max = bucket_size_max;
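/*
 * Worked example (hypothetical, single-device): with 256-sector buckets and
 * 100000 usable buckets, capacity starts at 25600000 sectors.  If the
 * device's freelists sum to 300 buckets, dev_reserve = (300 + 3) * 256 =
 * 77568 sectors and reserved_sectors = 155136.  With gc_reserve_percent at,
 * say, 8, gc_reserve = 2048000 sectors wins the max(), and the usable
 * c->capacity becomes 25600000 - 2048000 = 23552000 sectors.
 */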
1292 bch2_io_timer_add(&c->io_clock[READ],
1293 &c->bucket_clock[READ].rescale);
1294 bch2_io_timer_add(&c->io_clock[WRITE],
1295 &c->bucket_clock[WRITE].rescale);
1297 bch2_io_timer_del(&c->io_clock[READ],
1298 &c->bucket_clock[READ].rescale);
1299 bch2_io_timer_del(&c->io_clock[WRITE],
1300 &c->bucket_clock[WRITE].rescale);
1303 /* Wake up in case someone was waiting for buckets */
1304 closure_wake_up(&c->freelist_wait);
1307 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1309 struct open_bucket *ob;
1312 for (ob = c->open_buckets;
1313 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1315 spin_lock(&ob->lock);
1316 if (ob->valid && !ob->on_partial_list &&
1317 ob->ptr.dev == ca->dev_idx)
1319 spin_unlock(&ob->lock);
1325 /* device goes ro: */
1326 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1330 BUG_ON(ca->alloc_thread);
1332 /* First, remove device from allocation groups: */
1334 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1335 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1338 * Capacity is calculated based off of devices in allocation groups:
1340 bch2_recalc_capacity(c);
1342 /* Next, close write points that point to this device... */
1343 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1344 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1346 bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
1347 bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1348 bch2_writepoint_stop(c, ca, &c->btree_write_point);
1350 mutex_lock(&c->btree_reserve_cache_lock);
1351 while (c->btree_reserve_cache_nr) {
1352 struct btree_alloc *a =
1353 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1355 bch2_open_buckets_put(c, &a->ob);
1357 mutex_unlock(&c->btree_reserve_cache_lock);
1360 struct open_bucket *ob;
1362 spin_lock(&c->freelist_lock);
1363 if (!ca->open_buckets_partial_nr) {
1364 spin_unlock(&c->freelist_lock);
1367 ob = c->open_buckets +
1368 ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1369 ob->on_partial_list = false;
1370 spin_unlock(&c->freelist_lock);
1372 bch2_open_bucket_put(c, ob);
1375 bch2_ec_stop_dev(c, ca);
1378 * Wake up threads that were blocked on allocation, so they can notice
1379 * the device is no longer available for allocation and the capacity has changed:
1381 closure_wake_up(&c->freelist_wait);
1384 * journal_res_get() can block waiting for free space in the journal -
1385 * it needs to notice there may not be devices to allocate from anymore:
1387 wake_up(&c->journal.wait);
1389 /* Now wait for any in flight writes: */
1391 closure_wait_event(&c->open_buckets_wait,
1392 !bch2_dev_has_open_write_point(c, ca));
1395 /* device goes rw: */
1396 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1400 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1401 if (ca->mi.data_allowed & (1 << i))
1402 set_bit(ca->dev_idx, c->rw_devs[i].d);
1405 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1407 if (ca->alloc_thread)
1408 closure_wait_event(&c->freelist_wait,
1409 ca->allocator_state != ALLOCATOR_RUNNING);
1412 /* stop allocator thread: */
1413 void bch2_dev_allocator_stop(struct bch_dev *ca)
1415 struct task_struct *p;
1417 p = rcu_dereference_protected(ca->alloc_thread, 1);
1418 ca->alloc_thread = NULL;
1421 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1422 * the thread shutting down to avoid bch2_wake_allocator() racing:
1424 * XXX: it would be better to have the rcu barrier be asynchronous
1425 * instead of blocking us here
1435 /* start allocator thread: */
1436 int bch2_dev_allocator_start(struct bch_dev *ca)
1438 struct task_struct *p;
1441 * allocator thread already started?
1443 if (ca->alloc_thread)
1446 p = kthread_create(bch2_allocator_thread, ca,
1447 "bch_alloc[%s]", ca->name);
1452 rcu_assign_pointer(ca->alloc_thread, p);
1457 static bool flush_held_btree_writes(struct bch_fs *c)
1459 struct bucket_table *tbl;
1460 struct rhash_head *pos;
1462 bool nodes_unwritten;
1466 nodes_unwritten = false;
1469 for_each_cached_btree(b, c, tbl, i, pos)
1470 if (btree_node_need_write(b)) {
1471 if (btree_node_may_write(b)) {
1473 btree_node_lock_type(c, b, SIX_LOCK_read);
1474 bch2_btree_node_write(c, b, SIX_LOCK_read);
1475 six_unlock_read(&b->lock);
1478 nodes_unwritten = true;
1483 if (c->btree_roots_dirty) {
1484 bch2_journal_meta(&c->journal);
1488 return !nodes_unwritten &&
1489 !bch2_btree_interior_updates_nr_pending(c);
1492 static void allocator_start_issue_discards(struct bch_fs *c)
1498 for_each_rw_member(ca, c, dev_iter)
1499 while (fifo_pop(&ca->free_inc, bu))
1500 blkdev_issue_discard(ca->disk_sb.bdev,
1501 bucket_to_sector(ca, bu),
1502 ca->mi.bucket_size, GFP_NOIO, 0);
1505 static int resize_free_inc(struct bch_dev *ca)
1507 alloc_fifo free_inc;
1509 if (!fifo_full(&ca->free_inc))
1512 if (!init_fifo(&free_inc,
1513 ca->free_inc.size * 2,
1517 fifo_move(&free_inc, &ca->free_inc);
1518 swap(free_inc, ca->free_inc);
1519 free_fifo(&free_inc);
1523 static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
1529 if (test_alloc_startup(c))
1532 down_read(&c->gc_lock);
1534 /* Scan for buckets that are already invalidated: */
1535 for_each_rw_member(ca, c, dev_iter) {
1536 struct bucket_array *buckets;
1537 struct bucket_mark m;
1540 down_read(&ca->bucket_lock);
1541 buckets = bucket_array(ca);
1543 for (bu = buckets->first_bucket;
1544 bu < buckets->nbuckets; bu++) {
1545 m = READ_ONCE(buckets->b[bu].mark);
1547 if (!buckets->b[bu].gen_valid ||
1548 !is_available_bucket(m) ||
1550 (ca->buckets_nouse &&
1551 test_bit(bu, ca->buckets_nouse)))
1554 percpu_down_read(&c->mark_lock);
1555 bch2_mark_alloc_bucket(c, ca, bu, true,
1556 gc_pos_alloc(c, NULL), 0);
1557 percpu_up_read(&c->mark_lock);
1559 fifo_push(&ca->free_inc, bu);
1561 discard_invalidated_buckets(c, ca);
1563 if (fifo_full(&ca->free[RESERVE_BTREE]))
1566 up_read(&ca->bucket_lock);
1569 up_read(&c->gc_lock);
1571 /* did we find enough buckets? */
1572 for_each_rw_member(ca, c, dev_iter)
1573 if (!fifo_full(&ca->free[RESERVE_BTREE]))
1579 int bch2_fs_allocator_start(struct bch_fs *c)
1583 u64 journal_seq = 0;
1588 if (!test_alloc_startup(c) &&
1589 bch2_fs_allocator_start_fast(c))
1592 pr_debug("not enough empty buckets; scanning for reclaimable buckets");
1595 * We're moving buckets to freelists _before_ they've been marked as
1596 * invalidated on disk - we have to do this so that we can allocate new btree
1597 * nodes, which we need in order to mark them as invalidated on disk.
1599 * However, we can't _write_ to any of these buckets yet - they might
1600 * have cached data in them, which is live until they're marked as
1601 * invalidated on disk:
1603 set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
1605 down_read(&c->gc_lock);
1609 for_each_rw_member(ca, c, dev_iter) {
1610 find_reclaimable_buckets(c, ca);
1612 while (!fifo_full(&ca->free[RESERVE_BTREE]) &&
1613 (bu = next_alloc_bucket(ca)) >= 0) {
1614 ret = resize_free_inc(ca);
1616 percpu_ref_put(&ca->io_ref);
1617 up_read(&c->gc_lock);
1621 bch2_invalidate_one_bucket(c, ca, bu,
1624 fifo_push(&ca->free[RESERVE_BTREE], bu);
1628 pr_debug("done scanning for reclaimable buckets");
1631 * XXX: it's possible for this to deadlock waiting on journal reclaim,
1632 * since we're holding btree writes. What then?
1634 ret = bch2_alloc_write(c,
1635 BTREE_INSERT_NOCHECK_RW|
1636 BTREE_INSERT_USE_ALLOC_RESERVE|
1637 BTREE_INSERT_NOWAIT, &wrote);
1640 * If bch2_alloc_write() did anything, it may have used some
1641 * buckets, and we need the RESERVE_BTREE freelist full - so we
1642 * need to loop and scan again.
1643 * And if it errored, it may have been because there weren't
1644 * enough buckets, so just scan and loop again as long as it
1645 * made some progress:
1648 up_read(&c->gc_lock);
1653 pr_debug("flushing journal");
1655 ret = bch2_journal_flush(&c->journal);
1659 pr_debug("issuing discards");
1660 allocator_start_issue_discards(c);
1662 clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
1663 closure_wait_event(&c->btree_interior_update_wait,
1664 flush_held_btree_writes(c));
1669 void bch2_fs_allocator_background_init(struct bch_fs *c)
1671 spin_lock_init(&c->freelist_lock);
1672 bch2_bucket_clock_init(c, READ);
1673 bch2_bucket_clock_init(c, WRITE);
1675 c->pd_controllers_update_seconds = 5;
1676 INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);