// SPDX-License-Identifier: GPL-2.0
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static const char * const bch2_alloc_field_names[] = {
#define x(name, bytes) #name,

static void bch2_recalc_oldest_io(struct bch_fs *, struct bch_dev *, int);

/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
	struct bch_fs *c = container_of(to_delayed_work(work),
					pd_controllers_update);

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);

		u64 free = bucket_to_sector(ca,
				__dev_buckets_free(ca, stats)) << 9;

		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC
		s64 fragmented = (bucket_to_sector(ca,
					stats.buckets[BCH_DATA_USER] +
					stats.buckets[BCH_DATA_CACHED]) -
				  (stats.sectors[BCH_DATA_USER] +
				   stats.sectors[BCH_DATA_CACHED])) << 9;

		fragmented = max(0LL, fragmented);

		bch2_pd_controller_update(&ca->copygc_pd,
					  free, fragmented, -1);

	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);

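/*
 * Illustrative only, not from the original source: a rough worked example of
 * the "fragmented" calculation above, assuming 512-byte sectors and a
 * hypothetical device.  If user + cached data occupies 1000 buckets of 128
 * sectors each, but only 100000 of those 128000 sectors are live, then
 *
 *	fragmented = (128000 - 100000) sectors << 9 = 14,336,000 bytes (~13.7MiB)
 *
 * i.e. the bytes copygc could reclaim by compacting live data into fewer
 * buckets; this, together with the free space, is what drives the copygc
 * PD controller's rate.
 */
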
/* Persistent alloc info: */

static inline u64 get_alloc_field(const struct bch_alloc *a,
				  const void **p, unsigned field)
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	if (!(a->fields & (1 << field)))

		v = *((const u8 *) *p);

static inline void put_alloc_field(struct bkey_i_alloc *a, void **p,
				   unsigned field, u64 v)
	unsigned bytes = BCH_ALLOC_FIELD_BYTES[field];

	a->v.fields |= 1 << field;

		*((__le16 *) *p) = cpu_to_le16(v);
		*((__le32 *) *p) = cpu_to_le32(v);
		*((__le64 *) *p) = cpu_to_le64(v);

struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	if (k.k->type == KEY_TYPE_alloc) {
		const struct bch_alloc *a = bkey_s_c_to_alloc(k).v;
		const void *d = a->data;
#define x(_name, _bits)	ret._name = get_alloc_field(a, &d, idx++);

void bch2_alloc_pack(struct bkey_i_alloc *dst,
		     const struct bkey_alloc_unpacked src)
	void *d = dst->v.data;

	dst->v.gen = src.gen;
#define x(_name, _bits)	put_alloc_field(dst, &d, idx++, src._name);

	bytes = (void *) d - (void *) &dst->v;
	set_bkey_val_bytes(&dst->k, bytes);
	memset_u64s_tail(&dst->v, 0, bytes);

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));

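/*
 * Illustrative sketch, not part of the on-disk format definition: alloc keys
 * use a variable-width encoding.  v.fields is a bitmap of which fields are
 * present, and each present field is appended to v.data with the width given
 * by BCH_ALLOC_FIELD_BYTES[].  For example, assuming only two 2-byte fields
 * (say read_time and write_time) are present:
 *
 *	bytes = offsetof(struct bch_alloc, data) + 2 + 2
 *	u64s  = DIV_ROUND_UP(bytes, sizeof(u64))
 *
 * so the whole value still fits in a single u64.  bch2_alloc_pack() and
 * bch2_alloc_unpack() above are inverses of each other over this encoding.
 */
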
const char *bch2_alloc_invalid(const struct bch_fs *c, struct bkey_s_c k)
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	const void *d = a.v->data;

	pr_buf(out, "gen %u", a.v->gen);

	for (i = 0; i < BCH_ALLOC_FIELD_NR; i++)
		if (a.v->fields & (1 << i))
			pr_buf(out, " %s %llu",
			       bch2_alloc_field_names[i],
			       get_alloc_field(a.v, &d, i));

static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
			      unsigned level, struct bkey_s_c k)
		bch2_mark_key(c, k, 0, 0, NULL, 0,
			      BTREE_TRIGGER_ALLOC_READ|
			      BTREE_TRIGGER_NOATOMIC);

int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
	ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
					  NULL, bch2_alloc_read_fn);
		bch_err(c, "error reading alloc info: %i", ret);

	percpu_down_write(&c->mark_lock);
	bch2_dev_usage_from_buckets(c);
	percpu_up_write(&c->mark_lock);

	mutex_lock(&c->bucket_clock[READ].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, READ);
		up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);

	mutex_lock(&c->bucket_clock[WRITE].lock);
	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		bch2_recalc_oldest_io(c, ca, WRITE);
		up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[WRITE].lock);

enum alloc_write_ret {

static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
	struct bch_fs *c = trans->c;
	struct bucket_array *ba;
	struct bucket_mark m;
	struct bkey_alloc_unpacked old_u, new_u;
	__BKEY_PADDED(k, 8) alloc_key; /* hack: */
	struct bkey_i_alloc *a;

	bch2_trans_begin(trans);

	ret = bch2_btree_key_cache_flush(trans,
			BTREE_ID_ALLOC, iter->pos);

	k = bch2_btree_iter_peek_slot(iter);

	old_u = bch2_alloc_unpack(k);

	if (iter->pos.inode >= c->sb.nr_devices ||
	    !c->devs[iter->pos.inode])

	percpu_down_read(&c->mark_lock);
	ca = bch_dev_bkey_exists(c, iter->pos.inode);
	ba = bucket_array(ca);

	if (iter->pos.offset >= ba->nbuckets) {
		percpu_up_read(&c->mark_lock);

	g = &ba->b[iter->pos.offset];
	m = READ_ONCE(g->mark);
	new_u = alloc_mem_to_key(g, m);
	percpu_up_read(&c->mark_lock);

	if (!bkey_alloc_unpacked_cmp(old_u, new_u))
		return ALLOC_NOWROTE;

	a = bkey_alloc_init(&alloc_key.k);
	bch2_alloc_pack(a, new_u);
	bch2_trans_update(trans, iter, &a->k_i,
			  BTREE_TRIGGER_NORUN);
	ret = bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_USE_RESERVE|

int bch2_alloc_write(struct bch_fs *c, unsigned flags, bool *wrote)
	struct btree_trans trans;
	struct btree_iter *iter;

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	for_each_rw_member(ca, c, i) {
		unsigned first_bucket;

		percpu_down_read(&c->mark_lock);
		first_bucket = bucket_array(ca)->first_bucket;
		percpu_up_read(&c->mark_lock);

		bch2_btree_iter_set_pos(iter, POS(i, first_bucket));

			ret = bch2_alloc_write_key(&trans, iter, flags);
			if (ret < 0 || ret == ALLOC_END)
			if (ret == ALLOC_WROTE)
			bch2_btree_iter_next_slot(iter);

			percpu_ref_put(&ca->io_ref);

	bch2_trans_exit(&trans);

	return ret < 0 ? ret : 0;

/* Bucket IO clocks: */

static void bch2_recalc_oldest_io(struct bch_fs *c, struct bch_dev *ca, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets = bucket_array(ca);

	lockdep_assert_held(&c->bucket_clock[rw].lock);

	/* Recalculate max_last_io for this device: */
	for_each_bucket(g, buckets)
		max_last_io = max(max_last_io, bucket_last_io(c, g, rw));

	ca->max_last_bucket_io[rw] = max_last_io;

	/* Recalculate global max_last_io: */
	for_each_member_device(ca, c, i)
		max_last_io = max(max_last_io, ca->max_last_bucket_io[rw]);

	clock->max_last_io = max_last_io;

static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];
	struct bucket_array *buckets;

	trace_rescale_prios(c);

	for_each_member_device(ca, c, i) {
		down_read(&ca->bucket_lock);
		buckets = bucket_array(ca);

		for_each_bucket(g, buckets)
			g->io_time[rw] = clock->hand -
				bucket_last_io(c, g, rw) / 2;

		bch2_recalc_oldest_io(c, ca, rw);

		up_read(&ca->bucket_lock);

static inline u64 bucket_clock_freq(u64 capacity)
	return max(capacity >> 10, 2028ULL);

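/*
 * Not from the original source, just a note on the arithmetic: capacity >> 10
 * is capacity / 1024, i.e. roughly 0.1% of the filesystem's usable capacity in
 * sectors, with a floor of 2028 sectors (just under 1MiB) so very small
 * filesystems don't advance their bucket clocks absurdly fast.  For example, a
 * filesystem with 2^32 512-byte sectors (~2TiB) advances its clock hand once
 * per ~2GiB of IO.
 */
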
static void bch2_inc_clock_hand(struct io_timer *timer)
	struct bucket_clock *clock = container_of(timer,
					struct bucket_clock, rescale);
	struct bch_fs *c = container_of(clock,
				struct bch_fs, bucket_clock[clock->rw]);

	mutex_lock(&clock->lock);

	/* if clock cannot be advanced more, rescale prio */
	if (clock->max_last_io >= U16_MAX - 2)
		bch2_rescale_bucket_io_times(c, clock->rw);

	BUG_ON(clock->max_last_io >= U16_MAX - 2);

	for_each_member_device(ca, c, i)
		ca->max_last_bucket_io[clock->rw]++;
	clock->max_last_io++;

	mutex_unlock(&clock->lock);

	capacity = READ_ONCE(c->capacity);

	 * we only increment when 0.1% of the filesystem capacity has been read
	 * or written to; this determines when it's time to advance the hand
	 * XXX: we shouldn't really be going off of the capacity of devices in
	 * RW mode (that will be 0 when we're RO, yet we can still service

	timer->expire += bucket_clock_freq(capacity);

	bch2_io_timer_add(&c->io_clock[clock->rw], timer);

static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
	struct bucket_clock *clock = &c->bucket_clock[rw];

	clock->rescale.fn	= bch2_inc_clock_hand;
	clock->rescale.expire	= bucket_clock_freq(c->capacity);
	mutex_init(&clock->lock);

/* Background allocator thread: */

 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.

#define BUCKET_GC_GEN_MAX	96U

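/*
 * A rough summary, not from the original source: bucket_gc_gen() (defined
 * elsewhere) is approximately the number of times a bucket has been reused
 * since mark-and-sweep gc last updated its oldest known gen.
 * bch2_can_invalidate_bucket() below uses two thresholds against it:
 *
 *	gc_gen >= BUCKET_GC_GEN_MAX / 2 (48) -> counted in inc_gen_needs_gc
 *	gc_gen >= BUCKET_GC_GEN_MAX     (96) -> counted in inc_gen_really_needs_gc,
 *						bucket can't be reused until gc runs
 *
 * which is how the allocator decides when to kick the gc thread to avoid gen
 * wraparound.
 */
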
 * wait_buckets_available - wait on reclaimable buckets
 * If there aren't enough available buckets to fill up free_inc, wait until
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
	unsigned long gc_count = c->gc_count;

	ca->allocator_state = ALLOCATOR_BLOCKED;
	closure_wake_up(&c->freelist_wait);

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		if ((ssize_t) (dev_buckets_available(c, ca) -
			       ca->inc_gen_really_needs_gc) >=
		    (ssize_t) fifo_free(&ca->free_inc))

		up_read(&c->gc_lock);
		down_read(&c->gc_lock);

	__set_current_state(TASK_RUNNING);
	ca->allocator_state = ALLOCATOR_RUNNING;
	closure_wake_up(&c->freelist_wait);

static bool bch2_can_invalidate_bucket(struct bch_dev *ca,
				       struct bucket_mark mark)
	if (!is_available_bucket(mark))

	if (ca->buckets_nouse &&
	    test_bit(bucket, ca->buckets_nouse))

	gc_gen = bucket_gc_gen(ca, bucket);

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;

 * Determines what order we're going to reuse buckets, smallest bucket_sort_key()
 *
 * - We take into account the read prio of the bucket, which gives us an
 *   indication of how hot the data is -- we scale the prio so that the prio
 *   farthest from the clock is worth 1/8th of the closest.
 *
 * - The number of sectors of cached data in the bucket, which gives us an
 *   indication of the cost in cache misses this eviction will cause.
 *
 * - If hotness * sectors used compares equal, we pick the bucket with the
 *   smallest bucket_gc_gen() - since incrementing the same bucket's generation
 *   number repeatedly forces us to run mark and sweep gc to avoid generation

static unsigned long bucket_sort_key(struct bch_fs *c, struct bch_dev *ca,
				     size_t b, struct bucket_mark m)
	unsigned last_io = bucket_last_io(c, bucket(ca, b), READ);
	unsigned max_last_io = ca->max_last_bucket_io[READ];

	 * Time since last read, scaled to [0, 8) where larger value indicates
	 * more recently read data:
	unsigned long hotness = (max_last_io - last_io) * 7 / max_last_io;

	/* How much we want to keep the data in this bucket: */
	unsigned long data_wantness =
		(hotness + 1) * bucket_sectors_used(m);

	unsigned long needs_journal_commit =
		bucket_needs_journal_commit(m, c->journal.last_seq_ondisk);

	return  (data_wantness << 9) |
		(needs_journal_commit << 8) |
		(bucket_gc_gen(ca, b) / 16);

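/*
 * Illustrative only, with made-up numbers: suppose max_last_io = 1024 and a
 * bucket was last read at last_io = 896; then
 * hotness = (1024 - 896) * 7 / 1024 = 0, and with 100 cached sectors
 * data_wantness = (0 + 1) * 100 = 100.  If the bucket doesn't need a journal
 * commit and its gc gen is 20, the sort key is
 *
 *	(100 << 9) | (0 << 8) | (20 / 16) = 51201
 *
 * Smaller keys are reused first, so cold, mostly-empty buckets that don't
 * require a journal flush win.
 */
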
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
	return  cmp_int(l.key, r.key) ?:
		cmp_int(r.nr, l.nr) ?:
		cmp_int(l.bucket, r.bucket);

static inline int bucket_idx_cmp(const void *_l, const void *_r)
	const struct alloc_heap_entry *l = _l, *r = _r;

	return cmp_int(l->bucket, r->bucket);

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };

	ca->alloc_heap.used = 0;

	mutex_lock(&c->bucket_clock[READ].lock);
	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);

	bch2_recalc_oldest_io(c, ca, READ);

	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket_mark m = READ_ONCE(buckets->b[b].mark);
		unsigned long key = bucket_sort_key(c, ca, b, m);

		if (!bch2_can_invalidate_bucket(ca, b, m))

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
				heap_add_or_replace(&ca->alloc_heap, e,
						-bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {

		heap_add_or_replace(&ca->alloc_heap, e,
				-bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);

	up_read(&ca->bucket_lock);
	mutex_unlock(&c->bucket_clock[READ].lock);

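/*
 * Note added for readability, not in the original: the scan above coalesces
 * runs of adjacent buckets with identical sort keys into a single
 * alloc_heap_entry (e.bucket is the first bucket of the run, e.nr its length),
 * so the heap and the later invalidate pass work on ranges rather than on
 * individual buckets.
 */
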
static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	if (ca->fifo_last_bucket < ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
	} while (ca->fifo_last_bucket != start);

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;

	     checked < ca->mi.nbuckets / 2;
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))

	sort(ca->alloc_heap.data,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;

static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case CACHE_REPLACEMENT_LRU:
		find_reclaimable_buckets_lru(c, ca);
	case CACHE_REPLACEMENT_FIFO:
		find_reclaimable_buckets_fifo(c, ca);
	case CACHE_REPLACEMENT_RANDOM:
		find_reclaimable_buckets_random(c, ca);

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

static inline long next_alloc_bucket(struct bch_dev *ca)
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
			size_t b = top->bucket;

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

 * returns sequence number of most recent journal entry that updated this
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

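/*
 * A worked example, not from the original source: the bucket mark only stores
 * the low 16 bits of the journal sequence number, so it's reconstructed
 * against the current sequence.  If atomic64_read(&c->journal.seq) = 0x12345
 * and m.journal_seq = 0xfff0, splicing gives 0x1fff0; since that would be in
 * the future relative to 0x12345, the low bits must have wrapped, so we
 * subtract 1 << 16 and the answer is 0xfff0.
 */
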
static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
	__BKEY_PADDED(k, BKEY_ALLOC_VAL_U64s_MAX) alloc_key;
	__BKEY_PADDED(k, 8) alloc_key;
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc *a;
	struct bkey_alloc_unpacked u;
	struct bucket_mark m;
	bool invalidating_cached_data;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);
	spin_lock(&c->freelist_lock);

	verify_not_on_freelist(c, ca, b);

	BUG_ON(!fifo_push(&ca->free_inc, b));

	m = READ_ONCE(g->mark);

	invalidating_cached_data = m.cached_sectors != 0;

	 * If we're not invalidating cached data, we only increment the bucket
	 * gen in memory here, the incremented gen will be updated in the btree
	 * by bch2_trans_mark_pointer():
	if (!invalidating_cached_data)
		bch2_invalidate_bucket(c, ca, b, &m);
		bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);

	spin_unlock(&c->freelist_lock);
	percpu_up_read(&c->mark_lock);

	if (!invalidating_cached_data)

	 * If the read-only path is trying to shut down, we can't be generating
	if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {

	BUG_ON(BKEY_ALLOC_VAL_U64s_MAX > 8);

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));

	ret = bch2_btree_iter_traverse(iter);

	percpu_down_read(&c->mark_lock);
	g = bucket(ca, iter->pos.offset);
	m = READ_ONCE(g->mark);
	u = alloc_mem_to_key(g, m);

	percpu_up_read(&c->mark_lock);

	invalidating_cached_data = u.cached_sectors != 0;

	u.cached_sectors = 0;
	u.read_time	= c->bucket_clock[READ].hand;
	u.write_time	= c->bucket_clock[WRITE].hand;

	a = bkey_alloc_init(&alloc_key.k);
	bch2_alloc_pack(a, u);
	bch2_trans_update(trans, iter, &a->k_i,
			  BTREE_TRIGGER_BUCKET_INVALIDATE);

	 * when using deferred btree updates, we have journal reclaim doing
	 * btree updates and thus requiring the allocator to make forward
	 * progress, and here the allocator is requiring space in the journal -
	 * so we need a journal pre-reservation:
	ret = bch2_trans_commit(trans, NULL,
				invalidating_cached_data ? journal_seq : NULL,
				BTREE_INSERT_NOUNLOCK|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_USE_RESERVE|
				BTREE_INSERT_USE_ALLOC_RESERVE|

		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);

	return ret < 0 ? ret : 0;

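/*
 * Summary, not from the original source: on a successful commit the bucket
 * (or run of buckets) is consumed from the top of ca->alloc_heap and the
 * journal sequence that must be flushed before reuse is recorded; on failure
 * the bucket is backed out of free_inc and its "owned by allocator" marking is
 * undone, so the in-memory state matches what actually made it into the btree.
 */
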
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
	struct btree_trans trans;
	struct btree_iter *iter;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
				   BTREE_ITER_CACHED_NOFILL|

	/* Only use nowait if we've already invalidated at least one bucket: */
	       !fifo_full(&ca->free_inc) &&
		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_trans_exit(&trans);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		bch_err(ca, "error invalidating buckets: %i", ret);

		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
		bch_err(ca, "journal error: %i", ret);

static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++) {

			 * Don't strand buckets on the copygc freelist until
			 * after recovery is finished:
			if (!test_bit(BCH_FS_STARTED, &c->flags) &&
			    i == RESERVE_MOVINGGC)

			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_state = ALLOCATOR_RUNNING;

				spin_unlock(&c->freelist_lock);

		if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
			ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
			closure_wake_up(&c->freelist_wait);

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {

	__set_current_state(TASK_RUNNING);

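/*
 * Descriptive note, not from the original source: push_invalidated_bucket()
 * tries each freelist in reserve order and sleeps (TASK_INTERRUPTIBLE,
 * rechecking kthread_should_stop()) until one has room; the
 * ALLOCATOR_BLOCKED_FULL state lets waiters distinguish "all freelists are
 * full" from "the allocator is merely behind".
 */
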
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))

 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
static int bch2_allocator_thread(void *arg)
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;

	ca->allocator_state = ALLOCATOR_RUNNING;

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
			up_read(&c->gc_lock);

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);

		pr_debug("free_inc now empty");

			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);

			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			    (nr < ALLOC_SCAN_BATCH(ca) &&
			     !fifo_empty(&ca->free[RESERVE_NONE]))) {
				ret = wait_buckets_available(c, ca);
					up_read(&c->gc_lock);

		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:

	pr_debug("alloc thread stopping (ret %i)", ret);
	ca->allocator_state = ALLOCATOR_STOPPED;
	closure_wake_up(&c->freelist_wait);

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.

		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		ca->copygc_threshold = dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

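/*
 * Illustrative arithmetic only, with made-up numbers: for a single rw device
 * whose freelists total, say, 500 buckets of 256 sectors, dev_reserve is
 * (500 + 3) * 256 = 128768 sectors, and reserved_sectors accumulates twice
 * that.  With, say, gc_reserve_percent = 8 on a 2^31-sector (1TiB) filesystem,
 * gc_reserve = 2^31 * 8 / 100 ~= 171M sectors, which dominates, so
 * c->capacity ends up as capacity - gc_reserve.
 */
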
		bch2_io_timer_add(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_add(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);
		bch2_io_timer_del(&c->io_clock[READ],
				  &c->bucket_clock[READ].rescale);
		bch2_io_timer_del(&c->io_clock[WRITE],
				  &c->bucket_clock[WRITE].rescale);

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
	struct open_bucket *ob;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
		spin_unlock(&ob->lock);

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	 * Capacity is calculated based off of devices in allocation groups:
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &ca->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	mutex_unlock(&c->btree_reserve_cache_lock);

		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);

	bch2_ec_stop_dev(c, ca);

	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	closure_wake_up(&c->freelist_wait);

	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */
	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait,
				   ca->allocator_state != ALLOCATOR_RUNNING);

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
	struct task_struct *p;

	 * allocator thread already started?
	if (ca->alloc_thread)

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch_alloc[%s]", ca->name);

	rcu_assign_pointer(ca->alloc_thread, p);

void bch2_fs_allocator_background_init(struct bch_fs *c)
	spin_lock_init(&c->freelist_lock);
	bch2_bucket_clock_init(c, READ);
	bch2_bucket_clock_init(c, WRITE);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);