// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "ec.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
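
/*
 * Editorial note (not from the original source): a v1 alloc key carries a
 * bitmap of present fields in bch_alloc->fields, followed by each present
 * field packed back to back at the fixed width given by
 * BCH_ALLOC_V1_FIELD_BYTES[]. Fields whose bit is clear simply aren't
 * stored, and alloc_field_v1_get() below decodes them as 0.
 */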

/* Ratelimiting/PD controllers */

static void pd_controllers_update(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
					struct bch_fs,
					pd_controllers_update);
	struct bch_dev *ca;
	s64 free = 0, fragmented = 0;
	unsigned i;

	for_each_member_device(ca, c, i) {
		struct bch_dev_usage stats = bch2_dev_usage_read(ca);

		free += bucket_to_sector(ca,
				__dev_buckets_available(ca, stats)) << 9;
		/*
		 * Bytes of internal fragmentation, which can be
		 * reclaimed by copy GC:
		 */
		fragmented += max_t(s64, 0, (bucket_to_sector(ca,
					stats.d[BCH_DATA_user].buckets +
					stats.d[BCH_DATA_cached].buckets) -
				(stats.d[BCH_DATA_user].sectors +
				 stats.d[BCH_DATA_cached].sectors)) << 9);
	}

	bch2_pd_controller_update(&c->copygc_pd, free, fragmented, -1);
	schedule_delayed_work(&c->pd_controllers_update,
			      c->pd_controllers_update_seconds * HZ);
}

/* Persistent alloc info: */

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1: v = *((const u8 *) *p);	break;
	case 2: v = le16_to_cpup(*p);	break;
	case 4: v = le32_to_cpup(*p);	break;
	case 8: v = le64_to_cpup(*p);	break;
	default: BUG();
	}

	*p += bytes;
	return v;
}

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1: *((u8 *) *p)	 = v;			break;
	case 2: *((__le16 *) *p) = cpu_to_le16(v);	break;
	case 4: *((__le32 *) *p) = cpu_to_le32(v);	break;
	case 8: *((__le64 *) *p) = cpu_to_le64(v);	break;
	default: BUG();
	}

	*p += bytes;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode(in, end, &v);			\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
			       const struct bkey_alloc_unpacked src)
{
	struct bkey_i_alloc_v2 *a = bkey_alloc_v2_init(&dst->k);
	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
	u8 *out = a->v.data;
	u8 *end = (void *) &dst[1];
	u8 *last_nonzero_field = out;
	unsigned bytes;

	a->k.p		= POS(src.dev, src.bucket);
	a->v.gen	= src.gen;
	a->v.oldest_gen	= src.oldest_gen;
	a->v.data_type	= src.data_type;

#define x(_name, _bits)							\
	nr_fields++;							\
									\
	if (src._name) {						\
		out += bch2_varint_encode(out, src._name);		\
									\
		last_nonzero_field = out;				\
		last_nonzero_fieldnr = nr_fields;			\
	} else {							\
		*out++ = 0;						\
	}

	BCH_ALLOC_FIELDS_V2()
#undef  x
	BUG_ON(out > end);

	out = last_nonzero_field;
	a->v.nr_fields = last_nonzero_fieldnr;

	bytes = (u8 *) out - (u8 *) &a->v;
	set_bkey_val_bytes(&a->k, bytes);
	memset_u64s_tail(&a->v, 0, bytes);
}
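
/*
 * Editorial note (not from the original source): v2 alloc keys keep
 * gen/oldest_gen/data_type in fixed struct members and store the remaining
 * BCH_ALLOC_FIELDS_V2() fields as varints in declaration order. The packer
 * above remembers the position after the last nonzero field and truncates
 * there, so nr_fields and the value size only cover up to that field; the
 * unpacker treats any field past nr_fields as zero, which keeps the two
 * functions in sync.
 */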

struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = {
		.dev	= k.k->p.inode,
		.bucket	= k.k->p.offset,
	};

	if (k.k->type == KEY_TYPE_alloc_v2)
		bch2_alloc_unpack_v2(&ret, k);
	else if (k.k->type == KEY_TYPE_alloc)
		bch2_alloc_unpack_v1(&ret, k);

	return ret;
}

void bch2_alloc_pack(struct bch_fs *c,
		     struct bkey_alloc_buf *dst,
		     const struct bkey_alloc_unpacked src)
{
	bch2_alloc_pack_v2(dst, src);
}

static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}

const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u;

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	if (bch2_alloc_unpack_v2(&u, k))
		return "unpack error";

	return NULL;
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

	pr_buf(out, "gen %u oldest_gen %u data_type %u",
	       u.gen, u.oldest_gen, u.data_type);
#define x(_name, ...)	pr_buf(out, " " #_name " %llu", (u64) u._name);
	BCH_ALLOC_FIELDS_V2()
#undef  x
}

static int bch2_alloc_read_fn(struct bch_fs *c, enum btree_id id,
			      unsigned level, struct bkey_s_c k)
{
	struct bch_dev *ca;
	struct bucket *g;
	struct bkey_alloc_unpacked u;

	if (level ||
	    (k.k->type != KEY_TYPE_alloc &&
	     k.k->type != KEY_TYPE_alloc_v2))
		return 0;

	ca = bch_dev_bkey_exists(c, k.k->p.inode);
	g = bucket(ca, k.k->p.offset);
	u = bch2_alloc_unpack(k);

	g->_mark.gen		= u.gen;
	g->_mark.data_type	= u.data_type;
	g->_mark.dirty_sectors	= u.dirty_sectors;
	g->_mark.cached_sectors	= u.cached_sectors;
	g->io_time[READ]	= u.read_time;
	g->io_time[WRITE]	= u.write_time;
	g->oldest_gen		= u.oldest_gen;

	return 0;
}

int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
	int ret;

	down_read(&c->gc_lock);
	ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
					  NULL, bch2_alloc_read_fn);
	up_read(&c->gc_lock);
	if (ret)
		bch_err(c, "error reading alloc info: %i", ret);

	return ret;
}

static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bucket *g;
	struct bucket_mark m;
	struct bkey_alloc_unpacked old_u, new_u;
	struct bkey_alloc_buf a;
	int ret;

	bch2_trans_begin(trans);

	ret = bch2_btree_key_cache_flush(trans,
			BTREE_ID_alloc, iter->pos);
	if (ret)
		return ret;

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;

	old_u = bch2_alloc_unpack(k);

	percpu_down_read(&c->mark_lock);
	ca	= bch_dev_bkey_exists(c, iter->pos.inode);
	g	= bucket(ca, iter->pos.offset);
	m	= READ_ONCE(g->mark);
	new_u	= alloc_mem_to_key(iter, g, m);
	percpu_up_read(&c->mark_lock);

	if (!bkey_alloc_unpacked_cmp(old_u, new_u))
		return 0;

	bch2_alloc_pack(c, &a, new_u);
	bch2_trans_update(trans, iter, &a.k,
			  BTREE_TRIGGER_NORUN);
	ret = bch2_trans_commit(trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|flags);
	return ret;
}
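
/*
 * Editorial note (not from the original source): the update in
 * bch2_alloc_write_key() is the in-memory bucket state being written back
 * into the alloc btree, so it's committed with BTREE_TRIGGER_NORUN -
 * running the alloc triggers here would re-apply accounting that the
 * in-memory bucket marks already reflect.
 */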

int bch2_alloc_write(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	for_each_member_device(ca, c, i) {
		bch2_btree_iter_set_pos(iter,
			POS(ca->dev_idx, ca->mi.first_bucket));

		while (iter->pos.offset < ca->mi.nbuckets) {
			bch2_trans_cond_resched(&trans);

			ret = bch2_alloc_write_key(&trans, iter, flags);
			if (ret) {
				percpu_ref_put(&ca->io_ref);
				goto err;
			}
			bch2_btree_iter_next_slot(iter);
		}
	}
err:
	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
	return ret;
}

/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
	struct btree_iter *iter;
	struct bucket *g;
	struct bkey_alloc_buf *a;
	struct bkey_alloc_unpacked u;
	u64 *time, now;
	int ret = 0;

	iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto out;

	a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	percpu_down_read(&c->mark_lock);
	g = bucket(ca, bucket_nr);
	u = alloc_mem_to_key(iter, g, READ_ONCE(g->mark));
	percpu_up_read(&c->mark_lock);

	time = rw == READ ? &u.read_time : &u.write_time;
	now = atomic64_read(&c->io_clock[rw].now);
	if (*time == now)
		goto out;

	*time = now;

	bch2_alloc_pack(c, a, u);
	ret   = bch2_trans_update(trans, iter, &a->k, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_put(trans, iter);
	return ret;
}

/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */
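
/*
 * Editorial overview (added for clarity, not from the original source):
 * reclaimable buckets move through three stages. find_reclaimable_buckets()
 * fills ca->alloc_heap with candidates, bch2_invalidate_buckets() bumps
 * their gens and pushes them onto ca->free_inc, and
 * discard_invalidated_buckets() optionally discards them and distributes
 * them to the ca->free[] reserves that foreground allocation draws from.
 */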

/**
 * wait_buckets_available - wait on reclaimable buckets
 *
 * If there aren't enough available buckets to fill up free_inc, wait until
 * there are.
 */
static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned long gc_count = c->gc_count;
	s64 available;
	int ret = 0;

	ca->allocator_state = ALLOCATOR_BLOCKED;
	closure_wake_up(&c->freelist_wait);

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		if (gc_count != c->gc_count)
			ca->inc_gen_really_needs_gc = 0;

		available  = dev_buckets_reclaimable(ca);
		available -= ca->inc_gen_really_needs_gc;

		available = max(available, 0LL);

		if (available)
			break;

		up_read(&c->gc_lock);
		schedule();
		try_to_freeze();
		down_read(&c->gc_lock);
	}

	__set_current_state(TASK_RUNNING);
	ca->allocator_state = ALLOCATOR_RUNNING;
	closure_wake_up(&c->freelist_wait);

	return ret;
}

static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
				       struct bucket_mark m)
{
	u8 gc_gen;

	if (!is_available_bucket(m))
		return false;

	if (m.owned_by_allocator)
		return false;

	if (ca->buckets_nouse &&
	    test_bit(b, ca->buckets_nouse))
		return false;

	gc_gen = bucket_gc_gen(bucket(ca, b));

	if (gc_gen >= BUCKET_GC_GEN_MAX / 2)
		ca->inc_gen_needs_gc++;

	if (gc_gen >= BUCKET_GC_GEN_MAX)
		ca->inc_gen_really_needs_gc++;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
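
/*
 * Editorial note (not from the original source): bucket_gc_gen() is how far
 * a bucket's gen has advanced since btree GC last recomputed oldest_gen.
 * Gens are small integers that wrap, so as that distance approaches
 * BUCKET_GC_GEN_MAX the counters above nudge (and eventually force) a GC
 * run before the bucket can be invalidated again.
 */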

/*
 * Determines what order we're going to reuse buckets, smallest
 * bucket_sort_key() first.
 */

static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
				u64 now, u64 last_seq_ondisk)
{
	unsigned used = bucket_sectors_used(m);

	if (used) {
		/*
		 * Prefer to keep buckets that have been read more recently, and
		 * buckets that have more data in them:
		 */
		u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
		u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));

		return -last_read_scaled;
	} else {
		/*
		 * Prefer to use buckets with smaller gc_gen so that we don't
		 * have to walk the btree and recalculate oldest_gen - but shift
		 * off the low bits so that buckets will still have equal sort
		 * keys when there's only a small difference, so that we can
		 * keep sequential buckets together:
		 */
		return  (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
			(bucket_gc_gen(g) >> 4);
	}
}
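
/*
 * Editorial example (hypothetical numbers, not from the original source):
 * an empty bucket that needs no journal commit and has gc_gen 7 gets key
 * (0 << 4) | (7 >> 4) = 0, so it sorts to the front and is reused first.
 * A bucket holding 128 used sectors last read 1048576 clock ticks ago gets
 * -(1048576 / 128) = -8192, which as an unsigned key is near U32_MAX: used
 * buckets sort behind empty ones, and the more recently read (or the
 * fuller) a bucket is, the larger its key and the longer it is kept.
 */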

static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return  cmp_int(l.key, r.key) ?:
		cmp_int(r.nr, l.nr) ?:
		cmp_int(l.bucket, r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return cmp_int(l->bucket, r->bucket);
}

static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	u64 now, last_seq_ondisk;
	size_t b, i, nr = 0;

	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);
	ca->alloc_heap.used = 0;
	now = atomic64_read(&c->io_clock[READ].now);
	last_seq_ondisk = c->journal.last_seq_ondisk;

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket *g = &buckets->b[b];
		struct bucket_mark m = READ_ONCE(g->mark);
		unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
					-bucket_alloc_cmp, NULL);

			e = (struct alloc_heap_entry) {
				.bucket	= b,
				.nr	= 1,
				.key	= key,
			};
		}

		cond_resched();
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				-bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
}

static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t b, start;

	if (ca->fifo_last_bucket <  ca->mi.first_bucket ||
	    ca->fifo_last_bucket >= ca->mi.nbuckets)
		ca->fifo_last_bucket = ca->mi.first_bucket;

	start = ca->fifo_last_bucket;

	do {
		ca->fifo_last_bucket++;
		if (ca->fifo_last_bucket == ca->mi.nbuckets)
			ca->fifo_last_bucket = ca->mi.first_bucket;

		b = ca->fifo_last_bucket;
		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	} while (ca->fifo_last_bucket != start);
}

static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets = bucket_array(ca);
	struct bucket_mark m;
	size_t checked, i;

	for (checked = 0;
	     checked < ca->mi.nbuckets / 2;
	     checked++) {
		size_t b = bch2_rand_range(ca->mi.nbuckets -
					   ca->mi.first_bucket) +
			ca->mi.first_bucket;

		m = READ_ONCE(buckets->b[b].mark);

		if (bch2_can_invalidate_bucket(ca, b, m)) {
			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };

			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
			if (heap_full(&ca->alloc_heap))
				break;
		}

		cond_resched();
	}

	sort(ca->alloc_heap.data,
	     ca->alloc_heap.used,
	     sizeof(ca->alloc_heap.data[0]),
	     bucket_idx_cmp, NULL);

	/* remove duplicates: */
	for (i = 0; i + 1 < ca->alloc_heap.used; i++)
		if (ca->alloc_heap.data[i].bucket ==
		    ca->alloc_heap.data[i + 1].bucket)
			ca->alloc_heap.data[i].nr = 0;
}

static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc = 0;

	switch (ca->mi.replacement) {
	case BCH_CACHE_REPLACEMENT_lru:
		find_reclaimable_buckets_lru(c, ca);
		break;
	case BCH_CACHE_REPLACEMENT_fifo:
		find_reclaimable_buckets_fifo(c, ca);
		break;
	case BCH_CACHE_REPLACEMENT_random:
		find_reclaimable_buckets_random(c, ca);
		break;
	}

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}

static inline long next_alloc_bucket(struct bch_dev *ca)
{
	struct alloc_heap_entry e, *top = ca->alloc_heap.data;

	while (ca->alloc_heap.used) {
		if (top->nr) {
			size_t b = top->bucket;

			top->bucket++;
			top->nr--;
			return b;
		}

		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
	}

	return -1;
}

/*
 * Returns the sequence number of the most recent journal entry that updated
 * this bucket:
 */
static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
{
	if (m.journal_seq_valid) {
		u64 journal_seq = atomic64_read(&c->journal.seq);
		u64 bucket_seq	= journal_seq;

		bucket_seq &= ~((u64) U16_MAX);
		bucket_seq |= m.journal_seq;

		if (bucket_seq > journal_seq)
			bucket_seq -= 1 << 16;

		return bucket_seq;
	} else {
		return 0;
	}
}
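
/*
 * Editorial example (hypothetical numbers, not from the original source):
 * the bucket only records the low 16 bits of the journal sequence. If the
 * current journal seq is 0x12345 and the bucket stored 0x2344, splicing the
 * low bits in gives 0x12344 <= 0x12345, which is returned as-is. If the
 * bucket stored 0x9999, splicing gives 0x19999 > 0x12345, so the update
 * must have been in the previous 16-bit window and 0x10000 is subtracted,
 * yielding 0x9999.
 */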

static int bch2_invalidate_one_bucket2(struct btree_trans *trans,
				       struct bch_dev *ca,
				       struct btree_iter *iter,
				       u64 *journal_seq, unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_alloc_buf a;
	struct bkey_alloc_unpacked u;
	struct bucket *g;
	struct bucket_mark m;
	bool invalidating_cached_data;
	size_t b;
	int ret = 0;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);
	g = bucket(ca, b);
	m = READ_ONCE(g->mark);

	BUG_ON(m.dirty_sectors);

	bch2_mark_alloc_bucket(c, ca, b, true, gc_pos_alloc(c, NULL), 0);

	spin_lock(&c->freelist_lock);
	verify_not_on_freelist(c, ca, b);
	BUG_ON(!fifo_push(&ca->free_inc, b));
	spin_unlock(&c->freelist_lock);

	/*
	 * If we're not invalidating cached data, we only increment the bucket
	 * gen in memory here, the incremented gen will be updated in the btree
	 * by bch2_trans_mark_pointer():
	 */
	if (!m.cached_sectors &&
	    !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
		bucket_cmpxchg(g, m, m.gen++);
		percpu_up_read(&c->mark_lock);
		goto out;
	}

	percpu_up_read(&c->mark_lock);

	/*
	 * If the read-only path is trying to shut down, we can't be generating
	 * new btree updates:
	 */
	if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
		ret = 1;
		goto out;
	}

	bch2_btree_iter_set_pos(iter, POS(ca->dev_idx, b));
retry:
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	percpu_down_read(&c->mark_lock);
	g = bucket(ca, iter->pos.offset);
	m = READ_ONCE(g->mark);
	u = alloc_mem_to_key(iter, g, m);

	percpu_up_read(&c->mark_lock);

	invalidating_cached_data = u.cached_sectors != 0;

	u.gen++;
	u.data_type	= 0;
	u.dirty_sectors	= 0;
	u.cached_sectors = 0;
	u.read_time	= atomic64_read(&c->io_clock[READ].now);
	u.write_time	= atomic64_read(&c->io_clock[WRITE].now);

	bch2_alloc_pack(c, &a, u);
	bch2_trans_update(trans, iter, &a.k,
			  BTREE_TRIGGER_BUCKET_INVALIDATE);

	/*
	 * when using deferred btree updates, we have journal reclaim doing
	 * btree updates and thus requiring the allocator to make forward
	 * progress, and here the allocator is requiring space in the journal -
	 * so we need a journal pre-reservation:
	 */
	ret = bch2_trans_commit(trans, NULL,
				invalidating_cached_data ? journal_seq : NULL,
				BTREE_INSERT_NOUNLOCK|
				BTREE_INSERT_NOCHECK_RW|
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_JOURNAL_RESERVED|
				flags);
	if (ret == -EINTR)
		goto retry;
out:
	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/*
		 * Make sure we flush the last journal entry that updated this
		 * bucket (i.e. deleting the last reference) before writing to
		 * this bucket again:
		 */
		*journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false,
				       gc_pos_alloc(c, NULL), 0);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret < 0 ? ret : 0;
}
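
/*
 * Editorial note (not from the original source): the function above has two
 * paths. A bucket with no cached data and nothing waiting on a journal
 * commit only has its gen bumped in memory - the new gen reaches the btree
 * later, via bch2_trans_mark_pointer(). A bucket that still has cached
 * pointers must have the gen bump committed through the btree immediately,
 * and the commit's journal sequence is reported back through *journal_seq
 * so the caller can flush it before the bucket is reused.
 */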

/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	u64 journal_seq = 0;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);
	iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
				   POS(ca->dev_idx, 0),
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used)
		ret = bch2_invalidate_one_bucket2(&trans, ca, iter, &journal_seq,
				BTREE_INSERT_GC_LOCK_HELD|
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));

	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret) {
		bch_err(ca, "error invalidating buckets: %i", ret);
		return ret;
	}

	if (journal_seq)
		ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret) {
		bch_err(ca, "journal error: %i", ret);
		return ret;
	}

	return 0;
}
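
/*
 * Editorial note (not from the original source): the journal flush above is
 * what makes it safe for discard_invalidated_buckets() to hand these buckets
 * back out - the journal entry that dropped the last reference to a bucket's
 * old contents must be persistent before the bucket can be overwritten,
 * which is why bch2_invalidate_one_bucket2() folds bucket_journal_seq() into
 * journal_seq.
 */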

static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, size_t bucket)
{
	unsigned i;
	int ret = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock(&c->freelist_lock);
		for (i = 0; i < RESERVE_NR; i++) {
			/*
			 * Don't strand buckets on the copygc freelist until
			 * after recovery is finished:
			 */
			if (!test_bit(BCH_FS_STARTED, &c->flags) &&
			    i == RESERVE_MOVINGGC)
				continue;

			if (fifo_push(&ca->free[i], bucket)) {
				fifo_pop(&ca->free_inc, bucket);

				closure_wake_up(&c->freelist_wait);
				ca->allocator_state = ALLOCATOR_RUNNING;

				spin_unlock(&c->freelist_lock);
				goto out;
			}
		}

		if (ca->allocator_state != ALLOCATOR_BLOCKED_FULL) {
			ca->allocator_state = ALLOCATOR_BLOCKED_FULL;
			closure_wake_up(&c->freelist_wait);
		}

		spin_unlock(&c->freelist_lock);

		if ((current->flags & PF_KTHREAD) &&
		    kthread_should_stop()) {
			ret = 1;
			break;
		}

		schedule();
		try_to_freeze();
	}
out:
	__set_current_state(TASK_RUNNING);
	return ret;
}

/*
 * Pulls buckets off free_inc, discards them (if enabled), then adds them to
 * freelists, waiting until there's room if necessary:
 */
static int discard_invalidated_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	while (!fifo_empty(&ca->free_inc)) {
		size_t bucket = fifo_peek(&ca->free_inc);

		if (ca->mi.discard &&
		    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size, GFP_NOIO, 0);

		if (push_invalidated_bucket(c, ca, bucket))
			return 1;
	}

	return 0;
}

static inline bool allocator_thread_running(struct bch_dev *ca)
{
	return ca->mi.state == BCH_MEMBER_STATE_rw &&
		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags);
}

/**
 * bch2_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	size_t nr;
	int ret;

	set_freezable();

	while (1) {
		if (!allocator_thread_running(ca)) {
			ca->allocator_state = ALLOCATOR_STOPPED;
			if (kthread_wait_freezable(allocator_thread_running(ca)))
				break;
		}
		ca->allocator_state = ALLOCATOR_RUNNING;

		cond_resched();
		if (kthread_should_stop())
			break;

		pr_debug("discarding %zu invalidated buckets",
			 fifo_used(&ca->free_inc));

		ret = discard_invalidated_buckets(c, ca);
		if (ret)
			goto stop;

		down_read(&c->gc_lock);

		ret = bch2_invalidate_buckets(c, ca);
		if (ret) {
			up_read(&c->gc_lock);
			goto stop;
		}

		if (!fifo_empty(&ca->free_inc)) {
			up_read(&c->gc_lock);
			continue;
		}

		pr_debug("free_inc now empty");

		while (1) {
			cond_resched();
			/*
			 * Find some buckets that we can invalidate, either
			 * they're completely unused, or only contain clean data
			 * that's been written back to the backing device or
			 * another cache tier
			 */

			pr_debug("scanning for reclaimable buckets");

			nr = find_reclaimable_buckets(c, ca);

			pr_debug("found %zu buckets", nr);

			trace_alloc_batch(ca, nr, ca->alloc_heap.size);

			if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
			     ca->inc_gen_really_needs_gc) &&
			    c->gc_thread) {
				atomic_inc(&c->kick_gc);
				wake_up_process(c->gc_thread);
			}

			if (nr)
				break;

			/*
			 * If we found any buckets, we have to invalidate them
			 * before we scan for more - but if we didn't find very
			 * many we may want to wait on more buckets being
			 * available so we don't spin:
			 */
			ret = wait_buckets_available(c, ca);
			if (ret) {
				up_read(&c->gc_lock);
				goto stop;
			}
		}

		up_read(&c->gc_lock);

		pr_debug("%zu buckets to invalidate", nr);

		/*
		 * alloc_heap is now full of newly-invalidated buckets: next,
		 * write out the new bucket gens:
		 */
	}

stop:
	pr_debug("alloc thread stopping (ret %i)", ret);
	ca->allocator_state = ALLOCATOR_STOPPED;
	closure_wake_up(&c->freelist_wait);
	return 0;
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve, copygc_threshold = 0;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run against when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		copygc_threshold += dev_reserve;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->copygc_threshold = copygc_threshold;
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
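
/*
 * Editorial example (hypothetical numbers, not from the original source):
 * with gc_reserve_percent = 8 and 100 GB of raw capacity, gc_reserve comes
 * to 8 GB worth of sectors. If the per-device freelist reserves only add up
 * to 1 GB, reserved_sectors is raised to the 8 GB figure and c->capacity -
 * the number used for -ENOSPC accounting - ends up at 92 GB.
 */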

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->ptr.dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */
	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait,
				   ca->allocator_state != ALLOCATOR_RUNNING);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;

	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */
	synchronize_rcu();

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch-alloc/%s", ca->name);
	if (IS_ERR(p)) {
		bch_err(ca->fs, "error creating allocator thread: %li",
			PTR_ERR(p));
		return PTR_ERR(p);
	}

	get_task_struct(p);
	rcu_assign_pointer(ca->alloc_thread, p);
	wake_up_process(p);
	return 0;
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);

	c->pd_controllers_update_seconds = 5;
	INIT_DELAYED_WORK(&c->pd_controllers_update, pd_controllers_update);
}