1 // SPDX-License-Identifier: GPL-2.0
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "btree_cache.h"
7 #include "btree_key_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
19 #include <linux/kthread.h>
20 #include <linux/math64.h>
21 #include <linux/random.h>
22 #include <linux/rculist.h>
23 #include <linux/rcupdate.h>
24 #include <linux/sched/task.h>
25 #include <linux/sort.h>
26 #include <trace/events/bcachefs.h>
28 const char * const bch2_allocator_states[] = {
35 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
36 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
41 /* Persistent alloc info: */
43 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
44 const void **p, unsigned field)
46 unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
49 if (!(a->fields & (1 << field)))
54 v = *((const u8 *) *p);
73 static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
74 unsigned field, u64 v)
76 unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
81 a->v.fields |= 1 << field;
88 *((__le16 *) *p) = cpu_to_le16(v);
91 *((__le32 *) *p) = cpu_to_le32(v);
94 *((__le64 *) *p) = cpu_to_le64(v);
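/*
 * Illustrative sketch (not from the original code): with the v1 encoding
 * used by the helpers above, a field's byte offset inside a->data is simply
 * the sum of the sizes of the lower-numbered fields present in the a->fields
 * bitmask.  The helper below is hypothetical and only demonstrates that
 * layout rule.
 */
static inline unsigned alloc_v1_field_offset_sketch(const struct bch_alloc *a,
						    unsigned field)
{
	unsigned f, offset = 0;

	for (f = 0; f < field; f++)
		if (a->fields & (1 << f))
			offset += BCH_ALLOC_V1_FIELD_BYTES[f];

	return offset;
}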
103 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
106 const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
107 const void *d = in->data;
112 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
113 BCH_ALLOC_FIELDS_V1()
117 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
120 struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
121 const u8 *in = a.v->data;
122 const u8 *end = bkey_val_end(a);
123 unsigned fieldnr = 0;
128 out->oldest_gen = a.v->oldest_gen;
129 out->data_type = a.v->data_type;
131 #define x(_name, _bits) \
132 if (fieldnr < a.v->nr_fields) { \
133 ret = bch2_varint_decode_fast(in, end, &v); \
141 if (v != out->_name) \
145 BCH_ALLOC_FIELDS_V2()
150 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
153 struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
154 const u8 *in = a.v->data;
155 const u8 *end = bkey_val_end(a);
156 unsigned fieldnr = 0;
161 out->oldest_gen = a.v->oldest_gen;
162 out->data_type = a.v->data_type;
163 out->journal_seq = le64_to_cpu(a.v->journal_seq);
165 #define x(_name, _bits) \
166 if (fieldnr < a.v->nr_fields) { \
167 ret = bch2_varint_decode_fast(in, end, &v); \
175 if (v != out->_name) \
179 BCH_ALLOC_FIELDS_V2()
184 static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
185 const struct bkey_alloc_unpacked src)
187 struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
188 unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
190 u8 *end = (void *) &dst[1];
191 u8 *last_nonzero_field = out;
194 a->k.p = POS(src.dev, src.bucket);
196 a->v.oldest_gen = src.oldest_gen;
197 a->v.data_type = src.data_type;
198 a->v.journal_seq = cpu_to_le64(src.journal_seq);
200 #define x(_name, _bits) \
204 out += bch2_varint_encode_fast(out, src._name); \
206 last_nonzero_field = out; \
207 last_nonzero_fieldnr = nr_fields; \
212 BCH_ALLOC_FIELDS_V2()
216 out = last_nonzero_field;
217 a->v.nr_fields = last_nonzero_fieldnr;
219 bytes = (u8 *) out - (u8 *) &a->v;
220 set_bkey_val_bytes(&a->k, bytes);
221 memset_u64s_tail(&a->v, 0, bytes);
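/*
 * Illustrative sketch (not from the original code): bch2_alloc_pack_v3()
 * above encodes each field as a varint and then truncates the value after
 * the last nonzero field, so a bucket whose trailing fields are all zero
 * packs to almost nothing.  The toy helper below (hypothetical name) shows
 * the same trailing-zero trimming on a plain array of field values.
 */
static inline unsigned alloc_trim_trailing_zeroes_sketch(const u64 *fields,
							 unsigned nr)
{
	while (nr && !fields[nr - 1])
		nr--;

	return nr;
}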
224 struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
226 struct bkey_alloc_unpacked ret = {
228 .bucket = k.k->p.offset,
234 bch2_alloc_unpack_v1(&ret, k);
236 case KEY_TYPE_alloc_v2:
237 bch2_alloc_unpack_v2(&ret, k);
239 case KEY_TYPE_alloc_v3:
240 bch2_alloc_unpack_v3(&ret, k);
247 void bch2_alloc_pack(struct bch_fs *c,
248 struct bkey_alloc_buf *dst,
249 const struct bkey_alloc_unpacked src)
251 bch2_alloc_pack_v3(dst, src);
254 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
256 unsigned i, bytes = offsetof(struct bch_alloc, data);
258 for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
259 if (a->fields & (1 << i))
260 bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
262 return DIV_ROUND_UP(bytes, sizeof(u64));
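/*
 * Worked example (illustrative, with assumed numbers): if, say,
 * offsetof(struct bch_alloc, data) is 2 and the key's fields bitmask selects
 * two fields of 2 and 4 bytes, then bytes = 2 + 2 + 4 = 8, which rounds up
 * to DIV_ROUND_UP(8, 8) = 1 u64 of value.
 */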
265 const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
267 struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
269 if (k.k->p.inode >= c->sb.nr_devices ||
270 !c->devs[k.k->p.inode])
271 return "invalid device";
273 /* allow for unknown fields */
274 if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
275 return "incorrect value size";
280 const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
282 struct bkey_alloc_unpacked u;
284 if (k.k->p.inode >= c->sb.nr_devices ||
285 !c->devs[k.k->p.inode])
286 return "invalid device";
288 if (bch2_alloc_unpack_v2(&u, k))
289 return "unpack error";
294 const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
296 struct bkey_alloc_unpacked u;
298 if (k.k->p.inode >= c->sb.nr_devices ||
299 !c->devs[k.k->p.inode])
300 return "invalid device";
302 if (bch2_alloc_unpack_v3(&u, k))
303 return "unpack error";
308 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
311 struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
313 pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
314 u.gen, u.oldest_gen, bch2_data_types[u.data_type],
316 #define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name);
317 BCH_ALLOC_FIELDS_V2()
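/*
 * Hypothetical example output (illustrative only; values made up): a key
 * might print as "gen 4 oldest_gen 2 data_type user journal_seq 8192",
 * followed by one " <name> <value>" pair per BCH_ALLOC_FIELDS_V2() field,
 * e.g. " read_time 1234 write_time 5678 dirty_sectors 128".
 */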
321 static int bch2_alloc_read_fn(struct btree_trans *trans, struct bkey_s_c k)
323 struct bch_fs *c = trans->c;
326 struct bkey_alloc_unpacked u;
328 if (!bkey_is_alloc(k.k))
331 ca = bch_dev_bkey_exists(c, k.k->p.inode);
332 g = bucket(ca, k.k->p.offset);
333 u = bch2_alloc_unpack(k);
335 g->_mark.gen = u.gen;
336 g->_mark.data_type = u.data_type;
337 g->_mark.dirty_sectors = u.dirty_sectors;
338 g->_mark.cached_sectors = u.cached_sectors;
339 g->_mark.stripe = u.stripe != 0;
340 g->stripe = u.stripe;
341 g->stripe_redundancy = u.stripe_redundancy;
342 g->io_time[READ] = u.read_time;
343 g->io_time[WRITE] = u.write_time;
344 g->oldest_gen = u.oldest_gen;
350 int bch2_alloc_read(struct bch_fs *c)
352 struct btree_trans trans;
355 bch2_trans_init(&trans, c, 0, 0);
356 down_read(&c->gc_lock);
357 ret = bch2_btree_and_journal_walk(&trans, BTREE_ID_alloc, bch2_alloc_read_fn);
358 up_read(&c->gc_lock);
359 bch2_trans_exit(&trans);
361 bch_err(c, "error reading alloc info: %i", ret);
368 static int bch2_alloc_write_key(struct btree_trans *trans,
369 struct btree_iter *iter,
372 struct bch_fs *c = trans->c;
376 struct bucket_mark m;
377 struct bkey_alloc_unpacked old_u, new_u;
378 struct bkey_alloc_buf a;
381 bch2_trans_begin(trans);
383 ret = bch2_btree_key_cache_flush(trans,
384 BTREE_ID_alloc, iter->pos);
388 k = bch2_btree_iter_peek_slot(iter);
393 old_u = bch2_alloc_unpack(k);
395 percpu_down_read(&c->mark_lock);
396 ca = bch_dev_bkey_exists(c, iter->pos.inode);
397 g = bucket(ca, iter->pos.offset);
398 m = READ_ONCE(g->mark);
399 new_u = alloc_mem_to_key(iter, g, m);
400 percpu_up_read(&c->mark_lock);
402 if (!bkey_alloc_unpacked_cmp(old_u, new_u))
405 bch2_alloc_pack(c, &a, new_u);
406 ret = bch2_trans_update(trans, iter, &a.k,
407 BTREE_TRIGGER_NORUN) ?:
408 bch2_trans_commit(trans, NULL, NULL,
409 BTREE_INSERT_NOFAIL|flags);
416 int bch2_alloc_write(struct bch_fs *c, unsigned flags)
418 struct btree_trans trans;
419 struct btree_iter iter;
424 bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
425 bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
426 BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
428 for_each_member_device(ca, c, i) {
429 bch2_btree_iter_set_pos(&iter,
430 POS(ca->dev_idx, ca->mi.first_bucket));
432 while (iter.pos.offset < ca->mi.nbuckets) {
433 ret = bch2_alloc_write_key(&trans, &iter, flags);
435 percpu_ref_put(&ca->ref);
438 bch2_btree_iter_advance(&iter);
442 bch2_trans_iter_exit(&trans, &iter);
443 bch2_trans_exit(&trans);
447 /* Bucket IO clocks: */
449 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
450 size_t bucket_nr, int rw)
452 struct bch_fs *c = trans->c;
453 struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
454 struct btree_iter iter;
456 struct bkey_alloc_buf *a;
457 struct bkey_alloc_unpacked u;
461 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
463 BTREE_ITER_CACHED_NOFILL|
465 ret = bch2_btree_iter_traverse(&iter);
469 a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
470 ret = PTR_ERR_OR_ZERO(a);
474 percpu_down_read(&c->mark_lock);
475 g = bucket(ca, bucket_nr);
476 u = alloc_mem_to_key(&iter, g, READ_ONCE(g->mark));
477 percpu_up_read(&c->mark_lock);
479 time = rw == READ ? &u.read_time : &u.write_time;
480 now = atomic64_read(&c->io_clock[rw].now);
486 bch2_alloc_pack(c, a, u);
487 ret = bch2_trans_update(trans, &iter, &a->k, 0) ?:
488 bch2_trans_commit(trans, NULL, NULL, 0);
490 bch2_trans_iter_exit(trans, &iter);
494 /* Background allocator thread: */
497 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
498 * (marking them as invalidated on disk), then optionally issues discard
499 * commands to the newly free buckets, then puts them on the various freelists.
502 static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
503 struct bucket_mark m)
507 if (!is_available_bucket(m))
510 if (m.owned_by_allocator)
513 if (ca->buckets_nouse &&
514 test_bit(b, ca->buckets_nouse))
517 gc_gen = bucket_gc_gen(bucket(ca, b));
519 ca->inc_gen_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX / 2;
520 ca->inc_gen_really_needs_gc += gc_gen >= BUCKET_GC_GEN_MAX;
522 return gc_gen < BUCKET_GC_GEN_MAX;
526 * Determines what order we're going to reuse buckets, smallest bucket_key()
530 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
531 u64 now, u64 last_seq_ondisk)
533 unsigned used = bucket_sectors_used(m);
537 * Prefer to keep buckets that have been read more recently, and
538 * buckets that have more data in them:
540 u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
541 u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));
543 return -last_read_scaled;
546 * Prefer to use buckets with smaller gc_gen so that we don't
547 * have to walk the btree and recalculate oldest_gen - but shift
548 * off the low bits so that buckets will still have equal sort
549 * keys when there's only a small difference, so that we can
550 * keep sequential buckets together:
552 return (bucket_needs_journal_commit(m, last_seq_ondisk) << 4)|
553 (bucket_gc_gen(g) >> 4);
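/*
 * Worked example for the empty-bucket branch above (illustrative, numbers
 * made up): a bucket with gc_gen 7 that doesn't need a journal commit gets
 * key (0 << 4) | (7 >> 4) = 0, while one with gc_gen 40 that does need a
 * commit gets (1 << 4) | (40 >> 4) = 18.  Buckets with the smallest keys
 * are reclaimed first, so clean low-gc_gen buckets go first, and buckets
 * whose gc_gens fall in the same block of 16 share a key and stay together.
 */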
557 static inline int bucket_alloc_cmp(alloc_heap *h,
558 struct alloc_heap_entry l,
559 struct alloc_heap_entry r)
561 return cmp_int(l.key, r.key) ?:
562 cmp_int(r.nr, l.nr) ?:
563 cmp_int(l.bucket, r.bucket);
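/*
 * Note on the comparator above (illustrative): cmp_int() yields a
 * negative/zero/positive result and the ?: chain only falls through to the
 * next tie-breaker on equality, so entries sort by ascending key, then
 * descending nr (note the swapped arguments), then ascending bucket - e.g.
 * with equal keys, an entry with nr 8 sorts ahead of one with nr 2.
 */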
566 static inline int bucket_idx_cmp(const void *_l, const void *_r)
568 const struct alloc_heap_entry *l = _l, *r = _r;
570 return cmp_int(l->bucket, r->bucket);
573 static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
575 struct bucket_array *buckets;
576 struct alloc_heap_entry e = { 0 };
577 u64 now, last_seq_ondisk;
580 down_read(&ca->bucket_lock);
582 buckets = bucket_array(ca);
583 ca->alloc_heap.used = 0;
584 now = atomic64_read(&c->io_clock[READ].now);
585 last_seq_ondisk = c->journal.last_seq_ondisk;
588 * Find buckets with lowest read priority, by building a maxheap sorted
589 * by read priority and repeatedly replacing the maximum element until
590 * all buckets have been visited.
592 for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
593 struct bucket *g = &buckets->b[b];
594 struct bucket_mark m = READ_ONCE(g->mark);
595 unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);
599 if (!bch2_can_invalidate_bucket(ca, b, m))
602 if (e.nr && e.bucket + e.nr == b && e.key == key) {
606 heap_add_or_replace(&ca->alloc_heap, e,
607 -bucket_alloc_cmp, NULL);
609 e = (struct alloc_heap_entry) {
618 heap_add_or_replace(&ca->alloc_heap, e,
619 -bucket_alloc_cmp, NULL);
621 for (i = 0; i < ca->alloc_heap.used; i++)
622 nr += ca->alloc_heap.data[i].nr;
624 while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
625 nr -= ca->alloc_heap.data[0].nr;
626 heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
629 up_read(&ca->bucket_lock);
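/*
 * Illustrative sketch (not from the original code) of the bounded-heap
 * technique described in the comment above: to keep the N best
 * (smallest-keyed) candidates, maintain a max-heap of at most N entries and,
 * once it's full, replace the root whenever a better candidate turns up -
 * the loop above achieves this with heap_add_or_replace() and an inverted
 * comparator.  Everything below is a hypothetical, self-contained toy
 * version, not the bcachefs heap implementation.
 */
struct reclaim_sketch_entry {
	unsigned	key;
	u64		bucket;
};

static void reclaim_sketch_sift_down(struct reclaim_sketch_entry *h,
				     size_t nr, size_t i)
{
	while (1) {
		size_t l = 2 * i + 1, r = 2 * i + 2, biggest = i;

		if (l < nr && h[l].key > h[biggest].key)
			biggest = l;
		if (r < nr && h[r].key > h[biggest].key)
			biggest = r;
		if (biggest == i)
			break;

		swap(h[i], h[biggest]);
		i = biggest;
	}
}

/* Keep the 'size' smallest-keyed entries seen so far: */
static void reclaim_sketch_add_or_replace(struct reclaim_sketch_entry *h,
					  size_t *nr, size_t size,
					  struct reclaim_sketch_entry e)
{
	if (*nr < size) {
		size_t i = (*nr)++;

		h[i] = e;
		while (i && h[(i - 1) / 2].key < h[i].key) {
			swap(h[i], h[(i - 1) / 2]);
			i = (i - 1) / 2;
		}
	} else if (e.key < h[0].key) {
		h[0] = e;
		reclaim_sketch_sift_down(h, size, 0);
	}
}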
632 static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
634 struct bucket_array *buckets = bucket_array(ca);
635 struct bucket_mark m;
638 if (ca->fifo_last_bucket < ca->mi.first_bucket ||
639 ca->fifo_last_bucket >= ca->mi.nbuckets)
640 ca->fifo_last_bucket = ca->mi.first_bucket;
642 start = ca->fifo_last_bucket;
645 ca->fifo_last_bucket++;
646 if (ca->fifo_last_bucket == ca->mi.nbuckets)
647 ca->fifo_last_bucket = ca->mi.first_bucket;
649 b = ca->fifo_last_bucket;
650 m = READ_ONCE(buckets->b[b].mark);
652 if (bch2_can_invalidate_bucket(ca, b, m)) {
653 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
655 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
656 if (heap_full(&ca->alloc_heap))
661 } while (ca->fifo_last_bucket != start);
664 static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
666 struct bucket_array *buckets = bucket_array(ca);
667 struct bucket_mark m;
671 checked < ca->mi.nbuckets / 2;
673 size_t b = bch2_rand_range(ca->mi.nbuckets -
674 ca->mi.first_bucket) +
677 m = READ_ONCE(buckets->b[b].mark);
679 if (bch2_can_invalidate_bucket(ca, b, m)) {
680 struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
682 heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
683 if (heap_full(&ca->alloc_heap))
690 sort(ca->alloc_heap.data,
692 sizeof(ca->alloc_heap.data[0]),
693 bucket_idx_cmp, NULL);
695 /* remove duplicates: */
696 for (i = 0; i + 1 < ca->alloc_heap.used; i++)
697 if (ca->alloc_heap.data[i].bucket ==
698 ca->alloc_heap.data[i + 1].bucket)
699 ca->alloc_heap.data[i].nr = 0;
702 static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
706 ca->inc_gen_needs_gc = 0;
707 ca->inc_gen_really_needs_gc = 0;
709 switch (ca->mi.replacement) {
710 case BCH_CACHE_REPLACEMENT_lru:
711 find_reclaimable_buckets_lru(c, ca);
713 case BCH_CACHE_REPLACEMENT_fifo:
714 find_reclaimable_buckets_fifo(c, ca);
716 case BCH_CACHE_REPLACEMENT_random:
717 find_reclaimable_buckets_random(c, ca);
721 heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
723 for (i = 0; i < ca->alloc_heap.used; i++)
724 nr += ca->alloc_heap.data[i].nr;
730 * Returns the sequence number of the most recent journal entry that updated this
733 static u64 bucket_journal_seq(struct bch_fs *c, struct bucket_mark m)
735 if (m.journal_seq_valid) {
736 u64 journal_seq = atomic64_read(&c->journal.seq);
737 u64 bucket_seq = journal_seq;
739 bucket_seq &= ~((u64) U16_MAX);
740 bucket_seq |= m.journal_seq;
742 if (bucket_seq > journal_seq)
743 bucket_seq -= 1 << 16;
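/*
 * Worked example (illustrative, numbers made up): if the current journal
 * sequence is 0x20005 and the bucket's mark recorded low bits 0xfff0, then
 * (0x20005 & ~0xffff) | 0xfff0 = 0x2fff0 is in the future, so we subtract
 * 1 << 16 and recover 0x1fff0 - the most recent sequence number with those
 * low 16 bits that isn't newer than the journal itself.
 */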
751 static int bucket_invalidate_btree(struct btree_trans *trans,
752 struct bch_dev *ca, u64 b)
754 struct bch_fs *c = trans->c;
755 struct bkey_alloc_buf *a;
756 struct bkey_alloc_unpacked u;
758 struct bucket_mark m;
759 struct btree_iter iter;
762 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
765 BTREE_ITER_CACHED_NOFILL|
768 a = bch2_trans_kmalloc(trans, sizeof(*a));
769 ret = PTR_ERR_OR_ZERO(a);
773 ret = bch2_btree_iter_traverse(&iter);
777 percpu_down_read(&c->mark_lock);
779 m = READ_ONCE(g->mark);
780 u = alloc_mem_to_key(&iter, g, m);
781 percpu_up_read(&c->mark_lock);
786 u.cached_sectors = 0;
787 u.read_time = atomic64_read(&c->io_clock[READ].now);
788 u.write_time = atomic64_read(&c->io_clock[WRITE].now);
790 bch2_alloc_pack(c, a, u);
791 ret = bch2_trans_update(trans, &iter, &a->k,
792 BTREE_TRIGGER_BUCKET_INVALIDATE);
794 bch2_trans_iter_exit(trans, &iter);
798 static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
799 u64 *journal_seq, unsigned flags)
802 struct bucket_mark m;
806 BUG_ON(!ca->alloc_heap.used ||
807 !ca->alloc_heap.data[0].nr);
808 b = ca->alloc_heap.data[0].bucket;
810 /* first, put on free_inc and mark as owned by allocator: */
811 percpu_down_read(&c->mark_lock);
813 m = READ_ONCE(g->mark);
815 BUG_ON(m.dirty_sectors);
817 bch2_mark_alloc_bucket(c, ca, b, true);
819 spin_lock(&c->freelist_lock);
820 verify_not_on_freelist(c, ca, b);
821 BUG_ON(!fifo_push(&ca->free_inc, b));
822 spin_unlock(&c->freelist_lock);
825 * If we're not invalidating cached data, we only increment the bucket
826 * gen in memory here; the incremented gen will be updated in the btree
827 * by bch2_trans_mark_pointer():
829 if (!m.cached_sectors &&
830 !bucket_needs_journal_commit(m, c->journal.last_seq_ondisk)) {
832 bucket_cmpxchg(g, m, m.gen++);
833 percpu_up_read(&c->mark_lock);
837 percpu_up_read(&c->mark_lock);
840 * If the read-only path is trying to shut down, we can't be generating
843 if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags)) {
848 ret = bch2_trans_do(c, NULL, journal_seq,
849 BTREE_INSERT_NOCHECK_RW|
851 BTREE_INSERT_JOURNAL_RESERVED|
853 bucket_invalidate_btree(&trans, ca, b));
856 /* remove from alloc_heap: */
857 struct alloc_heap_entry e, *top = ca->alloc_heap.data;
863 heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
866 * Make sure we flush the last journal entry that updated this
867 * bucket (i.e. deleting the last reference) before writing to
870 *journal_seq = max(*journal_seq, bucket_journal_seq(c, m));
874 /* remove from free_inc: */
875 percpu_down_read(&c->mark_lock);
876 spin_lock(&c->freelist_lock);
878 bch2_mark_alloc_bucket(c, ca, b, false);
880 BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
883 spin_unlock(&c->freelist_lock);
884 percpu_up_read(&c->mark_lock);
887 return ret < 0 ? ret : 0;
891 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
893 static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
898 /* Only use nowait if we've already invalidated at least one bucket: */
900 !fifo_full(&ca->free_inc) &&
901 ca->alloc_heap.used) {
902 if (kthread_should_stop()) {
907 ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
908 (!fifo_empty(&ca->free_inc)
909 ? BTREE_INSERT_NOWAIT : 0));
911 * We only want to batch up invalidates when they're going to
912 * require flushing the journal:
918 /* If we used NOWAIT, don't return the error: */
919 if (!fifo_empty(&ca->free_inc))
922 bch_err(ca, "error invalidating buckets: %i", ret);
927 ret = bch2_journal_flush_seq(&c->journal, journal_seq);
929 bch_err(ca, "journal error: %i", ret);
936 static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
938 if (ca->allocator_state != new_state) {
939 ca->allocator_state = new_state;
940 closure_wake_up(&ca->fs->freelist_wait);
944 static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
949 spin_lock(&c->freelist_lock);
950 for (i = 0; i < RESERVE_NR; i++) {
952 * Don't strand buckets on the copygc freelist until
953 * after recovery is finished:
955 if (i == RESERVE_MOVINGGC &&
956 !test_bit(BCH_FS_STARTED, &c->flags))
959 if (fifo_push(&ca->free[i], b)) {
960 fifo_pop(&ca->free_inc, b);
965 spin_unlock(&c->freelist_lock);
967 ca->allocator_state = ret
969 : ALLOCATOR_blocked_full;
970 closure_wake_up(&c->freelist_wait);
974 static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
976 if (ca->mi.discard &&
977 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
978 blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
979 ca->mi.bucket_size, GFP_NOFS, 0);
982 static bool allocator_thread_running(struct bch_dev *ca)
984 unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
985 test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
988 alloc_thread_set_state(ca, state);
989 return state == ALLOCATOR_running;
992 static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
994 s64 available = dev_buckets_reclaimable(ca) -
995 (gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
996 bool ret = available > 0;
998 alloc_thread_set_state(ca, ret
1000 : ALLOCATOR_blocked);
1005 * bch2_allocator_thread - move buckets from free_inc to reserves
1007 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
1008 * the reserves are depleted by bucket allocation. When we run out
1009 * of free_inc, try to invalidate some buckets and write out
1012 static int bch2_allocator_thread(void *arg)
1014 struct bch_dev *ca = arg;
1015 struct bch_fs *c = ca->fs;
1016 unsigned long gc_count = c->gc_count;
1023 ret = kthread_wait_freezable(allocator_thread_running(ca));
1027 while (!ca->alloc_heap.used) {
1030 ret = kthread_wait_freezable(buckets_available(ca, gc_count));
1034 gc_count = c->gc_count;
1035 nr = find_reclaimable_buckets(c, ca);
1037 trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
1038 ca->inc_gen_really_needs_gc);
1040 if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
1041 ca->inc_gen_really_needs_gc) &&
1043 atomic_inc(&c->kick_gc);
1044 wake_up_process(c->gc_thread);
1048 ret = bch2_invalidate_buckets(c, ca);
1052 while (!fifo_empty(&ca->free_inc)) {
1053 u64 b = fifo_peek(&ca->free_inc);
1055 discard_one_bucket(c, ca, b);
1057 ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
1063 alloc_thread_set_state(ca, ALLOCATOR_stopped);
1067 /* Startup/shutdown (ro/rw): */
1069 void bch2_recalc_capacity(struct bch_fs *c)
1072 u64 capacity = 0, reserved_sectors = 0, gc_reserve;
1073 unsigned bucket_size_max = 0;
1074 unsigned long ra_pages = 0;
1077 lockdep_assert_held(&c->state_lock);
1079 for_each_online_member(ca, c, i) {
1080 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
1082 ra_pages += bdi->ra_pages;
1085 bch2_set_ra_pages(c, ra_pages);
1087 for_each_rw_member(ca, c, i) {
1088 u64 dev_reserve = 0;
1091 * We need to reserve buckets (from the number
1092 * of currently available buckets) against
1093 * foreground writes so that mainly copygc can
1094 * make forward progress.
1096 * We need enough to refill the various reserves
1097 * from scratch - copygc will use its entire
1098 * reserve all at once, then run again when
1099 * its reserve is refilled (from the formerly
1100 * available buckets).
1102 * This reserve is just used when considering if
1103 * allocations for foreground writes must wait -
1104 * not -ENOSPC calculations.
1106 for (j = 0; j < RESERVE_NONE; j++)
1107 dev_reserve += ca->free[j].size;
1109 dev_reserve += 1; /* btree write point */
1110 dev_reserve += 1; /* copygc write point */
1111 dev_reserve += 1; /* rebalance write point */
1113 dev_reserve *= ca->mi.bucket_size;
1115 capacity += bucket_to_sector(ca, ca->mi.nbuckets -
1116 ca->mi.first_bucket);
1118 reserved_sectors += dev_reserve * 2;
1120 bucket_size_max = max_t(unsigned, bucket_size_max,
1121 ca->mi.bucket_size);
1124 gc_reserve = c->opts.gc_reserve_bytes
1125 ? c->opts.gc_reserve_bytes >> 9
1126 : div64_u64(capacity * c->opts.gc_reserve_percent, 100);
1128 reserved_sectors = max(gc_reserve, reserved_sectors);
1130 reserved_sectors = min(reserved_sectors, capacity);
1132 c->capacity = capacity - reserved_sectors;
1134 c->bucket_size_max = bucket_size_max;
1136 /* Wake up in case someone was waiting for buckets */
1137 closure_wake_up(&c->freelist_wait);
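/*
 * Worked example (illustrative, all numbers hypothetical): with a single rw
 * device contributing 2,560,000 sectors of raw capacity, per-device
 * write-point/freelist reserves summing to 30,720 sectors after doubling,
 * and gc_reserve_percent = 8, gc_reserve = 2,560,000 * 8 / 100 = 204,800
 * sectors; reserved_sectors = max(204,800, 30,720) = 204,800, and
 * c->capacity ends up as 2,560,000 - 204,800 = 2,355,200 usable sectors.
 */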
1140 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
1142 struct open_bucket *ob;
1145 for (ob = c->open_buckets;
1146 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1148 spin_lock(&ob->lock);
1149 if (ob->valid && !ob->on_partial_list &&
1150 ob->ptr.dev == ca->dev_idx)
1152 spin_unlock(&ob->lock);
1158 /* device goes ro: */
1159 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
1163 BUG_ON(ca->alloc_thread);
1165 /* First, remove device from allocation groups: */
1167 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1168 clear_bit(ca->dev_idx, c->rw_devs[i].d);
1171 * Capacity is calculated based on the devices in allocation groups:
1173 bch2_recalc_capacity(c);
1175 /* Next, close write points that point to this device... */
1176 for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
1177 bch2_writepoint_stop(c, ca, &c->write_points[i]);
1179 bch2_writepoint_stop(c, ca, &c->copygc_write_point);
1180 bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
1181 bch2_writepoint_stop(c, ca, &c->btree_write_point);
1183 mutex_lock(&c->btree_reserve_cache_lock);
1184 while (c->btree_reserve_cache_nr) {
1185 struct btree_alloc *a =
1186 &c->btree_reserve_cache[--c->btree_reserve_cache_nr];
1188 bch2_open_buckets_put(c, &a->ob);
1190 mutex_unlock(&c->btree_reserve_cache_lock);
1193 struct open_bucket *ob;
1195 spin_lock(&c->freelist_lock);
1196 if (!ca->open_buckets_partial_nr) {
1197 spin_unlock(&c->freelist_lock);
1200 ob = c->open_buckets +
1201 ca->open_buckets_partial[--ca->open_buckets_partial_nr];
1202 ob->on_partial_list = false;
1203 spin_unlock(&c->freelist_lock);
1205 bch2_open_bucket_put(c, ob);
1208 bch2_ec_stop_dev(c, ca);
1211 * Wake up threads that were blocked on allocation, so they can notice
1212 * the device can no longer be removed and the capacity has changed:
1214 closure_wake_up(&c->freelist_wait);
1217 * journal_res_get() can block waiting for free space in the journal -
1218 * it needs to notice there may not be devices to allocate from anymore:
1220 wake_up(&c->journal.wait);
1222 /* Now wait for any in flight writes: */
1224 closure_wait_event(&c->open_buckets_wait,
1225 !bch2_dev_has_open_write_point(c, ca));
1228 /* device goes rw: */
1229 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
1233 for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
1234 if (ca->mi.data_allowed & (1 << i))
1235 set_bit(ca->dev_idx, c->rw_devs[i].d);
1238 void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
1240 if (ca->alloc_thread)
1241 closure_wait_event(&c->freelist_wait,
1242 ca->allocator_state != ALLOCATOR_running);
1245 /* stop allocator thread: */
1246 void bch2_dev_allocator_stop(struct bch_dev *ca)
1248 struct task_struct *p;
1250 p = rcu_dereference_protected(ca->alloc_thread, 1);
1251 ca->alloc_thread = NULL;
1254 * We need an rcu barrier between setting ca->alloc_thread = NULL and
1255 * the thread shutting down to avoid bch2_wake_allocator() racing:
1257 * XXX: it would be better to have the rcu barrier be asynchronous
1258 * instead of blocking us here
1268 /* start allocator thread: */
1269 int bch2_dev_allocator_start(struct bch_dev *ca)
1271 struct task_struct *p;
1274 * allocator thread already started?
1276 if (ca->alloc_thread)
1279 p = kthread_create(bch2_allocator_thread, ca,
1280 "bch-alloc/%s", ca->name);
1282 bch_err(ca->fs, "error creating allocator thread: %li",
1288 rcu_assign_pointer(ca->alloc_thread, p);
1293 void bch2_fs_allocator_background_init(struct bch_fs *c)
1295 spin_lock_init(&c->freelist_lock);
1298 void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
1300 struct open_bucket *ob;
1302 for (ob = c->open_buckets;
1303 ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
1305 spin_lock(&ob->lock);
1306 if (ob->valid && !ob->on_partial_list) {
1307 pr_buf(out, "%zu ref %u type %s\n",
1308 ob - c->open_buckets,
1309 atomic_read(&ob->pin),
1310 bch2_data_types[ob->type]);
1312 spin_unlock(&ob->lock);