// SPDX-License-Identifier: GPL-2.0

#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets_waiting_for_journal.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
const char * const bch2_allocator_states[] = {

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_buf {
	struct bkey_i		k;

	struct bch_alloc_v3	v;

#define x(_name,  _bits)	+ _bits / 8
	u8			_pad[0 + BCH_ALLOC_FIELDS_V2()];
#undef  x
} __attribute__((packed, aligned(8)));
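/*
 * A bkey_alloc_buf is sized to hold a packed alloc key: the fixed
 * bch_alloc_v3 header plus space reserved for the BCH_ALLOC_FIELDS_V2()
 * values, so bch2_alloc_write() below can pack into a single fixed-size
 * transaction allocation.
 */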
/* Persistent alloc info: */

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

		v = *((const u8 *) *p);

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	a->v.fields |= 1 << field;

		*((__le16 *) *p) = cpu_to_le16(v);
		*((__le32 *) *p) = cpu_to_le32(v);
		*((__le64 *) *p) = cpu_to_le64(v);

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
	if (v != out->_name)						\

	BCH_ALLOC_FIELDS_V2()
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;

	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
	if (v != out->_name)						\

	BCH_ALLOC_FIELDS_V2()
static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
			       const struct bkey_alloc_unpacked src)
{
	struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
	u8 *out = a->v.data;
	u8 *end = (void *) &dst[1];
	u8 *last_nonzero_field = out;
	unsigned bytes;

	a->k.p			= POS(src.dev, src.bucket);
	a->v.oldest_gen		= src.oldest_gen;
	a->v.data_type		= src.data_type;
	a->v.journal_seq	= cpu_to_le64(src.journal_seq);

#define x(_name, _bits)							\
		out += bch2_varint_encode_fast(out, src._name);		\
		last_nonzero_field = out;				\
		last_nonzero_fieldnr = nr_fields;			\

	BCH_ALLOC_FIELDS_V2()
#undef  x
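	/*
	 * Only the bytes up to the last nonzero field are kept: trailing
	 * fields that packed to zero are dropped from nr_fields, and the
	 * remainder of the value is zero-padded out to a whole u64 below.
	 */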
	out = last_nonzero_field;
	a->v.nr_fields = last_nonzero_fieldnr;

	bytes = (u8 *) out - (u8 *) &a->v;
	set_bkey_val_bytes(&a->k, bytes);
	memset_u64s_tail(&a->v, 0, bytes);
}
struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = {
		.dev	= k.k->p.inode,
		.bucket	= k.k->p.offset,
	};

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static void bch2_alloc_pack(struct bch_fs *c,
			    struct bkey_alloc_buf *dst,
			    const struct bkey_alloc_unpacked src)
{
	bch2_alloc_pack_v3(dst, src);
}
int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
		     struct bkey_alloc_unpacked *u, unsigned trigger_flags)
{
	struct bkey_alloc_buf *a;

	a = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));

	bch2_alloc_pack(trans->c, a, *u);
	return bch2_trans_update(trans, iter, &a->k, trigger_flags);
}
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

const char *bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v))
		return "incorrect value size";

	return NULL;
}
const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u;

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	if (bch2_alloc_unpack_v2(&u, k))
		return "unpack error";

	return NULL;
}

const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u;

	if (k.k->p.inode >= c->sb.nr_devices ||
	    !c->devs[k.k->p.inode])
		return "invalid device";

	if (bch2_alloc_unpack_v3(&u, k))
		return "unpack error";

	return NULL;
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
			struct bkey_s_c k)
{
	struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

	pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu",
	       u.gen, u.oldest_gen, bch2_data_types[u.data_type],
	       u.journal_seq);

#define x(_name, ...)	pr_buf(out, " " #_name " %llu", (u64) u._name);
	BCH_ALLOC_FIELDS_V2()
#undef  x
}
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bucket *g;
	struct bkey_alloc_unpacked u;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	down_read(&c->gc_lock);
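	/*
	 * Walk the entire alloc btree and copy each key's fields into the
	 * corresponding in-memory struct bucket, so the allocator starts from
	 * the state that was last persisted on disk.
	 */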
	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		if (!bkey_is_alloc(k.k))
			continue;

		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		g = bucket(ca, k.k->p.offset);
		u = bch2_alloc_unpack(k);

		*bucket_gen(ca, k.k->p.offset) = u.gen;
		g->_mark.gen		= u.gen;
		g->_mark.data_type	= u.data_type;
		g->_mark.dirty_sectors	= u.dirty_sectors;
		g->_mark.cached_sectors	= u.cached_sectors;
		g->_mark.stripe		= u.stripe != 0;
		g->stripe		= u.stripe;
		g->stripe_redundancy	= u.stripe_redundancy;
		g->io_time[READ]	= u.read_time;
		g->io_time[WRITE]	= u.write_time;
		g->oldest_gen		= u.oldest_gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	up_read(&c->gc_lock);
	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %i", ret);

	return ret;
}
static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked old_u, new_u;
	int ret;

	bch2_trans_begin(trans);

	ret = bch2_btree_key_cache_flush(trans,
			BTREE_ID_alloc, iter->pos);

	k = bch2_btree_iter_peek_slot(iter);

	old_u	= bch2_alloc_unpack(k);
	new_u	= alloc_mem_to_key(c, iter);

	if (!bkey_alloc_unpacked_cmp(old_u, new_u))
		return 0;

	ret   = bch2_alloc_write(trans, iter, &new_u,
				 BTREE_TRIGGER_NORUN) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BTREE_INSERT_NOFAIL|flags);
int bch2_alloc_write_all(struct bch_fs *c, unsigned flags)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);

	for_each_member_device(ca, c, i) {
		bch2_btree_iter_set_pos(&iter,
			POS(ca->dev_idx, ca->mi.first_bucket));

		while (iter.pos.offset < ca->mi.nbuckets) {
			ret = bch2_alloc_write_key(&trans, &iter, flags);
			if (ret) {
				percpu_ref_put(&ca->ref);
				goto err;
			}
			bch2_btree_iter_advance(&iter);
		}
	}
err:
	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);
	return ret;
}
/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_alloc_unpacked u;
	u64 *time, now;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),

	k = bch2_btree_iter_peek_slot(&iter);

	u = bch2_alloc_unpack(k);

	time = rw == READ ? &u.read_time : &u.write_time;
	now = atomic64_read(&c->io_clock[rw].now);

	ret = bch2_alloc_write(trans, &iter, &u, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL, 0);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* Background allocator thread: */

/*
 * Scans for buckets to be invalidated, invalidates them, rewrites prios/gens
 * (marking them as invalidated on disk), then optionally issues discard
 * commands to the newly free buckets, then puts them on the various freelists.
 */
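/*
 * Roughly: find_reclaimable_buckets() fills ca->alloc_heap with candidate
 * buckets, bch2_invalidate_buckets() rewrites their alloc keys and pushes
 * them onto ca->free_inc, and push_invalidated_bucket() (after an optional
 * discard) hands them out to the ca->free[] reserves that foreground
 * allocation draws from.
 */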
static bool bch2_can_invalidate_bucket(struct bch_dev *ca, size_t b,
				       struct bucket_mark m)
{
	u8 gc_gen;

	if (!is_available_bucket(m))
		return false;

	if (m.owned_by_allocator)
		return false;

	if (ca->buckets_nouse &&
	    test_bit(b, ca->buckets_nouse))
		return false;

	if (ca->new_fs_bucket_idx) {
		/*
		 * Device or filesystem is still being initialized, and we
		 * haven't fully marked superblocks & journal:
		 */
		if (is_superblock_bucket(ca, b))
			return false;

		if (b < ca->new_fs_bucket_idx)
			return false;
	}

	gc_gen = bucket_gc_gen(bucket(ca, b));

	ca->inc_gen_needs_gc		+= gc_gen >= BUCKET_GC_GEN_MAX / 2;
	ca->inc_gen_really_needs_gc	+= gc_gen >= BUCKET_GC_GEN_MAX;

	return gc_gen < BUCKET_GC_GEN_MAX;
}
/*
 * Determines what order we're going to reuse buckets, smallest bucket_key()
 * first.
 */
static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
				u64 now, u64 last_seq_ondisk)
{
	unsigned used = m.cached_sectors;

	if (used) {
		/*
		 * Prefer to keep buckets that have been read more recently, and
		 * buckets that have more data in them:
		 */
		u64 last_read = max_t(s64, 0, now - g->io_time[READ]);
		u32 last_read_scaled = min_t(u64, U32_MAX, div_u64(last_read, used));

		return -last_read_scaled;
	} else {
		/*
		 * Prefer to use buckets with smaller gc_gen so that we don't
		 * have to walk the btree and recalculate oldest_gen - but shift
		 * off the low bits so that buckets will still have equal sort
		 * keys when there's only a small difference, so that we can
		 * keep sequential buckets together:
		 */
		return bucket_gc_gen(g) >> 4;
	}
}
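/*
 * For example, with the scaled-read-time key above: a bucket holding 128
 * cached sectors last read 1024 io-clock ticks ago gets last_read_scaled
 * 1024 / 128 = 8, while an equally full bucket last read 4096 ticks ago
 * gets 32; since the key is the negated scaled value and smaller keys are
 * reused first, the colder bucket is invalidated sooner. Empty buckets use
 * gc_gen >> 4 instead, so gens within 16 of each other compare equal.
 */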
static inline int bucket_alloc_cmp(alloc_heap *h,
				   struct alloc_heap_entry l,
				   struct alloc_heap_entry r)
{
	return  cmp_int(l.key, r.key) ?:
		cmp_int(r.nr, l.nr) ?:
		cmp_int(l.bucket, r.bucket);
}

static inline int bucket_idx_cmp(const void *_l, const void *_r)
{
	const struct alloc_heap_entry *l = _l, *r = _r;

	return cmp_int(l->bucket, r->bucket);
}
static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
{
	struct bucket_array *buckets;
	struct alloc_heap_entry e = { 0 };
	u64 now, last_seq_ondisk;
	size_t b, i, nr = 0;

	down_read(&ca->bucket_lock);

	buckets = bucket_array(ca);
	ca->alloc_heap.used = 0;
	now = atomic64_read(&c->io_clock[READ].now);
	last_seq_ondisk = c->journal.flushed_seq_ondisk;

	/*
	 * Find buckets with lowest read priority, by building a maxheap sorted
	 * by read priority and repeatedly replacing the maximum element until
	 * all buckets have been visited.
	 */
	for (b = ca->mi.first_bucket; b < ca->mi.nbuckets; b++) {
		struct bucket *g = &buckets->b[b];
		struct bucket_mark m = READ_ONCE(g->mark);
		unsigned key = bucket_sort_key(g, m, now, last_seq_ondisk);

		if (!bch2_can_invalidate_bucket(ca, b, m))
			continue;

		if (bch2_bucket_needs_journal_commit(c, last_seq_ondisk,
						     ca->dev_idx, b)) {
			ca->buckets_waiting_on_journal++;
			continue;
		}
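		/*
		 * Physically contiguous buckets with the same sort key are
		 * merged into a single alloc_heap_entry covering
		 * [e.bucket, e.bucket + e.nr), so runs of sequential buckets
		 * are invalidated together.
		 */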
		if (e.nr && e.bucket + e.nr == b && e.key == key) {
			e.nr++;
		} else {
			if (e.nr)
				heap_add_or_replace(&ca->alloc_heap, e,
						    -bucket_alloc_cmp, NULL);
			e = (struct alloc_heap_entry) {
				.bucket = b,
				.nr	= 1,
				.key	= key,
			};
		}
	}

	if (e.nr)
		heap_add_or_replace(&ca->alloc_heap, e,
				    -bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
		nr -= ca->alloc_heap.data[0].nr;
		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
	}

	up_read(&ca->bucket_lock);
}
static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	size_t i, nr = 0;

	ca->inc_gen_needs_gc		= 0;
	ca->inc_gen_really_needs_gc	= 0;
	ca->buckets_waiting_on_journal	= 0;

	find_reclaimable_buckets_lru(c, ca);

	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);

	for (i = 0; i < ca->alloc_heap.used; i++)
		nr += ca->alloc_heap.data[i].nr;

	return nr;
}
static int bucket_invalidate_btree(struct btree_trans *trans,
				   struct bch_dev *ca, u64 b,
				   struct bkey_alloc_unpacked *u)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,

	k = bch2_btree_iter_peek_slot(&iter);

	*u = bch2_alloc_unpack(k);
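	/*
	 * Invalidate the bucket in the btree: clear its sector counts, reset
	 * its IO clocks, and write the key back with
	 * BTREE_TRIGGER_BUCKET_INVALIDATE so the triggers account for the
	 * invalidation.
	 */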
	u->dirty_sectors	= 0;
	u->cached_sectors	= 0;
	u->read_time		= atomic64_read(&c->io_clock[READ].now);
	u->write_time		= atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_alloc_write(trans, &iter, u,
			       BTREE_TRIGGER_BUCKET_INVALIDATE);

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int bch2_invalidate_one_bucket(struct bch_fs *c, struct bch_dev *ca,
				      u64 *journal_seq, unsigned flags)
{
	struct bkey_alloc_unpacked u;
	size_t b;
	u64 commit_seq = 0;
	int ret = 0;

	/*
	 * If the read-only path is trying to shut down, we can't be generating
	 * new btree updates:
	 */
	if (test_bit(BCH_FS_ALLOCATOR_STOPPING, &c->flags))
		return 1;

	BUG_ON(!ca->alloc_heap.used ||
	       !ca->alloc_heap.data[0].nr);
	b = ca->alloc_heap.data[0].bucket;

	/* first, put on free_inc and mark as owned by allocator: */
	percpu_down_read(&c->mark_lock);

	bch2_mark_alloc_bucket(c, ca, b, true);

	spin_lock(&c->freelist_lock);
	verify_not_on_freelist(c, ca, b);
	BUG_ON(!fifo_push(&ca->free_inc, b));
	spin_unlock(&c->freelist_lock);

	percpu_up_read(&c->mark_lock);

	ret = bch2_trans_do(c, NULL, &commit_seq,
			    BTREE_INSERT_NOCHECK_RW|
			    BTREE_INSERT_JOURNAL_RESERVED|
			    flags,
			    bucket_invalidate_btree(&trans, ca, b, &u));

	if (!ret) {
		/* remove from alloc_heap: */
		struct alloc_heap_entry e, *top = ca->alloc_heap.data;

		top->bucket++;
		top->nr--;

		if (!top->nr)
			heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);

		/*
		 * If we're invalidating cached data then we need to wait on the
		 * journal commit:
		 */
		if (u.data_type)
			*journal_seq = max(*journal_seq, commit_seq);

		/*
		 * We were already waiting on u.journal_seq when we filtered out
		 * buckets that need journal commit:
		 */
		BUG_ON(*journal_seq > u.journal_seq);
	} else {
		size_t b2;

		/* remove from free_inc: */
		percpu_down_read(&c->mark_lock);
		spin_lock(&c->freelist_lock);

		bch2_mark_alloc_bucket(c, ca, b, false);

		BUG_ON(!fifo_pop_back(&ca->free_inc, b2));
		BUG_ON(b != b2);

		spin_unlock(&c->freelist_lock);
		percpu_up_read(&c->mark_lock);
	}

	return ret < 0 ? ret : 0;
}
/*
 * Pull buckets off ca->alloc_heap, invalidate them, move them to ca->free_inc:
 */
static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
{
	u64 journal_seq = 0;
	int ret = 0;

	/* Only use nowait if we've already invalidated at least one bucket: */
	while (!ret &&
	       !fifo_full(&ca->free_inc) &&
	       ca->alloc_heap.used) {
		if (kthread_should_stop()) {
			ret = 1;
			break;
		}

		ret = bch2_invalidate_one_bucket(c, ca, &journal_seq,
				(!fifo_empty(&ca->free_inc)
				 ? BTREE_INSERT_NOWAIT : 0));
		/*
		 * We only want to batch up invalidates when they're going to
		 * require flushing the journal:
		 */
		if (!journal_seq)
			break;
	}

	/* If we used NOWAIT, don't return the error: */
	if (!fifo_empty(&ca->free_inc))
		ret = 0;
	if (ret < 0)
		bch_err(ca, "error invalidating buckets: %i", ret);

	ret = bch2_journal_flush_seq(&c->journal, journal_seq);
	if (ret)
		bch_err(ca, "journal error: %i", ret);
static void alloc_thread_set_state(struct bch_dev *ca, unsigned new_state)
{
	if (ca->allocator_state != new_state) {
		ca->allocator_state = new_state;
		closure_wake_up(&ca->fs->freelist_wait);
	}
}
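/*
 * Move one invalidated bucket from free_inc onto the first reserve freelist
 * with room (skipping copygc's reserve until the filesystem has finished
 * starting), and record via ca->allocator_state whether the allocator is
 * blocked on completely full freelists.
 */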
static int push_invalidated_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
	unsigned i;
	int ret = 0;

	spin_lock(&c->freelist_lock);
	for (i = 0; i < RESERVE_NR; i++) {
		/*
		 * Don't strand buckets on the copygc freelist until
		 * after recovery is finished:
		 */
		if (i == RESERVE_MOVINGGC &&
		    !test_bit(BCH_FS_STARTED, &c->flags))
			continue;

		if (fifo_push(&ca->free[i], b)) {
			fifo_pop(&ca->free_inc, b);
			ret = 1;
			break;
		}
	}
	spin_unlock(&c->freelist_lock);

	ca->allocator_state = ret
		? ALLOCATOR_running
		: ALLOCATOR_blocked_full;
	closure_wake_up(&c->freelist_wait);
	return ret;
}
static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
{
	if (ca->mi.discard &&
	    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
		blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, b),
				     ca->mi.bucket_size, GFP_NOFS, 0);
}

static bool allocator_thread_running(struct bch_dev *ca)
{
	unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
		? ALLOCATOR_running
		: ALLOCATOR_stopped;

	alloc_thread_set_state(ca, state);
	return state == ALLOCATOR_running;
}

static int buckets_available(struct bch_dev *ca, unsigned long gc_count)
{
	s64 available = dev_buckets_reclaimable(ca) -
		(gc_count == ca->fs->gc_count ? ca->inc_gen_really_needs_gc : 0);
	bool ret = available > 0;

	alloc_thread_set_state(ca, ret
			       ? ALLOCATOR_running
			       : ALLOCATOR_blocked);
	return ret;
}
/**
 * bch_allocator_thread - move buckets from free_inc to reserves
 *
 * The free_inc FIFO is populated by find_reclaimable_buckets(), and
 * the reserves are depleted by bucket allocation. When we run out
 * of free_inc, try to invalidate some buckets and write out
 * prios and gens.
 */
static int bch2_allocator_thread(void *arg)
{
	struct bch_dev *ca = arg;
	struct bch_fs *c = ca->fs;
	unsigned long gc_count = c->gc_count;
	size_t nr;
	int ret;

	ret = kthread_wait_freezable(allocator_thread_running(ca));

	while (!ca->alloc_heap.used) {
		ret = kthread_wait_freezable(buckets_available(ca, gc_count));

		gc_count = c->gc_count;
		nr = find_reclaimable_buckets(c, ca);

		if (!nr && ca->buckets_waiting_on_journal) {
			ret = bch2_journal_flush(&c->journal);
		} else if (nr < (ca->mi.nbuckets >> 6) &&
			   ca->buckets_waiting_on_journal >= nr / 2) {
			bch2_journal_flush_async(&c->journal, NULL);
		}

		if ((ca->inc_gen_needs_gc >= ALLOC_SCAN_BATCH(ca) ||
		     ca->inc_gen_really_needs_gc) &&
		    c->gc_thread) {
			atomic_inc(&c->kick_gc);
			wake_up_process(c->gc_thread);
		}

		trace_alloc_scan(ca, nr, ca->inc_gen_needs_gc,
				 ca->inc_gen_really_needs_gc);
	}

	ret = bch2_invalidate_buckets(c, ca);
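	/*
	 * Drain free_inc: each invalidated bucket is optionally discarded and
	 * then pushed onto a reserve freelist, waiting (freezably) whenever
	 * all of the freelists are full.
	 */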
	while (!fifo_empty(&ca->free_inc)) {
		u64 b = fifo_peek(&ca->free_inc);

		discard_one_bucket(c, ca, b);

		ret = kthread_wait_freezable(push_invalidated_bucket(c, ca, b));
	}

	alloc_thread_set_state(ca, ALLOCATOR_stopped);
	return 0;
}
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i, j;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);
	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run against when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		for (j = 0; j < RESERVE_NONE; j++)
			dev_reserve += ca->free[j].size;

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}
	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);
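	/*
	 * Example: with gc_reserve_percent of 8 and no explicit
	 * gc_reserve_bytes, a filesystem with 2,000,000,000 512-byte sectors
	 * (~1 TB) of raw capacity gets gc_reserve = 160,000,000 sectors
	 * (~80 GB); whichever of that and the per-device write point reserve
	 * is larger is what gets subtracted from the usable capacity below.
	 */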
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	BUG_ON(ca->alloc_thread);

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based on devices in allocation groups:
	 */
	bch2_recalc_capacity(c);
	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);
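	/*
	 * Release this device's partially filled open buckets: the device is
	 * going read-only, so they can no longer be used for writes.
	 */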
	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */
	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_quiesce(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca->alloc_thread)
		closure_wait_event(&c->freelist_wait,
				   ca->allocator_state != ALLOCATOR_running);
}

/* stop allocator thread: */
void bch2_dev_allocator_stop(struct bch_dev *ca)
{
	struct task_struct *p;

	p = rcu_dereference_protected(ca->alloc_thread, 1);
	ca->alloc_thread = NULL;
	/*
	 * We need an rcu barrier between setting ca->alloc_thread = NULL and
	 * the thread shutting down to avoid bch2_wake_allocator() racing:
	 *
	 * XXX: it would be better to have the rcu barrier be asynchronous
	 * instead of blocking us here
	 */

/* start allocator thread: */
int bch2_dev_allocator_start(struct bch_dev *ca)
{
	struct task_struct *p;

	/*
	 * allocator thread already started?
	 */
	if (ca->alloc_thread)
		return 0;

	p = kthread_create(bch2_allocator_thread, ca,
			   "bch-alloc/%s", ca->name);
	if (IS_ERR(p)) {
		bch_err(ca->fs, "error creating allocator thread: %li",
			PTR_ERR(p));
		return PTR_ERR(p);
	}

	rcu_assign_pointer(ca->alloc_thread, p);
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}