// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Persistent alloc info: */

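/*
 * Bucket state is stored persistently in the alloc btree. Older versions of
 * the on-disk format packed this state three different ways (alloc v1-v3);
 * the current format, struct bch_alloc_v4, stores every field unpacked. The
 * helpers below decode the legacy formats so everything can be converted to
 * v4 at runtime.
 */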
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u64		bucket;
	u8		dev;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef  x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
				      unsigned field, u64 v)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

	if (!v)
		return;

	a->v.fields |= 1 << field;

	switch (bytes) {
	case 1:
		*((u8 *) *p) = v;
		break;
	case 2:
		*((__le16 *) *p) = cpu_to_le16(v);
		break;
	case 4:
		*((__le32 *) *p) = cpu_to_le32(v);
		break;
	case 8:
		*((__le64 *) *p) = cpu_to_le64(v);
		break;
	default:
		BUG();
	}

	*p += bytes;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = {
		.dev	= k.k->p.inode,
		.bucket	= k.k->p.offset,
		.gen	= 0,
	};

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

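/*
 * Convert any version of an alloc key to the current in-memory
 * representation, struct bch_alloc_v4 - callers never need to know which
 * on-disk format a key was written in:
 */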
void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		*out = *bkey_s_c_to_alloc_v4(k).v;
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};
	}
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		ret = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
		if (!IS_ERR(ret))
			bkey_reassemble(&ret->k_i, k);
	} else {
		ret = bch2_trans_kmalloc(trans, sizeof(*ret));
		if (!IS_ERR(ret)) {
			bkey_alloc_v4_init(&ret->k_i);
			ret->k.p = k.k->p;
			bch2_alloc_to_v4(k, &ret->v);
		}
	}
	return ret;
}

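/*
 * Look up the alloc key at @pos and return a mutable v4 copy, leaving @iter
 * pointing at it so the caller can issue an update against it within the
 * same transaction:
 */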
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_WITH_UPDATES|
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	a = bch2_alloc_to_v4_mut(trans, k);
	if (IS_ERR(a))
		bch2_trans_iter_exit(trans, iter);
	return a;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
		pr_buf(err, "incorrect value size (%zu < %u)",
		       bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
		return -EINVAL;
	}

	return 0;
}

int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v2(&u, k)) {
		pr_buf(err, "unpack error");
		return -EINVAL;
	}

	return 0;
}

int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v3(&u, k)) {
		pr_buf(err, "unpack error");
		return -EINVAL;
	}

	return 0;
}

int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  int rw, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_alloc_v4)) {
		pr_buf(err, "bad val size (%zu != %zu)",
		       bkey_val_bytes(k.k), sizeof(struct bch_alloc_v4));
		return -EINVAL;
	}

	if (rw == WRITE) {
		if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
			pr_buf(err, "invalid data type (got %u should be %u)",
			       a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
			return -EINVAL;
		}

		switch (a.v->data_type) {
		case BCH_DATA_free:
		case BCH_DATA_need_gc_gens:
		case BCH_DATA_need_discard:
			if (a.v->dirty_sectors ||
			    a.v->cached_sectors ||
			    a.v->stripe) {
				pr_buf(err, "empty data type free but have data");
				return -EINVAL;
			}
			break;
		case BCH_DATA_sb:
		case BCH_DATA_journal:
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			if (!a.v->dirty_sectors) {
				pr_buf(err, "data_type %s but dirty_sectors==0",
				       bch2_data_types[a.v->data_type]);
				return -EINVAL;
			}
			break;
		case BCH_DATA_cached:
			if (!a.v->cached_sectors ||
			    a.v->dirty_sectors ||
			    a.v->stripe) {
				pr_buf(err, "data type inconsistency");
				return -EINVAL;
			}

			if (!a.v->io_time[READ]) {
				pr_buf(err, "cached bucket with read_time == 0");
				return -EINVAL;
			}
			break;
		case BCH_DATA_stripe:
			if (!a.v->stripe) {
				pr_buf(err, "data_type %s but stripe==0",
				       bch2_data_types[a.v->data_type]);
				return -EINVAL;
			}
			break;
		}
	}

	return 0;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 a;

	bch2_alloc_to_v4(k, &a);

	pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu need_discard %llu need_inc_gen %llu",
	       a.gen, a.oldest_gen, bch2_data_types[a.data_type],
	       a.journal_seq,
	       BCH_ALLOC_V4_NEED_DISCARD(&a),
	       BCH_ALLOC_V4_NEED_INC_GEN(&a));
	pr_buf(out, " dirty_sectors %u",	a.dirty_sectors);
	pr_buf(out, " cached_sectors %u",	a.cached_sectors);
	pr_buf(out, " stripe %u",		a.stripe);
	pr_buf(out, " stripe_redundancy %u",	a.stripe_redundancy);
	pr_buf(out, " read_time %llu",		a.io_time[READ]);
	pr_buf(out, " write_time %llu",		a.io_time[WRITE]);
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct bch_dev *ca;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		ca = bch_dev_bkey_exists(c, k.k->p.inode);
		bch2_alloc_to_v4(k, &a);

		*bucket_gen(ca, k.k->p.offset) = a.gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading alloc info: %i", ret);

	return ret;
}

/* Free space/discard btree: */

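/*
 * Two btrees index buckets by their state: the need_discard btree lists
 * buckets awaiting a discard, and the freespace btree lists buckets ready
 * for allocation. Freespace keys encode the bucket's generation in the high
 * bits of the key offset (see alloc_freespace_pos()), so a stale freespace
 * entry can be detected by comparing genbits against the alloc key.
 */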
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	bch2_trans_iter_init(trans, &iter, btree,
			     bkey_start_pos(&k->k),
			     BTREE_ITER_INTENT);
	old = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(old);
	if (ret)
		goto err;

	if (ca->mi.freespace_initialized &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s btree (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

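/*
 * Transactional trigger for alloc keys: keeps the need_discard, freespace
 * and lru btrees in sync with changes to a bucket's alloc key, and bumps the
 * bucket gen when an empty bucket is ready for reuse.
 */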
int bch2_trans_mark_alloc(struct btree_trans *trans,
			  struct bkey_s_c old, struct bkey_i *new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a, *new_a;
	u64 old_lru, new_lru;
	int ret = 0;

	/*
	 * Deletion only happens in the device removal path, with
	 * BTREE_TRIGGER_NORUN:
	 */
	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

	bch2_alloc_to_v4(old, &old_a);
	new_a = &bkey_i_to_alloc_v4(new)->v;

	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

	if (new_a->dirty_sectors > old_a.dirty_sectors ||
	    new_a->cached_sectors > old_a.cached_sectors) {
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
	}

	if (data_type_is_empty(new_a->data_type) &&
	    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		new_a->gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
	}

	if (old_a.data_type != new_a->data_type ||
	    (new_a->data_type == BCH_DATA_free &&
	     alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
		ret =   bch2_bucket_do_index(trans, old, &old_a, false) ?:
			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
		if (ret)
			return ret;
	}

	if (new_a->data_type == BCH_DATA_cached &&
	    !new_a->io_time[READ])
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	old_lru = alloc_lru_idx(old_a);
	new_lru = alloc_lru_idx(*new_a);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
				      old_lru, &new_lru, old);
		if (ret)
			return ret;

		if (new_a->data_type == BCH_DATA_cached)
			new_a->io_time[READ] = new_lru;
	}

	return 0;
}

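/*
 * fsck: for every alloc key, check that the need_discard and freespace
 * btrees have a matching entry - or correctly have none - and repair them
 * if not:
 */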
static int bch2_check_alloc_key(struct btree_trans *trans,
				struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct btree_iter discard_iter, freespace_iter;
	struct bch_alloc_v4 a;
	unsigned discard_key_type, freespace_key_type;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_alloc_to_v4(alloc_k, &a);

	discard_key_type = a.data_type == BCH_DATA_need_discard
		? KEY_TYPE_set : 0;
	freespace_key_type = a.data_type == BCH_DATA_free
		? KEY_TYPE_set : 0;

	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard,
			     alloc_k.k->p, 0);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace,
			     alloc_freespace_pos(alloc_k.k->p, a), 0);

	k = bch2_btree_iter_peek_slot(&discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type, c,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter.pos;

		ret = bch2_trans_update(trans, &discard_iter, update, 0);
		if (ret)
			goto err;
	}

	k = bch2_btree_iter_peek_slot(&freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type, c,
			"incorrect key in freespace btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter.pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, &freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	printbuf_exit(&buf2);
	printbuf_exit(&buf);
	return ret;
}

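/*
 * fsck, the other direction: every entry in the need_discard or freespace
 * btrees must point at an alloc key for a bucket in the corresponding state:
 */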
static int bch2_check_discard_freespace_key(struct btree_trans *trans,
					    struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c k, freespace_k;
	struct bch_alloc_v4 a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	freespace_k = bch2_btree_iter_peek(iter);
	if (!freespace_k.k)
		return 1;

	ret = bkey_err(freespace_k);
	if (ret)
		return ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
		goto delete;

	k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bch2_alloc_to_v4(k, &a);

	if (fsck_err_on(a.data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(a)), c,
			"%s\n  incorrectly set in %s index (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
			bch2_btree_ids[iter->btree_id],
			a.data_type == state,
			genbits >> 56, alloc_freespace_genbits(a) >> 56))
		goto delete;
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);
	goto out;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_alloc_key(&trans, &iter));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_discard_freespace_key(&trans, &iter));
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ret)
		goto err;

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
			bch2_check_discard_freespace_key(&trans, &iter));
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(&trans, &iter);
err:
	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}

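/*
 * fsck: every bucket with cached data must have an lru btree entry pointing
 * back at it, or the invalidate path won't be able to find buckets to reuse:
 */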
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a;
	struct bkey_s_c alloc_k, k;
	struct printbuf buf = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	bch2_alloc_to_v4(alloc_k, &a);

	if (a.data_type != BCH_DATA_cached)
		return 0;

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(alloc_k.k->p.inode, a.io_time[READ]), 0);

	k = bch2_btree_iter_peek_slot(&lru_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(!a.io_time[READ], c,
			"cached bucket with read_time 0\n"
			"  %s",
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
	    fsck_err_on(k.k->type != KEY_TYPE_lru ||
			le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
			"incorrect/missing lru entry\n"
			"  %s\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
		u64 read_time = a.io_time[READ];

		if (!a.io_time[READ])
			a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);

		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   alloc_k.k->p.offset,
				   &a.io_time[READ]);
		if (ret)
			goto err;

		if (a.io_time[READ] != read_time) {
			struct bkey_i_alloc_v4 *a_mut =
				bch2_alloc_to_v4_mut(trans, alloc_k);
			ret = PTR_ERR_OR_ZERO(a_mut);
			if (ret)
				goto err;

			a_mut->v.io_time[READ] = a.io_time[READ];
			ret = bch2_trans_update(trans, alloc_iter,
						&a_mut->k_i, BTREE_TRIGGER_NORUN);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf2);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_NOFAIL|
				      BTREE_INSERT_LAZY_RW,
			bch2_check_alloc_to_lru_ref(&trans, &iter));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}

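/*
 * Discard path: once the journal entry that freed a bucket is flushed, the
 * bucket can be discarded and moved from the need_discard btree to the
 * freespace btree:
 */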
static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
				   struct bch_dev *ca, bool *discard_done)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, pos,
			     BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		a->v.gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
		goto write;
	}

	if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
			"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
			"%s",
			a->v.journal_seq,
			c->journal.flushed_seq_ondisk,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = -EIO;
		goto out;
	}

	if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
			"bucket incorrectly set in need_discard btree\n"
			"%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = -EIO;
		goto out;
	}

	if (!*discard_done && ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL, 0);
		*discard_done = true;

		ret = bch2_trans_relock(trans) ? 0 : -EINTR;
		if (ret)
			goto out;
	}
write:
	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct bch_dev *ca = NULL;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_need_discard,
			   POS_MIN, 0, k, ret) {
		bool discard_done = false;

		if (ca && k.k->p.inode != ca->dev_idx) {
			percpu_ref_put(&ca->io_ref);
			ca = NULL;
		}

		if (!ca) {
			ca = bch_dev_bkey_exists(c, k.k->p.inode);
			if (!percpu_ref_tryget(&ca->io_ref)) {
				ca = NULL;
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}
		}

		seen++;

		if (bch2_bucket_is_open_safe(c, k.k->p.inode, k.k->p.offset)) {
			open++;
			continue;
		}

		if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
				c->journal.flushed_seq_ondisk,
				k.k->p.inode, k.k->p.offset)) {
			need_journal_commit++;
			continue;
		}

		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_USE_RESERVE|
				      BTREE_INSERT_NOFAIL,
				bch2_clear_need_discard(&trans, k.k->p, ca, &discard_done));
		if (ret)
			break;

		discarded++;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (ca)
		percpu_ref_put(&ca->io_ref);

	bch2_trans_exit(&trans);

	if (need_journal_commit * 2 > seen)
		bch2_journal_flush_async(&c->journal, NULL);

	percpu_ref_put(&c->writes);

	trace_do_discards(c, seen, open, need_journal_commit, discarded, ret);
}

void bch2_do_discards(struct bch_fs *c)
{
	if (percpu_ref_tryget(&c->writes) &&
	    !queue_work(system_long_wq, &c->discard_work))
		percpu_ref_put(&c->writes);
}

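/*
 * Invalidate path: when free buckets run low, take the least recently used
 * cached-data bucket off the lru btree and bump its gen, invalidating any
 * remaining cached pointers into it:
 */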
static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter, alloc_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	u64 bucket, idx;
	struct printbuf buf = PRINTBUF;
	int ret;

	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
			     POS(ca->dev_idx, 0), 0);
	k = bch2_btree_iter_peek(&lru_iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	if (!k.k || k.k->p.inode != ca->dev_idx)
		goto out;

	if (bch2_trans_inconsistent_on(k.k->type != KEY_TYPE_lru, trans,
				       "non lru key in lru btree"))
		goto out;

	idx	= k.k->p.offset;
	bucket	= le64_to_cpu(bkey_s_c_to_lru(k).v->idx);

	a = bch2_trans_start_alloc_update(trans, &alloc_iter,
					  POS(ca->dev_idx, bucket));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (idx != alloc_lru_idx(a->v)) {
		pr_buf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n  ");

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		pr_buf(&buf, "\n  ");
		bch2_bkey_val_to_text(&buf, c, k);
		bch2_trans_inconsistent(trans, "%s", buf.buf);
		ret = -EINVAL;
		goto out;
	}

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE);
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct bch_dev *ca;
	struct btree_trans trans;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_member_device(ca, c, i) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		while (!ret && nr_to_invalidate-- >= 0)
			ret = __bch2_trans_do(&trans, NULL, NULL,
					      BTREE_INSERT_USE_RESERVE|
					      BTREE_INSERT_NOFAIL,
					invalidate_one_bucket(&trans, ca));
	}

	bch2_trans_exit(&trans);
	percpu_ref_put(&c->writes);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	if (percpu_ref_tryget(&c->writes))
		queue_work(system_long_wq, &c->invalidate_work);
}

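/*
 * Upgrade path: on filesystems created before the freespace btree existed,
 * build the need_discard/freespace indices from the alloc btree, then
 * persistently mark the member device so this only runs once:
 */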
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct bch_member *m;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc,
			   POS(ca->dev_idx, ca->mi.first_bucket),
			   BTREE_ITER_SLOTS|
			   BTREE_ITER_PREFETCH, k, ret) {
		if (iter.pos.offset >= ca->mi.nbuckets)
			break;

		bch2_alloc_to_v4(k, &a);
		ret = __bch2_trans_do(&trans, NULL, NULL,
				      BTREE_INSERT_LAZY_RW,
				bch2_bucket_do_index(&trans, k, &a, true));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);

	if (ret) {
		bch_err(ca, "error initializing free space: %i", ret);
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_fs_freespace_init(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);

		bch_verbose(c, "done initializing freespace");
	}

	return ret;
}

/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* Startup/shutdown (ro/rw): */

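/*
 * Recompute usable capacity: sum the capacity of all online rw devices, then
 * subtract the reserves needed for copygc, btree writes and gc headroom:
 */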
void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again once its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	/* Next, close write points that point to this device... */
	for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
		bch2_writepoint_stop(c, ca, &c->write_points[i]);

	bch2_writepoint_stop(c, ca, &c->copygc_write_point);
	bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
	bch2_writepoint_stop(c, ca, &c->btree_write_point);

	mutex_lock(&c->btree_reserve_cache_lock);
	while (c->btree_reserve_cache_nr) {
		struct btree_alloc *a =
			&c->btree_reserve_cache[--c->btree_reserve_cache_nr];

		bch2_open_buckets_put(c, &a->ob);
	}
	mutex_unlock(&c->btree_reserve_cache_lock);

	while (1) {
		struct open_bucket *ob;

		spin_lock(&c->freelist_lock);
		if (!ca->open_buckets_partial_nr) {
			spin_unlock(&c->freelist_lock);
			break;
		}
		ob = c->open_buckets +
			ca->open_buckets_partial[--ca->open_buckets_partial_nr];
		ob->on_partial_list = false;
		spin_unlock(&c->freelist_lock);

		bch2_open_bucket_put(c, ob);
	}

	bch2_ec_stop_dev(c, ca);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}