// SPDX-License-Identifier: GPL-2.0

#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
/* Persistent alloc info: */
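/*
 * The on-disk alloc key format has been revised several times
 * (KEY_TYPE_alloc, alloc_v2, alloc_v3, alloc_v4); the unpack helpers below
 * lift the older packed formats into struct bch_alloc_v4 so the rest of the
 * code only deals with the current representation.
 */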
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	v = *((const u8 *) *p);
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)						\
	if (fieldnr < a.v->nr_fields) {				\
		ret = bch2_varint_decode_fast(in, end, &v);	\
		if (ret < 0)					\
			return ret;				\
		in += ret;					\
	} else {						\
		v = 0;						\
	}							\
	out->_name = v;						\
	if (v != out->_name)					\
		return -1;					\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}
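/*
 * Rough shape of the v2/v3 packed encoding, as implied by the decode loop
 * above: fields are varints in a fixed order, with nr_fields giving how many
 * are present. Fields past nr_fields decode as 0, which is what makes adding
 * new fields backwards compatible; the "v != out->_name" check catches a
 * decoded value that doesn't fit the narrower unpacked field.
 */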
static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)						\
	if (fieldnr < a.v->nr_fields) {				\
		ret = bch2_varint_decode_fast(in, end, &v);	\
		if (ret < 0)					\
			return ret;				\
		in += ret;					\
	} else {						\
		v = 0;						\
	}							\
	out->_name = v;						\
	if (v != out->_name)					\
		return -1;					\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
			 alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}
int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}
int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}
int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
			 alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
			 alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
			 alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
				 c, err, alloc_key_empty_but_have_data,
				 "empty data type free but have data");
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
				 c, err, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 bch2_bucket_sectors_dirty(*a.v) ||
				 a.v->stripe,
				 c, err, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, err, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}
void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru	= swab64(a->fragmentation_lru);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq %llu", a->journal_seq);
	prt_newline(out);
	prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_newline(out);
	prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_newline(out);
	prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
	prt_newline(out);
	prt_printf(out, "cached_sectors %u", a->cached_sectors);
	prt_newline(out);
	prt_printf(out, "stripe %u", a->stripe);
	prt_newline(out);
	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
	prt_newline(out);
	prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
	prt_newline(out);
	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
	prt_newline(out);
	prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
	prt_newline(out);
	prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}
void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}
static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}
static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}
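/*
 * Fast path: a key that is already alloc_v4 and carries no backpointers in
 * its value can be copied verbatim with bch2_bkey_make_mut_noupdate_typed();
 * anything else takes the noinline slow path above, which also converts the
 * older key formats.
 */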
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
			       BTREE_ITER_WITH_UPDATES|
			       BTREE_ITER_CACHED|
			       BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
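/*
 * Worked example of the mapping above, assuming the 8-bit packing
 * (KEY_TYPE_BUCKET_GENS_BITS == 8, so 256 gens per bucket_gens key):
 * alloc key 1:1000 maps to bucket_gens key 1:3 at offset 232, since
 * 1000 == 3 * 256 + 232.
 */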
static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}
int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
			 bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}
void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}
int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_PREFETCH, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret = commit_do(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc,
				bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
			if (ret)
				break;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
		0;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret;

	down_read(&c->gc_lock);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_exists2(c, k.k->p.inode))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_bucket_exists(c, k.k->p))
				continue;

			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_trans_put(trans);
	up_read(&c->gc_lock);

	bch_err_fn(c, ret);
	return ret;
}
/* Free space/discard btree: */
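/*
 * Rough shape of the two indexes maintained here, as used by
 * bch2_bucket_do_index() below:
 *
 *  - need_discard: keyed by bucket position directly; a KEY_TYPE_set key
 *    means the bucket still needs a discard before reuse.
 *  - freespace: keyed by alloc_freespace_pos(), which (judging by the decode
 *    in bch2_check_discard_freespace_key()) packs generation bits into the
 *    top bits of the offset above the 56-bit bucket number, so stale
 *    freespace entries from a previous generation can be detected.
 */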
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_INTENT);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret = 0;

	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
				       "alloc key for invalid device or bucket"))
		return -EIO;

	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

		if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
			new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
				bch2_bucket_do_index(trans, new.s_c, new_a, true);
			if (ret)
				return ret;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				return ret;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
						bch_dev_bkey_exists(c, new.k->p.inode));
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					BCH_LRU_FRAGMENTATION_START,
					bucket_to_u64(new.k->p),
					old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				return ret;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				return ret;
		}

		/*
		 * need to know if we're getting called from the invalidate path or
		 * not:
		 */

		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
		    old_a->cached_sectors) {
			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
							      -((s64) old_a->cached_sectors));
			if (ret)
				return ret;
		}
	}
	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_INSERT) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (ret) {
				bch2_fs_fatal_error(c,
					"error setting bucket_needs_journal_commit: %i", ret);
				return ret;
			}
		}

		percpu_down_read(&c->mark_lock);
		if (new_a->gen != old_a->gen)
			*bucket_gen(ca, new.k->p.offset) = new_a->gen;

		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
		percpu_up_read(&c->mark_lock);

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)
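		/*
		 * statechange(expr) is an edge detector: it evaluates expr
		 * once against the old alloc state and once against the new
		 * one, and is true only for a false -> true transition. E.g.
		 * statechange(a->data_type == BCH_DATA_free) fires only when
		 * a bucket *becomes* free, so the wakeups below run once per
		 * transition rather than on every update of an already-free
		 * bucket.
		 */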
		if (statechange(a->data_type == BCH_DATA_free &&
				bucket_flushed(a)))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard &&
				bucket_flushed(a)) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset))
			bch2_do_discards(c);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_do_invalidates(c);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}
	if ((flags & BTREE_TRIGGER_GC) &&
	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
		struct bch_alloc_v4 new_a_convert;
		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, new.k->p.offset);

		bucket_lock(g);

		g->gen_valid		= 1;
		g->gen			= new_a->gen;
		g->data_type		= new_a->data_type;
		g->stripe		= new_a->stripe;
		g->stripe_redundancy	= new_a->stripe_redundancy;
		g->dirty_sectors	= new_a->dirty_sectors;
		g->cached_sectors	= new_a->cached_sectors;

		bucket_unlock(g);
		percpu_up_read(&c->mark_lock);
	}

	return 0;
}
/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}
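/*
 * A sketch of how callers consume this (see bch2_check_alloc_info() and
 * bch2_dev_freespace_init() below for the real loops):
 *
 *	k = bch2_get_key_or_hole(&iter, end, &hole);
 *	if (k.k->type)
 *		... handle a real key ...
 *	else
 *		... handle a synthesized deleted key spanning the hole ...
 */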
static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
	struct bch_dev *ca;

	if (bch2_dev_bucket_exists(c, *bucket))
		return true;

	if (bch2_dev_exists2(c, bucket->inode)) {
		ca = bch_dev_bkey_exists(c, bucket->inode);

		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
			return true;
		}

		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (ca)
		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
	rcu_read_unlock();

	return ca != NULL;
}
static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	if (!k.k->type) {
		struct bpos bucket = bkey_start_pos(k.k);

		if (!bch2_dev_bucket_exists(c, bucket)) {
			if (!next_bucket(c, &bucket))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, bucket);
			goto again;
		}

		if (!bch2_dev_bucket_exists(c, k.k->p)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
		}
	}

	return k;
}
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != discard_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, need_discard_key_wrong,
		      "incorrect key in need_discard btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[discard_key_type],
		      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != freespace_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, freespace_key_wrong,
		      "incorrect key in freespace btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[freespace_key_type],
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (a->gen != alloc_gen(k, gens_offset) &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, bucket_gens_key_wrong,
		      "incorrect gen in bucket_gens btree (got %u should be %u)\n"
		      "  %s",
		      alloc_gen(k, gens_offset), a->gen,
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ca = bch_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (k.k->type != KEY_TYPE_set &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, freespace_hole_missing,
		      "hole in alloc btree missing in freespace btree\n"
		      "  device %llu buckets %llu-%llu",
		      freespace_iter->pos.inode,
		      freespace_iter->pos.offset,
		      end->offset))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
					       struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);
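	/*
	 * iter->pos.offset packs two things for freespace keys: the low 56
	 * bits are the bucket number, the high 8 bits are the generation
	 * bits added by alloc_freespace_pos().
	 */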
	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			need_discard_freespace_key_bad,
			"%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret =   bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc);
	goto out;
}
/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	struct bch_dev *ca;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false, dev_exists;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	/* if no bch_dev, skip out whether we repair or not */
	dev_exists = bch2_dev_exists2(c, k.k->p.inode);

	if (fsck_err_on(!dev_exists, c,
			bucket_gens_to_invalid_dev,
			"bucket_gens key for invalid device:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_PREFETCH);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans,
						    bkey_start_pos(k.k),
						    &next,
						    &freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
						    bkey_start_pos(k.k),
						    &next,
						    &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_PREFETCH, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}
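/*
 * The check below enforces the alloc <-> lru invariant: every bucket with
 * data_type BCH_DATA_cached must have a corresponding KEY_TYPE_set entry in
 * the lru btree at lru_pos(device, bucket, io_time[READ]), so the invalidate
 * path can walk cached buckets in least-recently-read order.
 */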
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, lru_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	if (fsck_err_on(!a->io_time[READ], c,
			alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_NORUN);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
			lru_pos(alloc_k.k->p.inode,
				bucket_to_u64(alloc_k.k->p),
				a->io_time[READ]), 0);
	ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
			alloc_key_to_missing_lru_entry,
			"missing lru entry\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   a->io_time[READ]);
		if (ret)
			goto err;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_PREFETCH, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter)));
	bch_err_fn(c, ret);
	return ret;
}
struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
	struct bch_dev	*ca;
	u64		need_journal_commit_this_dev;
};
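/*
 * Called as the walk moves to the next device (or with NULL at the end): if
 * the buckets skipped on the previous device because they were still waiting
 * on a journal commit outnumber its free buckets, kick off an async journal
 * flush so they become discardable on a later pass.
 */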
static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
{
	if (s->ca == ca)
		return;

	if (s->ca && s->need_journal_commit_this_dev >
	    bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
		bch2_journal_flush_async(&c->journal, NULL);

	if (s->ca)
		percpu_ref_put(&s->ca->ref);
	if (ca)
		percpu_ref_get(&ca->ref);
	s->ca = ca;
	s->need_journal_commit_this_dev = 0;
}
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	ca = bch_dev_bkey_exists(c, pos.inode);

	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
		return 0;
	}

	discard_buckets_next_dev(c, s, ca);

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		a->v.gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
		goto write;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
			bch2_trans_inconsistent(trans,
				"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
				"%s",
				a->v.journal_seq,
				c->journal.flushed_seq_ondisk,
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
			bch2_trans_inconsistent(trans,
				"bucket incorrectly set in need_discard btree\n"
				"%s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}
		goto out;
	}

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);
	return ret;
}
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter,
				   BTREE_ID_need_discard, POS_MIN, 0, k,
			bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));

	discard_buckets_next_dev(c, &s, NULL);

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
void bch2_do_discards(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
	    !queue_work(c->write_ref_wq, &c->discard_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
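/*
 * Usual pattern for one-shot work tied to a write ref: take the ref, and if
 * queue_work() reports the item was already queued, drop the ref again - the
 * queued instance owns it, and bch2_do_discards_work() drops it on completion.
 */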
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
				  BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n  lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n  lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n  alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	for_each_member_device(c, ca) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_INTENT, k,
			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}
err:
	bch2_trans_put(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
		BTREE_ITER_PREFETCH);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (last_updated + HZ * 10 < jiffies) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(trans, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret =   bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			percpu_ref_put(&ca->ref);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}
/* Bucket IO clocks: */
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* Startup/shutdown (ro/rw): */
void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again once its
		 * reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);
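	/*
	 * Worked example (hypothetical numbers): a lone device with 1M
	 * buckets of 1024 sectors has capacity ~2^30 sectors; with a
	 * gc_reserve_percent of 8 and no gc_reserve_bytes, gc_reserve comes
	 * to ~86M sectors, which typically dominates the per-device reserves
	 * summed above.
	 */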
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}
u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}
/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}
/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}
void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}