// SPDX-License-Identifier: GPL-2.0
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets_waiting_for_journal.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};
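/*
 * A note on the x() macros above and below: these are X-macro expansions.
 * Assuming BCH_ALLOC_FIELDS_V2() is a list of the form
 * x(read_time, 64) x(write_time, 64) x(dirty_sectors, 32) ... (matching the
 * u.read_time/u.write_time/u.dirty_sectors uses further down), the struct
 * above gains one "u64 read_time;" style member per entry, and the unpack
 * functions re-expand the same list with a different x() to generate their
 * decode steps -- the field list lives in exactly one place.
 */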
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
									\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
									\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}
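/*
 * Worked example for the sizing above (hypothetical field layout): with two
 * fields bits set whose BCH_ALLOC_V1_FIELD_BYTES entries are 2 and 4, and
 * assuming data[] starts 2 bytes into struct bch_alloc, the value is
 * 2 + 2 + 4 = 8 bytes, which DIV_ROUND_UP() rounds to a single u64.
 */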
int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

	/* allow for unknown fields */
	if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
		prt_printf(err, "incorrect value size (%zu < %u)",
			   bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v2(&u, k)) {
		prt_printf(err, "unpack error");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;

	if (bch2_alloc_unpack_v3(&u, k)) {
		prt_printf(err, "unpack error");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
			  enum bkey_invalid_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int rw = flags & WRITE;

	if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) {
		prt_printf(err, "bad val size (%u > %lu)",
			   alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
		return -BCH_ERR_invalid_bkey;
	}

	if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
	    BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
		prt_printf(err, "invalid backpointers_start");
		return -BCH_ERR_invalid_bkey;
	}

	if (rw == WRITE &&
	    !(flags & BKEY_INVALID_JOURNAL) &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_btree_backpointers) {
		unsigned i, bp_len = 0;

		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
			bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;

		if (bp_len > a.v->dirty_sectors) {
			prt_printf(err, "too many backpointers");
			return -BCH_ERR_invalid_bkey;
		}
	}

	if (rw == WRITE) {
		if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
			prt_printf(err, "invalid data type (got %u should be %u)",
				   a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
			return -BCH_ERR_invalid_bkey;
		}

		switch (a.v->data_type) {
		case BCH_DATA_free:
		case BCH_DATA_need_gc_gens:
		case BCH_DATA_need_discard:
			if (a.v->dirty_sectors ||
			    a.v->cached_sectors ||
			    a.v->stripe) {
				prt_printf(err, "empty data type free but have data");
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_sb:
		case BCH_DATA_journal:
		case BCH_DATA_btree:
		case BCH_DATA_user:
		case BCH_DATA_parity:
			if (!a.v->dirty_sectors) {
				prt_printf(err, "data_type %s but dirty_sectors==0",
					   bch2_data_types[a.v->data_type]);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_cached:
			if (!a.v->cached_sectors ||
			    a.v->dirty_sectors ||
			    a.v->stripe) {
				prt_printf(err, "data type inconsistency");
				return -BCH_ERR_invalid_bkey;
			}

			if (!a.v->io_time[READ] &&
			    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs) {
				prt_printf(err, "cached bucket with read_time == 0");
				return -BCH_ERR_invalid_bkey;
			}
			break;
		case BCH_DATA_stripe:
			if (!a.v->stripe) {
				prt_printf(err, "data_type %s but stripe==0",
					   bch2_data_types[a.v->data_type]);
				return -BCH_ERR_invalid_bkey;
			}
			break;
		}
	}

	return 0;
}

static inline u64 swab40(u64 x)
{
	return (((x & 0x00000000ffULL) << 32)|
		((x & 0x000000ff00ULL) << 16)|
		((x & 0x0000ff0000ULL) >>  0)|
		((x & 0x00ff000000ULL) >> 16)|
		((x & 0xff00000000ULL) >> 32));
}
void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq = swab64(a->journal_seq);
	a->flags = swab32(a->flags);
	a->dirty_sectors = swab32(a->dirty_sectors);
	a->cached_sectors = swab32(a->cached_sectors);
	a->io_time[0] = swab64(a->io_time[0]);
	a->io_time[1] = swab64(a->io_time[1]);
	a->stripe = swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset = swab40(bp->bucket_offset);
		bp->bucket_len = swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
	unsigned i;

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type %s",
		   a->gen, a->oldest_gen,
		   a->data_type < BCH_DATA_NR
		   ? bch2_data_types[a->data_type]
		   : "(invalid data type)");
	prt_newline(out);
	prt_printf(out, "journal_seq %llu", a->journal_seq);
	prt_newline(out);
	prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_newline(out);
	prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_newline(out);
	prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
	prt_newline(out);
	prt_printf(out, "cached_sectors %u", a->cached_sectors);
	prt_newline(out);
	prt_printf(out, "stripe %u", a->stripe);
	prt_newline(out);
	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
	prt_newline(out);
	prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
	prt_newline(out);
	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
	prt_newline(out);
	prt_printf(out, "fragmentation %llu", a->fragmentation_lru);
	prt_newline(out);
	prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	prt_newline(out);

	if (BCH_ALLOC_V4_NR_BACKPOINTERS(a)) {
		struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);

		prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
		printbuf_indent_add(out, 2);

		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
			prt_newline(out);
			bch2_backpointer_to_text(out, &bps[i]);
		}

		printbuf_indent_sub(out, 2);
	}

	printbuf_indent_sub(out, 2);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}
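/*
 * Note the fast path/slow path split above: bch2_alloc_to_v4_mut_inlined()
 * handles the common case -- a key that is already KEY_TYPE_alloc_v4 with no
 * inline backpointers -- as a plain mutable copy, and punts everything else
 * (older key versions, keys carrying backpointers) to the noinline
 * __bch2_alloc_to_v4_mut() conversion path, keeping the inlined code small.
 */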
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
			      struct bpos pos)
{
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	int ret;

	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
			       BTREE_ITER_WITH_UPDATES|
			       BTREE_ITER_CACHED|
			       BTREE_ITER_INTENT);
	ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret)) {
		bch2_trans_iter_exit(trans, iter);
		return ERR_PTR(ret);
	}

	return a;
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}
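/*
 * Worked example for the position packing above, assuming
 * KEY_TYPE_BUCKET_GENS_BITS == 8 (one bucket_gens key covers 256 buckets):
 * alloc pos (dev 3, bucket 1000) maps to bucket_gens pos (3, 1000 >> 8) =
 * (3, 3) with offset 1000 & 255 = 232, and bucket_gens_pos_to_alloc((3, 3),
 * 232) maps back to (3, 3 * 256 + 232) = (3, 1000).
 */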
static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
		prt_printf(err, "bad val size (%lu != %zu)",
			   bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_alloc_v4 a;
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	unsigned offset;
	struct bpos pos;
	u8 gen;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		gen = bch2_alloc_to_v4(k, &a)->gen;
		pos = alloc_gens_pos(iter.pos, &offset);

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret = commit_do(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW,
				__bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));
			if (ret)
				break;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (have_bucket_gens_key && !ret)
		ret = commit_do(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL|
				BTREE_INSERT_LAZY_RW,
			__bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_exit(&trans);

	return ret;
}
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_dev *ca;
	int ret = 0;

	down_read(&c->gc_lock);
	bch2_trans_init(&trans, c, 0, 0);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		const struct bch_bucket_gens *g;
		u64 b;

		for_each_btree_key(&trans, iter, BTREE_ID_bucket_gens, POS_MIN,
				   BTREE_ITER_PREFETCH, k, ret) {
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			g = bkey_s_c_to_bucket_gens(k).v;

			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_exists2(c, k.k->p.inode))
				continue;

			ca = bch_dev_bkey_exists(c, k.k->p.inode);

			for (b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
		}
		bch2_trans_iter_exit(&trans, &iter);
	} else {
		struct bch_alloc_v4 a;

		for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
				   BTREE_ITER_PREFETCH, k, ret) {
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!bch2_dev_bucket_exists(c, k.k->p))
				continue;

			ca = bch_dev_bkey_exists(c, k.k->p.inode);

			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
		}
		bch2_trans_iter_exit(&trans, &iter);
	}

	bch2_trans_exit(&trans);
	up_read(&c->gc_lock);

	return ret;
}

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_INTENT);
	ret = bkey_err(old);
	if (ret)
		goto err;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_ids[btree],
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_trans_mark_alloc(struct btree_trans *trans,
			  enum btree_id btree_id, unsigned level,
			  struct bkey_s_c old, struct bkey_i *new,
			  unsigned flags)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 old_a_convert, *new_a;
	const struct bch_alloc_v4 *old_a;
	u64 old_lru, new_lru;
	int ret = 0;

	/*
	 * Deletion only happens in the device removal path, with
	 * BTREE_TRIGGER_NORUN:
	 */
	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

	old_a = bch2_alloc_to_v4(old, &old_a_convert);
	new_a = &bkey_i_to_alloc_v4(new)->v;

	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

	if (new_a->dirty_sectors > old_a->dirty_sectors ||
	    new_a->cached_sectors > old_a->cached_sectors) {
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
		new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
		SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
	}

	if (data_type_is_empty(new_a->data_type) &&
	    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
	    !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
		new_a->gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
	}

	if (old_a->data_type != new_a->data_type ||
	    (new_a->data_type == BCH_DATA_free &&
	     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
		ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
		      bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
		if (ret)
			return ret;
	}

	if (new_a->data_type == BCH_DATA_cached &&
	    !new_a->io_time[READ])
		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	old_lru = alloc_lru_idx_read(*old_a);
	new_lru = alloc_lru_idx_read(*new_a);

	if (old_lru != new_lru) {
		ret = bch2_lru_change(trans, new->k.p.inode,
				      bucket_to_u64(new->k.p),
				      old_lru, new_lru);
		if (ret)
			return ret;
	}

	new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
					bch_dev_bkey_exists(c, new->k.p.inode));

	if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
		ret = bch2_lru_change(trans,
				      BCH_LRU_FRAGMENTATION_START,
				      bucket_to_u64(new->k.p),
				      old_a->fragmentation_lru, new_a->fragmentation_lru);
		if (ret)
			return ret;
	}

	if (old_a->gen != new_a->gen) {
		ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		if (!bpos_eq(iter->path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(iter->path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}
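/*
 * Sketch of the calling convention (cf. bch2_check_alloc_info() and
 * bch2_dev_freespace_init() below); process_live_key()/process_hole() are
 * hypothetical stand-ins:
 *
 *	while ((k = bch2_get_key_or_hole(&iter, end, &hole)).k &&
 *	       !(ret = bkey_err(k))) {
 *		if (k.k->type)
 *			process_live_key(k);
 *		else
 *			process_hole(k);
 *		bch2_btree_iter_set_pos(&iter, k.k->type
 *					? bpos_nosnap_successor(k.k->p)
 *					: k.k->p);
 *	}
 */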
static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
	struct bch_dev *ca;
	unsigned iter;

	if (bch2_dev_bucket_exists(c, *bucket))
		return true;

	if (bch2_dev_exists2(c, bucket->inode)) {
		ca = bch_dev_bkey_exists(c, bucket->inode);

		if (bucket->offset < ca->mi.first_bucket) {
			bucket->offset = ca->mi.first_bucket;
			return true;
		}

		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	iter = bucket->inode;
	ca = __bch2_next_dev(c, &iter, NULL);
	if (ca)
		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
	rcu_read_unlock();

	return ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	if (!k.k->type) {
		struct bpos bucket = bkey_start_pos(k.k);

		if (!bch2_dev_bucket_exists(c, bucket)) {
			if (!next_bucket(c, &bucket))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, bucket);
			goto again;
		}

		if (!bch2_dev_bucket_exists(c, k.k->p)) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
		}
	}

	return k;
}
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		return bch2_btree_delete_at(trans, alloc_iter, 0);

	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != discard_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[discard_key_type],
		      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = discard_key_type;
		update->k.p = discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != freespace_key_type &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
		      "  %s",
		      bch2_bkey_types[k.k->type],
		      bch2_bkey_types[freespace_key_type],
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = freespace_key_type;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (a->gen != alloc_gen(k, gens_offset) &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
		      "  %s",
		      alloc_gen(k, gens_offset), a->gen,
		      (printbuf_reset(&buf),
		       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ca = bch_dev_bkey_exists(c, start.inode);
	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (k.k->type != KEY_TYPE_set &&
	    (c->opts.reconstruct_alloc ||
	     fsck_err(c, "hole in alloc btree missing in freespace btree\n"
		      " device %llu buckets %llu-%llu",
		      freespace_iter->pos.inode,
		      freespace_iter->pos.offset,
		      end->offset))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type = KEY_TYPE_set;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	if (c->sb.version < bcachefs_metadata_version_bucket_gens)
		return 0;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(k);
			if (ret)
				goto err;

			memcpy(k, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_trans *trans,
						  struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_ids[iter->btree_id],
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret = bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL,
			BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW);
	goto out;
}

static int bch2_check_discard_freespace_key(struct btree_trans *trans,
					    struct btree_iter *iter,
					    struct bpos end)
{
	if (!btree_id_is_extents(iter->btree_id)) {
		return __bch2_check_discard_freespace_key(trans, iter);
	} else {
		int ret = 0;

		while (!bkey_eq(iter->pos, end) &&
		       !(ret = btree_trans_too_many_iters(trans) ?:
			       __bch2_check_discard_freespace_key(trans, iter)))
			bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));

		return ret;
	}
}
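/*
 * Why the extents special case above: freespace keys are extents -- a run of
 * free buckets with identical genbits becomes one KEY_TYPE_set key (cf. the
 * bch2_key_resize() calls in bch2_bucket_do_index() and
 * bch2_check_alloc_hole_freespace()) -- so one freespace key may require the
 * check to run once per bucket position it covers.
 */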
/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	struct bch_dev *ca;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false, dev_exists;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	/* if no bch_dev, skip out whether we repair or not */
	dev_exists = bch2_dev_exists2(c, k.k->p.inode);

	if (fsck_err_on(!dev_exists, c,
			"bucket_gens key for invalid device:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			"bucket_gens key for invalid buckets:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *k;

		k = bch2_trans_kmalloc(trans, sizeof(g));
		ret = PTR_ERR_OR_ZERO(k);
		if (ret)
			goto out;

		memcpy(k, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, k, 0);
	}
out:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_PREFETCH);
	bch2_trans_iter_init(&trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_PREFETCH);

	while (1) {
		struct bpos next;

		bch2_trans_begin(&trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(&trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(&trans,
							      bkey_start_pos(k.k),
							      &next,
							      &freespace_iter) ?:
			      bch2_check_alloc_hole_bucket_gens(&trans,
							      bkey_start_pos(k.k),
							      &next,
							      &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &bucket_gens_iter);
	bch2_trans_iter_exit(&trans, &freespace_iter);
	bch2_trans_iter_exit(&trans, &discard_iter);
	bch2_trans_iter_exit(&trans, &iter);

	if (ret < 0)
		goto err;

	ret = for_each_btree_key2(&trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_PREFETCH, k,
		bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
	      for_each_btree_key2(&trans, iter,
			BTREE_ID_freespace, POS_MIN,
			BTREE_ITER_PREFETCH, k,
		bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
	      for_each_btree_key_commit(&trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
		bch2_check_bucket_gens_key(&trans, &iter, k));
err:
	bch2_trans_exit(&trans);
	return ret < 0 ? ret : 0;
}
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, lru_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
				   lru_pos(alloc_k.k->p.inode,
					   bucket_to_u64(alloc_k.k->p),
					   a->io_time[READ]), 0);
	ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (fsck_err_on(!a->io_time[READ], c,
			"cached bucket with read_time 0\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
	    fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
			"missing lru entry\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		u64 read_time = a->io_time[READ] ?:
			atomic64_read(&c->io_clock[READ].now);

		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   read_time);
		if (ret)
			goto err;

		if (a->io_time[READ] != read_time) {
			struct bkey_i_alloc_v4 *a_mut =
				bch2_alloc_to_v4_mut(trans, alloc_k);
			ret = PTR_ERR_OR_ZERO(a_mut);
			if (ret)
				goto err;

			a_mut->v.io_time[READ] = read_time;
			ret = bch2_trans_update(trans, alloc_iter,
						&a_mut->k_i, BTREE_TRIGGER_NORUN);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_PREFETCH, k,
				NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			bch2_check_alloc_to_lru_ref(&trans, &iter)));
	return ret;
}
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   u64 *seen,
				   u64 *open,
				   u64 *need_journal_commit,
				   u64 *discarded)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bch_dev *ca;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	ca = bch_dev_bkey_exists(c, pos.inode);
	if (!percpu_ref_tryget(&ca->io_ref)) {
		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
		return 0;
	}

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		(*open)++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		(*need_journal_commit)++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_CACHED);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
		a->v.gen++;
		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
		goto write;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
			bch2_trans_inconsistent(trans,
				"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
				"%s",
				a->v.journal_seq,
				c->journal.flushed_seq_ondisk,
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
			bch2_trans_inconsistent(trans,
				"bucket incorrectly set in need_discard btree\n"
				"%s",
				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
			ret = -EIO;
		}
		goto out;
	}

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;

	this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
	(*discarded)++;
out:
	bch2_trans_iter_exit(trans, &iter);
	percpu_ref_put(&ca->io_ref);
	printbuf_exit(&buf);
	return ret;
}
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = for_each_btree_key2(&trans, iter,
			BTREE_ID_need_discard, POS_MIN, 0, k,
		bch2_discard_one_bucket(&trans, &iter, &discard_pos_done,
					&seen,
					&open,
					&need_journal_commit,
					&discarded));

	bch2_trans_exit(&trans);

	if (need_journal_commit * 2 > seen)
		bch2_journal_flush_async(&c->journal, NULL);

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);

	trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
			      bch2_err_str(ret));
}

void bch2_do_discards(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
	    !queue_work(c->write_ref_wq, &c->discard_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
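/*
 * The tryget/queue_work pairing above is the pattern used for this file's
 * background work: take a write ref so the filesystem can't go read-only
 * with work still pending; if queue_work() returns false the item was
 * already queued and owns a ref, so the extra ref is dropped immediately.
 * The worker itself drops its ref on completion (see
 * bch2_do_discards_work() above and bch2_do_invalidates_work() below).
 */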
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type = 0;
	a->v.dirty_sectors = 0;
	a->v.cached_sectors = 0;
	a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);

	ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
	      bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BTREE_INSERT_NOFAIL);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct bch_dev *ca;
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	unsigned i;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	ret = bch2_btree_write_buffer_flush(&trans);
	if (ret)
		goto err;

	for_each_member_device(ca, c, i) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_INTENT, k,
			invalidate_one_bucket(&trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}
err:
	bch2_trans_exit(&trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
				   unsigned long *last_updated)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
	struct bch_member *m;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, ca->mi.first_bucket),
			     BTREE_ITER_PREFETCH);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (*last_updated + HZ * 10 < jiffies) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			*last_updated = jiffies;
		}

		bch2_trans_begin(&trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret = bch2_bucket_do_index(&trans, k, a, true) ?:
			      bch2_trans_commit(&trans, NULL, NULL,
						BTREE_INSERT_LAZY_RW|
						BTREE_INSERT_NOFAIL);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type = KEY_TYPE_set;
			freespace->k.p = k.k->p;
			freespace->k.size = k.k->size;

			ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace, 0) ?:
			      bch2_trans_commit(&trans, NULL, NULL,
						BTREE_INSERT_LAZY_RW|
						BTREE_INSERT_NOFAIL);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(&trans, &iter);
	bch2_trans_exit(&trans);

	if (ret < 0) {
		bch_err(ca, "error initializing free space: %s", bch2_err_str(ret));
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}
int bch2_fs_freespace_init(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;
	bool doing_init = false;
	unsigned long last_updated = jiffies;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * mount:
	 */
	for_each_member_device(ca, c, i) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, &last_updated);
		if (ret) {
			percpu_ref_put(&ca->ref);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return ret;
}
/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
	      bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	struct bch_dev *ca;
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;
	unsigned i;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(ca, c, i) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(ca, c, i) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1; /* btree write point */
		dev_reserve += 1; /* copygc write point */
		dev_reserve += 1; /* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets: */
	closure_wake_up(&c->freelist_wait);
}
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based on devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}