// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
        BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
        u64             journal_seq;
        u8              gen;
        u8              oldest_gen;
        u8              data_type;
        bool            need_discard:1;
        bool            need_inc_gen:1;
#define x(_name, _bits) u##_bits _name;
        BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
                                     const void **p, unsigned field)
{
        unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
        u64 v;

        if (!(a->fields & (1 << field)))
                return 0;

        switch (bytes) {
        case 1:
                v = *((const u8 *) *p);
                break;
        case 2:
                v = le16_to_cpup(*p);
                break;
        case 4:
                v = le32_to_cpup(*p);
                break;
        case 8:
                v = le64_to_cpup(*p);
                break;
        default:
                BUG();
        }

        *p += bytes;
        return v;
}

static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
                                      unsigned field, u64 v)
{
        unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];

        if (!v)
                return;

        a->v.fields |= 1 << field;

        switch (bytes) {
        case 1:
                *((u8 *) *p) = v;
                break;
        case 2:
                *((__le16 *) *p) = cpu_to_le16(v);
                break;
        case 4:
                *((__le32 *) *p) = cpu_to_le32(v);
                break;
        case 8:
                *((__le64 *) *p) = cpu_to_le64(v);
                break;
        default:
                BUG();
        }

        *p += bytes;
}

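/*
 * v1 alloc keys are a bitmap of present fields (a->fields) followed by the
 * packed little-endian field values, in declaration order; the two helpers
 * above walk that encoding one field at a time, advancing *p by each field's
 * width.
 */
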
static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
                                 struct bkey_s_c k)
{
        const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
        const void *d = in->data;
        unsigned idx = 0;

        out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
        BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
                                                                        \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef x
        return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
                                struct bkey_s_c k)
{
        struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
        const u8 *in = a.v->data;
        const u8 *end = bkey_val_end(a);
        unsigned fieldnr = 0;
        int ret;
        u64 v;

        out->gen        = a.v->gen;
        out->oldest_gen = a.v->oldest_gen;
        out->data_type  = a.v->data_type;
        out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
        out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
        out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)                                                 \
        if (fieldnr < a.v->nr_fields) {                                 \
                ret = bch2_varint_decode_fast(in, end, &v);             \
                if (ret < 0)                                            \
                        return ret;                                     \
                in += ret;                                              \
        } else {                                                        \
                v = 0;                                                  \
        }                                                               \
                                                                        \
        out->_name = v;                                                 \
        if (v != out->_name)                                            \
                return -1;                                              \
        fieldnr++;

        BCH_ALLOC_FIELDS_V2()
#undef x
        return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
        struct bkey_alloc_unpacked ret = { .gen = 0 };

        switch (k.k->type) {
        case KEY_TYPE_alloc:
                bch2_alloc_unpack_v1(&ret, k);
                break;
        case KEY_TYPE_alloc_v2:
                bch2_alloc_unpack_v2(&ret, k);
                break;
        case KEY_TYPE_alloc_v3:
                bch2_alloc_unpack_v3(&ret, k);
                break;
        }

        return ret;
}

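/*
 * Three generations of on-disk alloc key precede bch_alloc_v4: v1 uses the
 * fixed-width fields decoded above, v2 switches to varints, and v3 adds
 * journal_seq plus the need_discard/need_inc_gen flags. bch2_alloc_unpack()
 * dispatches on key type so old keys remain readable; everything is converted
 * to the v4 representation before use.
 */
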
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
        unsigned i, bytes = offsetof(struct bch_alloc, data);

        for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
                if (a->fields & (1 << i))
                        bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

        return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
                          int rw, struct printbuf *err)
{
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);

        /* allow for unknown fields */
        if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
                prt_printf(err, "incorrect value size (%zu < %u)",
                       bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
                return -BCH_ERR_invalid_bkey;
        }

        return 0;
}

int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
                          int rw, struct printbuf *err)
{
        struct bkey_alloc_unpacked u;

        if (bch2_alloc_unpack_v2(&u, k)) {
                prt_printf(err, "unpack error");
                return -BCH_ERR_invalid_bkey;
        }

        return 0;
}

int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
                          int rw, struct printbuf *err)
{
        struct bkey_alloc_unpacked u;

        if (bch2_alloc_unpack_v3(&u, k)) {
                prt_printf(err, "unpack error");
                return -BCH_ERR_invalid_bkey;
        }

        return 0;
}

int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
                          int rw, struct printbuf *err)
{
        struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);

        if (alloc_v4_u64s(a.v) != bkey_val_u64s(k.k)) {
                prt_printf(err, "bad val size (%lu != %u)",
                       bkey_val_u64s(k.k), alloc_v4_u64s(a.v));
                return -BCH_ERR_invalid_bkey;
        }

        if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
            BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
                prt_printf(err, "invalid backpointers_start");
                return -BCH_ERR_invalid_bkey;
        }

        /*
         * XXX this is wrong, we'll be checking updates that happened from
         * before BCH_FS_CHECK_BACKPOINTERS_DONE
         */
        if (rw == WRITE && test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
                unsigned i, bp_len = 0;

                for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++)
                        bp_len += alloc_v4_backpointers_c(a.v)[i].bucket_len;

                if (bp_len > a.v->dirty_sectors) {
                        prt_printf(err, "too many backpointers");
                        return -BCH_ERR_invalid_bkey;
                }
        }

        if (rw == WRITE) {
                if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
                        prt_printf(err, "invalid data type (got %u should be %u)",
                               a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
                        return -BCH_ERR_invalid_bkey;
                }

                switch (a.v->data_type) {
                case BCH_DATA_free:
                case BCH_DATA_need_gc_gens:
                case BCH_DATA_need_discard:
                        if (a.v->dirty_sectors ||
                            a.v->cached_sectors ||
                            a.v->stripe) {
                                prt_printf(err, "empty data type free but have data");
                                return -BCH_ERR_invalid_bkey;
                        }
                        break;
                case BCH_DATA_sb:
                case BCH_DATA_journal:
                case BCH_DATA_btree:
                case BCH_DATA_user:
                case BCH_DATA_parity:
                        if (!a.v->dirty_sectors) {
                                prt_printf(err, "data_type %s but dirty_sectors==0",
                                       bch2_data_types[a.v->data_type]);
                                return -BCH_ERR_invalid_bkey;
                        }
                        break;
                case BCH_DATA_cached:
                        if (!a.v->cached_sectors ||
                            a.v->dirty_sectors ||
                            a.v->stripe) {
                                prt_printf(err, "data type inconsistency");
                                return -BCH_ERR_invalid_bkey;
                        }

                        if (!a.v->io_time[READ] &&
                            test_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags)) {
                                prt_printf(err, "cached bucket with read_time == 0");
                                return -BCH_ERR_invalid_bkey;
                        }
                        break;
                case BCH_DATA_stripe:
                        if (!a.v->stripe) {
                                prt_printf(err, "data_type %s but stripe==0",
                                       bch2_data_types[a.v->data_type]);
                                return -BCH_ERR_invalid_bkey;
                        }
                        break;
                }
        }

        return 0;
}

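/*
 * bch_backpointer stores bucket_offset in a 40 bit bitfield, hence the custom
 * byte swap helper below: e.g. swab40(0x0102030405) == 0x0504030201, with the
 * high 24 bits left clear.
 */
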
static inline u64 swab40(u64 x)
{
        return (((x & 0x00000000ffULL) << 32)|
                ((x & 0x000000ff00ULL) << 16)|
                ((x & 0x0000ff0000ULL) >>  0)|
                ((x & 0x00ff000000ULL) >> 16)|
                ((x & 0xff00000000ULL) >> 32));
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
        struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
        struct bch_backpointer *bp, *bps;

        a->journal_seq          = swab64(a->journal_seq);
        a->flags                = swab32(a->flags);
        a->dirty_sectors        = swab32(a->dirty_sectors);
        a->cached_sectors       = swab32(a->cached_sectors);
        a->io_time[0]           = swab64(a->io_time[0]);
        a->io_time[1]           = swab64(a->io_time[1]);
        a->stripe               = swab32(a->stripe);
        a->nr_external_backpointers = swab32(a->nr_external_backpointers);

        bps = alloc_v4_backpointers(a);
        for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
                bp->bucket_offset       = swab40(bp->bucket_offset);
                bp->bucket_len          = swab32(bp->bucket_len);
                bch2_bpos_swab(&bp->pos);
        }
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        struct bch_alloc_v4 _a;
        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
        const struct bch_backpointer *bps;
        unsigned i;

        prt_newline(out);
        printbuf_indent_add(out, 2);

        prt_printf(out, "gen %u oldest_gen %u data_type %s",
               a->gen, a->oldest_gen, bch2_data_types[a->data_type]);
        prt_newline(out);
        prt_printf(out, "journal_seq %llu", a->journal_seq);
        prt_newline(out);
        prt_printf(out, "need_discard %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
        prt_newline(out);
        prt_printf(out, "need_inc_gen %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
        prt_newline(out);
        prt_printf(out, "dirty_sectors %u", a->dirty_sectors);
        prt_newline(out);
        prt_printf(out, "cached_sectors %u", a->cached_sectors);
        prt_newline(out);
        prt_printf(out, "stripe %u", a->stripe);
        prt_newline(out);
        prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
        prt_newline(out);
        prt_printf(out, "io_time[READ] %llu", a->io_time[READ]);
        prt_newline(out);
        prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
        prt_newline(out);
        prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
        printbuf_indent_add(out, 2);

        bps = alloc_v4_backpointers_c(a);
        for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
                prt_newline(out);
                bch2_backpointer_to_text(out, &bps[i]);
        }

        printbuf_indent_sub(out, 4);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
        if (k.k->type == KEY_TYPE_alloc_v4) {
                int d;

                *out = *bkey_s_c_to_alloc_v4(k).v;

                d = (int) BCH_ALLOC_V4_U64s -
                        (int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
                if (unlikely(d > 0)) {
                        memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
                               0,
                               d * sizeof(u64));
                        SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
                }
        } else {
                struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

                *out = (struct bch_alloc_v4) {
                        .journal_seq            = u.journal_seq,
                        .flags                  = u.need_discard,
                        .gen                    = u.gen,
                        .oldest_gen             = u.oldest_gen,
                        .data_type              = u.data_type,
                        .stripe_redundancy      = u.stripe_redundancy,
                        .dirty_sectors          = u.dirty_sectors,
                        .cached_sectors         = u.cached_sectors,
                        .io_time[READ]          = u.read_time,
                        .io_time[WRITE]         = u.write_time,
                };

                SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
        }
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
        struct bkey_i_alloc_v4 *ret;
        unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
                ? bkey_bytes(k.k)
                : sizeof(struct bkey_i_alloc_v4);

        /*
         * Reserve space for one more backpointer here:
         * Not sketchy at doing it this way, nope...
         */
        ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
        if (IS_ERR(ret))
                return ret;

        if (k.k->type == KEY_TYPE_alloc_v4) {
                struct bch_backpointer *src, *dst;

                bkey_reassemble(&ret->k_i, k);

                src = alloc_v4_backpointers(&ret->v);
                SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
                dst = alloc_v4_backpointers(&ret->v);

                memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
                        sizeof(struct bch_backpointer));
                /* zero the gap between old and new backpointers start, in bytes: */
                memset(src, 0, (void *) dst - (void *) src);
                set_alloc_v4_u64s(ret);
        } else {
                bkey_alloc_v4_init(&ret->k_i);
                ret->k.p = k.k->p;
                bch2_alloc_to_v4(k, &ret->v);
        }
        return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
        if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
            BCH_ALLOC_V4_BACKPOINTERS_START(bkey_s_c_to_alloc_v4(k).v) == BCH_ALLOC_V4_U64s) {
                /*
                 * Reserve space for one more backpointer here:
                 * Not sketchy at doing it this way, nope...
                 */
                struct bkey_i_alloc_v4 *ret =
                        bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
                if (!IS_ERR(ret)) {
                        bkey_reassemble(&ret->k_i, k);
                        memset((void *) ret + bkey_bytes(k.k), 0, sizeof(struct bch_backpointer));
                }
                return ret;
        }

        return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
        return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
                              struct bpos pos)
{
        struct bkey_s_c k;
        struct bkey_i_alloc_v4 *a;
        int ret;

        bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
                             BTREE_ITER_WITH_UPDATES|
                             BTREE_ITER_CACHED|
                             BTREE_ITER_INTENT);
        k = bch2_btree_iter_peek_slot(iter);
        ret = bkey_err(k);
        if (unlikely(ret))
                goto err;

        a = bch2_alloc_to_v4_mut_inlined(trans, k);
        ret = PTR_ERR_OR_ZERO(a);
        if (unlikely(ret))
                goto err;
        return a;
err:
        bch2_trans_iter_exit(trans, iter);
        return ERR_PTR(ret);
}

int bch2_alloc_read(struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bch_alloc_v4 a;
        struct bch_dev *ca;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                /*
                 * Not a fsck error because this is checked/repaired by
                 * bch2_check_alloc_key() which runs later:
                 */
                if (!bch2_dev_bucket_exists(c, k.k->p))
                        continue;

                ca = bch_dev_bkey_exists(c, k.k->p.inode);

                *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        if (ret)
                bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));

        return ret;
}

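/*
 * Bucket generations are also stored packed in the bucket_gens btree: each
 * key covers KEY_TYPE_BUCKET_GENS_NR buckets, with a bucket's low
 * KEY_TYPE_BUCKET_GENS_BITS bits selecting an entry in gens[] and the
 * remaining bits forming the key position. For example, assuming
 * KEY_TYPE_BUCKET_GENS_BITS is 8, bucket 0:1000 maps to the bucket_gens key
 * at 0:3 (1000 >> 8), entry gens[232] (1000 & 255).
 */
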
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
        *offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

        pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
        return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
        pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
        pos.offset += offset;
        return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
        return k.k->type == KEY_TYPE_bucket_gens
                ? bkey_s_c_to_bucket_gens(k).v->gens[offset]
                : 0;
}

int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
                             int rw, struct printbuf *err)
{
        if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
                prt_printf(err, "bad val size (%lu != %zu)",
                       bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
                return -BCH_ERR_invalid_bkey;
        }

        return 0;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
                if (i)
                        prt_char(out, ' ');
                prt_printf(out, "%u", g.v->gens[i]);
        }
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bch_alloc_v4 a;
        struct bkey_i_bucket_gens g;
        bool have_bucket_gens_key = false;
        unsigned offset;
        struct bpos pos;
        u8 gen;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                /*
                 * Not a fsck error because this is checked/repaired by
                 * bch2_check_alloc_key() which runs later:
                 */
                if (!bch2_dev_bucket_exists(c, k.k->p))
                        continue;

                gen = bch2_alloc_to_v4(k, &a)->gen;
                pos = alloc_gens_pos(iter.pos, &offset);

                if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
                        ret = commit_do(&trans, NULL, NULL,
                                        BTREE_INSERT_NOFAIL|
                                        BTREE_INSERT_LAZY_RW,
                                __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i));
                        if (ret)
                                break;
                        have_bucket_gens_key = false;
                }

                if (!have_bucket_gens_key) {
                        bkey_bucket_gens_init(&g.k_i);
                        g.k.p = pos;
                        have_bucket_gens_key = true;
                }

                g.v.gens[offset] = gen;
        }
        bch2_trans_iter_exit(&trans, &iter);

        if (have_bucket_gens_key && !ret)
                ret = commit_do(&trans, NULL, NULL,
                                BTREE_INSERT_NOFAIL|
                                BTREE_INSERT_LAZY_RW,
                        __bch2_btree_insert(&trans, BTREE_ID_bucket_gens, &g.k_i));

        bch2_trans_exit(&trans);

        if (ret)
                bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));

        return ret;
}

int bch2_bucket_gens_read(struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        const struct bch_bucket_gens *g;
        struct bch_dev *ca;
        u64 b;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_btree_key(&trans, iter, BTREE_ID_bucket_gens, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
                u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

                if (k.k->type != KEY_TYPE_bucket_gens)
                        continue;

                g = bkey_s_c_to_bucket_gens(k).v;

                /*
                 * Not a fsck error because this is checked/repaired by
                 * bch2_check_alloc_key() which runs later:
                 */
                if (!bch2_dev_exists2(c, k.k->p.inode))
                        continue;

                ca = bch_dev_bkey_exists(c, k.k->p.inode);

                for (b = max_t(u64, ca->mi.first_bucket, start);
                     b < min_t(u64, ca->mi.nbuckets, end);
                     b++)
                        *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
        }
        bch2_trans_iter_exit(&trans, &iter);

        bch2_trans_exit(&trans);

        if (ret)
                bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));

        return ret;
}

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
                                struct bkey_s_c alloc_k,
                                const struct bch_alloc_v4 *a,
                                bool set)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
        struct btree_iter iter;
        struct bkey_s_c old;
        struct bkey_i *k;
        enum btree_id btree;
        enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
        enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (a->data_type != BCH_DATA_free &&
            a->data_type != BCH_DATA_need_discard)
                return 0;

        k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
        if (IS_ERR(k))
                return PTR_ERR(k);

        bkey_init(&k->k);
        k->k.type = new_type;

        switch (a->data_type) {
        case BCH_DATA_free:
                btree = BTREE_ID_freespace;
                k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
                bch2_key_resize(&k->k, 1);
                break;
        case BCH_DATA_need_discard:
                btree = BTREE_ID_need_discard;
                k->k.p = alloc_k.k->p;
                break;
        default:
                return 0;
        }

        bch2_trans_iter_init(trans, &iter, btree,
                             bkey_start_pos(&k->k),
                             BTREE_ITER_INTENT);
        old = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(old);
        if (ret)
                goto err;

        if (ca->mi.freespace_initialized &&
            bch2_trans_inconsistent_on(old.k->type != old_type, trans,
                        "incorrect key when %s %s btree (got %s should be %s)\n"
                        "  for %s",
                        set ? "setting" : "clearing",
                        bch2_btree_ids[btree],
                        bch2_bkey_types[old.k->type],
                        bch2_bkey_types[old_type],
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = -EIO;
                goto err;
        }

        ret = bch2_trans_update(trans, &iter, k, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        printbuf_exit(&buf);
        return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
                                           struct bpos bucket, u8 gen)
{
        struct btree_iter iter;
        unsigned offset;
        struct bpos pos = alloc_gens_pos(bucket, &offset);
        struct bkey_i_bucket_gens *g;
        struct bkey_s_c k;
        int ret;

        g = bch2_trans_kmalloc(trans, sizeof(*g));
        ret = PTR_ERR_OR_ZERO(g);
        if (ret)
                return ret;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_bucket_gens, pos,
                             BTREE_ITER_INTENT|
                             BTREE_ITER_WITH_UPDATES);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != KEY_TYPE_bucket_gens) {
                bkey_bucket_gens_init(&g->k_i);
                g->k.p = iter.pos;
        } else {
                bkey_reassemble(&g->k_i, k);
        }

        g->v.gens[offset] = gen;

        ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

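/*
 * Transactional trigger for alloc keys: this runs on every alloc key update
 * and keeps the derived state in sync - the freespace/need_discard indexes
 * (via bch2_bucket_do_index()), the lru of cached buckets, and the
 * bucket_gens btree (via bch2_bucket_gen_update()) - and bumps the bucket gen
 * when an emptied bucket's need_inc_gen flag is set.
 */
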
int bch2_trans_mark_alloc(struct btree_trans *trans,
                          enum btree_id btree_id, unsigned level,
                          struct bkey_s_c old, struct bkey_i *new,
                          unsigned flags)
{
        struct bch_fs *c = trans->c;
        struct bch_alloc_v4 old_a_convert, *new_a;
        const struct bch_alloc_v4 *old_a;
        u64 old_lru, new_lru;
        int ret = 0;

        /*
         * Deletion only happens in the device removal path, with
         * BTREE_TRIGGER_NORUN:
         */
        BUG_ON(new->k.type != KEY_TYPE_alloc_v4);

        old_a = bch2_alloc_to_v4(old, &old_a_convert);
        new_a = &bkey_i_to_alloc_v4(new)->v;

        new_a->data_type = alloc_data_type(*new_a, new_a->data_type);

        if (new_a->dirty_sectors > old_a->dirty_sectors ||
            new_a->cached_sectors > old_a->cached_sectors) {
                new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
                new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
                SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
                SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
        }

        if (data_type_is_empty(new_a->data_type) &&
            BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
            !bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
                new_a->gen++;
                SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
        }

        if (old_a->data_type != new_a->data_type ||
            (new_a->data_type == BCH_DATA_free &&
             alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
                ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
                        bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
                if (ret)
                        return ret;
        }

        if (new_a->data_type == BCH_DATA_cached &&
            !new_a->io_time[READ])
                new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

        old_lru = alloc_lru_idx(*old_a);
        new_lru = alloc_lru_idx(*new_a);

        if (old_lru != new_lru) {
                ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
                                      old_lru, &new_lru, old);
                if (ret)
                        return ret;

                if (new_a->data_type == BCH_DATA_cached)
                        new_a->io_time[READ] = new_lru;
        }

        if (old_a->gen != new_a->gen) {
                ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
 * extents style btrees, but works on non-extents btrees:
 */
struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
        struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

        if (bkey_err(k))
                return k;

        if (k.k->type) {
                return k;
        } else {
                struct btree_iter iter2;
                struct bpos next;

                bch2_trans_copy_iter(&iter2, iter);
                k = bch2_btree_iter_peek_upto(&iter2,
                                bkey_min(bkey_min(end,
                                                  iter->path->l[0].b->key.k.p),
                                POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1)));
                next = iter2.pos;
                bch2_trans_iter_exit(iter->trans, &iter2);

                BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

                if (bkey_err(k))
                        return k;

                bkey_init(hole);
                hole->p = iter->pos;

                bch2_key_resize(hole, next.offset - iter->pos.offset);
                return (struct bkey_s_c) { hole, NULL };
        }
}

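/*
 * E.g. if the btree contains keys at 0:10 and 0:14, peeking at 0:11 returns a
 * synthesized hole spanning [0:11..0:14), clamped to @end, to the end of the
 * current btree node, and to a size of U32_MAX - 1, since a bkey's size field
 * is 32 bit.
 */
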
static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
{
        struct bch_dev *ca;
        unsigned iter;

        if (bch2_dev_bucket_exists(c, *bucket))
                return true;

        if (bch2_dev_exists2(c, bucket->inode)) {
                ca = bch_dev_bkey_exists(c, bucket->inode);

                if (bucket->offset < ca->mi.first_bucket) {
                        bucket->offset = ca->mi.first_bucket;
                        return true;
                }

                bucket->inode++;
                bucket->offset = 0;
        }

        rcu_read_lock();
        iter = bucket->inode;
        ca = __bch2_next_dev(c, &iter, NULL);
        if (ca)
                bucket->offset = ca->mi.first_bucket;
        rcu_read_unlock();

        return ca != NULL;
}

struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
{
        struct bch_fs *c = iter->trans->c;
        struct bkey_s_c k;
again:
        k = bch2_get_key_or_hole(iter, POS_MAX, hole);
        if (bkey_err(k))
                return k;

        if (!k.k->type) {
                struct bpos bucket = bkey_start_pos(k.k);

                if (!bch2_dev_bucket_exists(c, bucket)) {
                        if (!next_bucket(c, &bucket))
                                return bkey_s_c_null;

                        bch2_btree_iter_set_pos(iter, bucket);
                        goto again;
                }

                if (!bch2_dev_bucket_exists(c, k.k->p)) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

                        bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
                }
        }

        return k;
}

static int bch2_check_alloc_key(struct btree_trans *trans,
                                struct bkey_s_c alloc_k,
                                struct btree_iter *alloc_iter,
                                struct btree_iter *discard_iter,
                                struct btree_iter *freespace_iter,
                                struct btree_iter *bucket_gens_iter)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        unsigned discard_key_type, freespace_key_type;
        unsigned gens_offset;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
                        "alloc key for invalid device:bucket %llu:%llu",
                        alloc_k.k->p.inode, alloc_k.k->p.offset))
                return bch2_btree_delete_at(trans, alloc_iter, 0);

        ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
        if (!ca->mi.freespace_initialized)
                return 0;

        a = bch2_alloc_to_v4(alloc_k, &a_convert);

        discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
        bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
        k = bch2_btree_iter_peek_slot(discard_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != discard_key_type &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[discard_key_type],
                      (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type = discard_key_type;
                update->k.p = discard_iter->pos;

                ret = bch2_trans_update(trans, discard_iter, update, 0);
                if (ret)
                        goto err;
        }

        freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
        bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
        k = bch2_btree_iter_peek_slot(freespace_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != freespace_key_type &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[freespace_key_type],
                      (printbuf_reset(&buf),
                       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type = freespace_key_type;
                update->k.p = freespace_iter->pos;
                bch2_key_resize(&update->k, 1);

                ret = bch2_trans_update(trans, freespace_iter, update, 0);
                if (ret)
                        goto err;
        }

        bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
        k = bch2_btree_iter_peek_slot(bucket_gens_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (a->gen != alloc_gen(k, gens_offset) &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
                      "  %s",
                      alloc_gen(k, gens_offset), a->gen,
                      (printbuf_reset(&buf),
                       bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)))) {
                struct bkey_i_bucket_gens *g =
                        bch2_trans_kmalloc(trans, sizeof(*g));

                ret = PTR_ERR_OR_ZERO(g);
                if (ret)
                        goto err;

                if (k.k->type == KEY_TYPE_bucket_gens) {
                        bkey_reassemble(&g->k_i, k);
                } else {
                        bkey_bucket_gens_init(&g->k_i);
                        g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
                }

                g->v.gens[gens_offset] = a->gen;

                ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
                if (ret)
                        goto err;
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
                                           struct bpos start,
                                           struct bpos *end,
                                           struct btree_iter *freespace_iter)
{
        struct bch_fs *c = trans->c;
        struct bch_dev *ca;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        ca = bch_dev_bkey_exists(c, start.inode);
        if (!ca->mi.freespace_initialized)
                return 0;

        bch2_btree_iter_set_pos(freespace_iter, start);

        k = bch2_btree_iter_peek_slot(freespace_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        *end = bkey_min(k.k->p, *end);

        if (k.k->type != KEY_TYPE_set &&
            (c->opts.reconstruct_alloc ||
             fsck_err(c, "hole in alloc btree missing in freespace btree\n"
                      " device %llu buckets %llu-%llu",
                      freespace_iter->pos.inode,
                      freespace_iter->pos.offset,
                      end->offset))) {
                struct bkey_i *update =
                        bch2_trans_kmalloc(trans, sizeof(*update));

                ret = PTR_ERR_OR_ZERO(update);
                if (ret)
                        goto err;

                bkey_init(&update->k);
                update->k.type = KEY_TYPE_set;
                update->k.p = freespace_iter->pos;
                bch2_key_resize(&update->k,
                                min_t(u64, U32_MAX, end->offset -
                                      freespace_iter->pos.offset));

                ret = bch2_trans_update(trans, freespace_iter, update, 0);
                if (ret)
                        goto err;
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
                                             struct bpos start,
                                             struct bpos *end,
                                             struct btree_iter *bucket_gens_iter)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        unsigned i, gens_offset, gens_end_offset;
        int ret;

        if (c->sb.version < bcachefs_metadata_version_bucket_gens &&
            !c->opts.version_upgrade)
                return 0;

        bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

        k = bch2_btree_iter_peek_slot(bucket_gens_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
                     alloc_gens_pos(*end, &gens_end_offset)))
                gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

        if (k.k->type == KEY_TYPE_bucket_gens) {
                struct bkey_i_bucket_gens g;
                bool need_update = false;

                bkey_reassemble(&g.k_i, k);

                for (i = gens_offset; i < gens_end_offset; i++) {
                        if (fsck_err_on(g.v.gens[i], c,
                                        "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
                                        bucket_gens_pos_to_alloc(k.k->p, i).inode,
                                        bucket_gens_pos_to_alloc(k.k->p, i).offset,
                                        g.v.gens[i])) {
                                g.v.gens[i] = 0;
                                need_update = true;
                        }
                }

                if (need_update) {
                        ret = bch2_trans_update(trans, bucket_gens_iter, &g.k_i, 0);
                        if (ret)
                                goto err;
                }
        }

        *end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

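/*
 * Keys in the freespace btree encode the bucket in the low 56 bits of the key
 * offset and alloc_freespace_genbits() in the high bits (see the masking at
 * the top of bch2_check_discard_freespace_key() below), so a freespace entry
 * left over from an earlier generation of the bucket compares as a different
 * key and can be detected and deleted.
 */
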
static int bch2_check_discard_freespace_key(struct btree_trans *trans,
                                            struct btree_iter *iter)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter;
        struct bkey_s_c alloc_k;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        u64 genbits;
        struct bpos pos;
        enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
                ? BCH_DATA_need_discard
                : BCH_DATA_free;
        struct printbuf buf = PRINTBUF;
        int ret;

        pos = iter->pos;
        pos.offset &= ~(~0ULL << 56);
        genbits = iter->pos.offset & (~0ULL << 56);

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);

        if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
                        "entry in %s btree for nonexistent dev:bucket %llu:%llu",
                        bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
                goto delete;

        alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
        ret = bkey_err(alloc_k);
        if (ret)
                goto err;

        a = bch2_alloc_to_v4(alloc_k, &a_convert);

        if (fsck_err_on(a->data_type != state ||
                        (state == BCH_DATA_free &&
                         genbits != alloc_freespace_genbits(*a)), c,
                        "%s\n incorrectly set in %s index (free %u, genbits %llu should be %llu)",
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
                        bch2_btree_ids[iter->btree_id],
                        a->data_type == state,
                        genbits >> 56, alloc_freespace_genbits(*a) >> 56))
                goto delete;
out:
err:
fsck_err:
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
delete:
        ret = bch2_btree_delete_extent_at(trans, iter,
                        iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0);
        goto out;
}

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static int bch2_check_bucket_gens_key(struct btree_trans *trans,
                                      struct btree_iter *iter,
                                      struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct bkey_i_bucket_gens g;
        struct bch_dev *ca;
        u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
        u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
        u64 b;
        bool need_update = false;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
        bkey_reassemble(&g.k_i, k);

        if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
                        "bucket_gens key for invalid device:\n %s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, iter, 0);
                goto out;
        }

        ca = bch_dev_bkey_exists(c, k.k->p.inode);
        if (fsck_err_on(end <= ca->mi.first_bucket ||
                        start >= ca->mi.nbuckets, c,
                        "bucket_gens key for invalid buckets:\n %s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, iter, 0);
                goto out;
        }

        for (b = start; b < ca->mi.first_bucket; b++)
                if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
                                "bucket_gens key has nonzero gen for invalid bucket")) {
                        g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
                        need_update = true;
                }

        for (b = ca->mi.nbuckets; b < end; b++)
                if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
                                "bucket_gens key has nonzero gen for invalid bucket")) {
                        g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
                        need_update = true;
                }

        if (need_update) {
                struct bkey_i *k;

                k = bch2_trans_kmalloc(trans, sizeof(g));
                ret = PTR_ERR_OR_ZERO(k);
                if (ret)
                        goto out;

                memcpy(k, &g, sizeof(g));
                ret = bch2_trans_update(trans, iter, k, 0);
        }
out:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
        struct bkey hole;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN,
                             BTREE_ITER_PREFETCH);
        bch2_trans_iter_init(&trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
                             BTREE_ITER_PREFETCH);
        bch2_trans_iter_init(&trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
                             BTREE_ITER_PREFETCH);
        bch2_trans_iter_init(&trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
                             BTREE_ITER_PREFETCH);

        while (1) {
                struct bpos next;

                bch2_trans_begin(&trans);

                k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
                ret = bkey_err(k);
                if (ret)
                        goto bkey_err;

                if (!k.k)
                        break;

                if (k.k->type) {
                        next = bpos_nosnap_successor(k.k->p);

                        ret = bch2_check_alloc_key(&trans,
                                                   k, &iter,
                                                   &discard_iter,
                                                   &freespace_iter,
                                                   &bucket_gens_iter);
                        if (ret)
                                goto bkey_err;
                } else {
                        next = k.k->p;

                        ret = bch2_check_alloc_hole_freespace(&trans,
                                                    bkey_start_pos(k.k),
                                                    &next,
                                                    &freespace_iter) ?:
                                bch2_check_alloc_hole_bucket_gens(&trans,
                                                    bkey_start_pos(k.k),
                                                    &next,
                                                    &bucket_gens_iter);
                        if (ret)
                                goto bkey_err;
                }

                ret = bch2_trans_commit(&trans, NULL, NULL,
                                        BTREE_INSERT_NOFAIL|
                                        BTREE_INSERT_LAZY_RW);
                if (ret)
                        goto bkey_err;

                bch2_btree_iter_set_pos(&iter, next);
bkey_err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;
        }
        bch2_trans_iter_exit(&trans, &bucket_gens_iter);
        bch2_trans_iter_exit(&trans, &freespace_iter);
        bch2_trans_iter_exit(&trans, &discard_iter);
        bch2_trans_iter_exit(&trans, &iter);

        if (ret < 0)
                goto err;

        ret = for_each_btree_key_commit(&trans, iter,
                        BTREE_ID_need_discard, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
                bch2_check_discard_freespace_key(&trans, &iter)) ?:
              for_each_btree_key_commit(&trans, iter,
                        BTREE_ID_freespace, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
                bch2_check_discard_freespace_key(&trans, &iter)) ?:
              for_each_btree_key_commit(&trans, iter,
                        BTREE_ID_bucket_gens, POS_MIN,
                        BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
                bch2_check_bucket_gens_key(&trans, &iter, k));
err:
        bch2_trans_exit(&trans);
        return ret < 0 ? ret : 0;
}

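/*
 * Every bucket holding cached data must have an entry in the lru btree keyed
 * by its read_time, or bucket invalidation would never consider it; the pass
 * below checks and repairs that mapping.
 */
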
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
                                       struct btree_iter *alloc_iter)
{
        struct bch_fs *c = trans->c;
        struct btree_iter lru_iter;
        struct bch_alloc_v4 a_convert;
        const struct bch_alloc_v4 *a;
        struct bkey_s_c alloc_k, k;
        struct printbuf buf = PRINTBUF;
        struct printbuf buf2 = PRINTBUF;
        int ret;

        alloc_k = bch2_btree_iter_peek(alloc_iter);
        if (!alloc_k.k)
                return 0;

        ret = bkey_err(alloc_k);
        if (ret)
                return ret;

        a = bch2_alloc_to_v4(alloc_k, &a_convert);

        if (a->data_type != BCH_DATA_cached)
                return 0;

        bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
                             POS(alloc_k.k->p.inode, a->io_time[READ]), 0);

        k = bch2_btree_iter_peek_slot(&lru_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (fsck_err_on(!a->io_time[READ], c,
                        "cached bucket with read_time 0\n"
                        "  %s",
                (printbuf_reset(&buf),
                 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
            fsck_err_on(k.k->type != KEY_TYPE_lru ||
                        le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
                        "incorrect/missing lru entry\n"
                        "  %s\n"
                        "  %s",
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
                        (bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
                u64 read_time = a->io_time[READ] ?:
                        atomic64_read(&c->io_clock[READ].now);

                ret = bch2_lru_set(trans,
                                   alloc_k.k->p.inode,
                                   alloc_k.k->p.offset,
                                   &read_time);
                if (ret)
                        goto err;

                if (a->io_time[READ] != read_time) {
                        struct bkey_i_alloc_v4 *a_mut =
                                bch2_alloc_to_v4_mut(trans, alloc_k);
                        ret = PTR_ERR_OR_ZERO(a_mut);
                        if (ret)
                                goto err;

                        a_mut->v.io_time[READ] = read_time;
                        ret = bch2_trans_update(trans, alloc_iter,
                                                &a_mut->k_i, BTREE_TRIGGER_NORUN);
                        if (ret)
                                goto err;
                }
        }
err:
fsck_err:
        bch2_trans_iter_exit(trans, &lru_iter);
        printbuf_exit(&buf2);
        printbuf_exit(&buf);
        return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
                        POS_MIN, BTREE_ITER_PREFETCH, k,
                        NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
                bch2_check_alloc_to_lru_ref(&trans, &iter));

        bch2_trans_exit(&trans);
        return ret < 0 ? ret : 0;
}

static int bch2_discard_one_bucket(struct btree_trans *trans,
                                   struct btree_iter *need_discard_iter,
                                   struct bpos *discard_pos_done,
                                   u64 *seen,
                                   u64 *open,
                                   u64 *need_journal_commit,
                                   u64 *discarded)
{
        struct bch_fs *c = trans->c;
        struct bpos pos = need_discard_iter->pos;
        struct btree_iter iter = { NULL };
        struct bkey_s_c k;
        struct bch_dev *ca;
        struct bkey_i_alloc_v4 *a;
        struct printbuf buf = PRINTBUF;
        bool did_discard = false;
        int ret = 0;

        ca = bch_dev_bkey_exists(c, pos.inode);
        if (!percpu_ref_tryget(&ca->io_ref)) {
                bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
                return 0;
        }

        if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
                (*open)++;
                goto out;
        }

        if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
                        c->journal.flushed_seq_ondisk,
                        pos.inode, pos.offset)) {
                (*need_journal_commit)++;
                goto out;
        }

        bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                             need_discard_iter->pos,
                             BTREE_ITER_CACHED);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret)
                goto out;

        a = bch2_alloc_to_v4_mut(trans, k);
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                goto out;

        if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
                a->v.gen++;
                SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
                goto write;
        }

        if (bch2_trans_inconsistent_on(a->v.journal_seq > c->journal.flushed_seq_ondisk, trans,
                        "clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
                        "%s",
                        a->v.journal_seq,
                        c->journal.flushed_seq_ondisk,
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = -EIO;
                goto out;
        }

        if (bch2_trans_inconsistent_on(a->v.data_type != BCH_DATA_need_discard, trans,
                        "bucket incorrectly set in need_discard btree\n"
                        "%s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = -EIO;
                goto out;
        }

        if (!bkey_eq(*discard_pos_done, iter.pos) &&
            ca->mi.discard && !c->opts.nochanges) {
                /*
                 * This works without any other locks because this is the only
                 * thread that removes items from the need_discard tree
                 */
                bch2_trans_unlock(trans);
                blkdev_issue_discard(ca->disk_sb.bdev,
                                     k.k->p.offset * ca->mi.bucket_size,
                                     ca->mi.bucket_size,
                                     GFP_KERNEL);

                ret = bch2_trans_relock(trans);
                if (ret)
                        goto out;
        }

        *discard_pos_done = iter.pos;
        did_discard = true;

        SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
        a->v.data_type = alloc_data_type(a->v, a->v.data_type);
write:
        ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
        if (ret)
                goto out;

        if (did_discard) {
                this_cpu_inc(c->counters[BCH_COUNTER_bucket_discard]);
                (*discarded)++;
        }
out:
        bch2_trans_iter_exit(trans, &iter);
        percpu_ref_put(&ca->io_ref);
        printbuf_exit(&buf);
        return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
        struct bpos discard_pos_done = POS_MAX;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        /*
         * We're doing the commit in bch2_discard_one_bucket instead of using
         * for_each_btree_key_commit() so that we can increment counters after
         * successful commit:
         */
        ret = for_each_btree_key2(&trans, iter,
                        BTREE_ID_need_discard, POS_MIN, 0, k,
                bch2_discard_one_bucket(&trans, &iter, &discard_pos_done,
                                        &seen,
                                        &open,
                                        &need_journal_commit,
                                        &discarded));

        bch2_trans_exit(&trans);

        if (need_journal_commit * 2 > seen)
                bch2_journal_flush_async(&c->journal, NULL);

        percpu_ref_put(&c->writes);

        trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
                              bch2_err_str(ret));
}

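/*
 * Note the pattern used to kick off the worker: we take a ref on c->writes so
 * the filesystem can't go read-only while work is pending, and if
 * queue_work() reports the work item was already queued, the ref taken by the
 * earlier caller suffices and we drop ours.
 */
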
void bch2_do_discards(struct bch_fs *c)
{
        if (percpu_ref_tryget_live(&c->writes) &&
            !queue_work(system_long_wq, &c->discard_work))
                percpu_ref_put(&c->writes);
}

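/*
 * Invalidating a bucket that contains only clean, cached data: bumping the
 * generation number and zeroing the sector counts makes existing cached
 * pointers into the bucket stale, since their embedded gen no longer matches,
 * so the bucket can be reused without touching the pointers themselves.
 */
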
static int invalidate_one_bucket(struct btree_trans *trans,
                                 struct btree_iter *lru_iter, struct bkey_s_c k,
                                 unsigned dev_idx, s64 *nr_to_invalidate)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter = { NULL };
        struct bkey_i_alloc_v4 *a;
        struct bpos bucket;
        struct printbuf buf = PRINTBUF;
        unsigned cached_sectors;
        int ret = 0;

        if (*nr_to_invalidate <= 0 || k.k->p.inode != dev_idx)
                return 1;

        if (k.k->type != KEY_TYPE_lru) {
                prt_printf(&buf, "non lru key in lru btree:\n ");
                bch2_bkey_val_to_text(&buf, c, k);

                if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
                        bch_err(c, "%s", buf.buf);
                } else {
                        bch2_trans_inconsistent(trans, "%s", buf.buf);
                        ret = -EINVAL;
                }

                goto out;
        }

        bucket = POS(dev_idx, le64_to_cpu(bkey_s_c_to_lru(k).v->idx));

        a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                goto out;

        if (k.k->p.offset != alloc_lru_idx(a->v)) {
                prt_printf(&buf, "alloc key does not point back to lru entry when invalidating bucket:\n ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
                prt_printf(&buf, "\n ");
                bch2_bkey_val_to_text(&buf, c, k);

                if (!test_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags)) {
                        bch_err(c, "%s", buf.buf);
                } else {
                        bch2_trans_inconsistent(trans, "%s", buf.buf);
                        ret = -EINVAL;
                }

                goto out;
        }

        if (!a->v.cached_sectors)
                bch_err(c, "invalidating empty bucket, confused");

        cached_sectors = a->v.cached_sectors;

        SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
        a->v.gen++;
        a->v.data_type          = 0;
        a->v.dirty_sectors      = 0;
        a->v.cached_sectors     = 0;
        a->v.io_time[READ]      = atomic64_read(&c->io_clock[READ].now);
        a->v.io_time[WRITE]     = atomic64_read(&c->io_clock[WRITE].now);

        ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
                                BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
                bch2_trans_commit(trans, NULL, NULL,
                                  BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL);
        if (ret)
                goto out;

        trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
        --*nr_to_invalidate;
out:
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
        struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
        struct bch_dev *ca;
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        unsigned i;
        int ret = 0;

        bch2_trans_init(&trans, c, 0, 0);

        for_each_member_device(ca, c, i) {
                s64 nr_to_invalidate =
                        should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

                ret = for_each_btree_key2(&trans, iter, BTREE_ID_lru,
                                POS(ca->dev_idx, 0), BTREE_ITER_INTENT, k,
                        invalidate_one_bucket(&trans, &iter, k, ca->dev_idx, &nr_to_invalidate));

                if (ret < 0) {
                        percpu_ref_put(&ca->ref);
                        break;
                }
        }

        bch2_trans_exit(&trans);
        percpu_ref_put(&c->writes);
}

void bch2_do_invalidates(struct bch_fs *c)
{
        if (percpu_ref_tryget_live(&c->writes) &&
            !queue_work(system_long_wq, &c->invalidate_work))
                percpu_ref_put(&c->writes);
}

static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
{
        struct btree_trans trans;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bkey hole;
        struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
        struct bch_member *m;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);

        bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
                             POS(ca->dev_idx, ca->mi.first_bucket),
                             BTREE_ITER_PREFETCH);
        /*
         * Scan the alloc btree for every bucket on @ca, and add buckets to the
         * freespace/need_discard/need_gc_gens btrees as needed:
         */
        while (1) {
                bch2_trans_begin(&trans);

                if (bkey_ge(iter.pos, end)) {
                        ret = 0;
                        break;
                }

                k = bch2_get_key_or_hole(&iter, end, &hole);
                ret = bkey_err(k);
                if (ret)
                        goto bkey_err;

                if (k.k->type) {
                        /*
                         * We process live keys in the alloc btree one at a
                         * time:
                         */
                        struct bch_alloc_v4 a_convert;
                        const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

                        ret =   bch2_bucket_do_index(&trans, k, a, true) ?:
                                bch2_trans_commit(&trans, NULL, NULL,
                                                  BTREE_INSERT_LAZY_RW|
                                                  BTREE_INSERT_NOFAIL);
                        if (ret)
                                goto bkey_err;

                        bch2_btree_iter_advance(&iter);
                } else {
                        struct bkey_i *freespace;

                        freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
                        ret = PTR_ERR_OR_ZERO(freespace);
                        if (ret)
                                goto bkey_err;

                        bkey_init(&freespace->k);
                        freespace->k.type       = KEY_TYPE_set;
                        freespace->k.p          = k.k->p;
                        freespace->k.size       = k.k->size;

                        ret = __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace) ?:
                                bch2_trans_commit(&trans, NULL, NULL,
                                                  BTREE_INSERT_LAZY_RW|
                                                  BTREE_INSERT_NOFAIL);
                        if (ret)
                                goto bkey_err;

                        bch2_btree_iter_set_pos(&iter, k.k->p);
                }
bkey_err:
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
                if (ret)
                        break;
        }

        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);

        if (ret < 0) {
                bch_err(ca, "error initializing free space: %s", bch2_err_str(ret));
                return ret;
        }

        mutex_lock(&c->sb_lock);
        m = bch2_sb_get_members(c->disk_sb.sb)->members + ca->dev_idx;
        SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
        mutex_unlock(&c->sb_lock);

        return 0;
}

int bch2_fs_freespace_init(struct bch_fs *c)
{
        struct bch_dev *ca;
        unsigned i;
        int ret = 0;
        bool doing_init = false;

        /*
         * We can crash during the device add path, so we need to check this on
         * every mount:
         */
        for_each_member_device(ca, c, i) {
                if (ca->mi.freespace_initialized)
                        continue;

                if (!doing_init) {
                        bch_info(c, "initializing freespace");
                        doing_init = true;
                }

                ret = bch2_dev_freespace_init(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        return ret;
                }
        }

        if (doing_init) {
                mutex_lock(&c->sb_lock);
                bch2_write_super(c);
                mutex_unlock(&c->sb_lock);

                bch_verbose(c, "done initializing freespace");
        }

        return ret;
}

/* Bucket IO clocks: */

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
                              size_t bucket_nr, int rw)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_i_alloc_v4 *a;
        u64 now;
        int ret = 0;

        a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                return ret;

        now = atomic64_read(&c->io_clock[rw].now);
        if (a->v.io_time[rw] == now)
                goto out;

        a->v.io_time[rw] = now;

        ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
                bch2_trans_commit(trans, NULL, NULL, 0);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
        struct bch_dev *ca;
        u64 capacity = 0, reserved_sectors = 0, gc_reserve;
        unsigned bucket_size_max = 0;
        unsigned long ra_pages = 0;
        unsigned i;

        lockdep_assert_held(&c->state_lock);

        for_each_online_member(ca, c, i) {
                struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

                ra_pages += bdi->ra_pages;
        }

        bch2_set_ra_pages(c, ra_pages);

        for_each_rw_member(ca, c, i) {
                u64 dev_reserve = 0;

                /*
                 * We need to reserve buckets (from the number
                 * of currently available buckets) against
                 * foreground writes so that mainly copygc can
                 * make forward progress.
                 *
                 * We need enough to refill the various reserves
                 * from scratch - copygc will use its entire
                 * reserve all at once, then run against when
                 * its reserve is refilled (from the formerly
                 * available buckets).
                 *
                 * This reserve is just used when considering if
                 * allocations for foreground writes must wait -
                 * not -ENOSPC calculations.
                 */

                dev_reserve += ca->nr_btree_reserve * 2;
                dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

                dev_reserve += 1;       /* btree write point */
                dev_reserve += 1;       /* copygc write point */
                dev_reserve += 1;       /* rebalance write point */

                dev_reserve *= ca->mi.bucket_size;

                capacity += bucket_to_sector(ca, ca->mi.nbuckets -
                                             ca->mi.first_bucket);

                reserved_sectors += dev_reserve * 2;

                bucket_size_max = max_t(unsigned, bucket_size_max,
                                        ca->mi.bucket_size);
        }

        gc_reserve = c->opts.gc_reserve_bytes
                ? c->opts.gc_reserve_bytes >> 9
                : div64_u64(capacity * c->opts.gc_reserve_percent, 100);

        reserved_sectors = max(gc_reserve, reserved_sectors);

        reserved_sectors = min(reserved_sectors, capacity);

        c->capacity = capacity - reserved_sectors;

        c->bucket_size_max = bucket_size_max;

        /* Wake up in case someone was waiting for buckets */
        closure_wake_up(&c->freelist_wait);
}

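/*
 * Worked example of the per-device reserve arithmetic above, with made-up
 * numbers: a device with nbuckets = 65536, nr_btree_reserve = 16 and 512KiB
 * buckets (1024 sectors) reserves (16 * 2 + (65536 >> 6) + 3) * 1024 =
 * 1059 * 1024 sectors, and reserved_sectors counts that twice.
 */
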
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
        struct open_bucket *ob;
        bool ret = false;

        for (ob = c->open_buckets;
             ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
             ob++) {
                spin_lock(&ob->lock);
                if (ob->valid && !ob->on_partial_list &&
                    ob->dev == ca->dev_idx)
                        ret = true;
                spin_unlock(&ob->lock);
        }

        return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
        unsigned i;

        /* First, remove device from allocation groups: */

        for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
                clear_bit(ca->dev_idx, c->rw_devs[i].d);

        /*
         * Capacity is calculated based off of devices in allocation groups:
         */
        bch2_recalc_capacity(c);

        /* Next, close write points that point to this device... */
        for (i = 0; i < ARRAY_SIZE(c->write_points); i++)
                bch2_writepoint_stop(c, ca, &c->write_points[i]);

        bch2_writepoint_stop(c, ca, &c->copygc_write_point);
        bch2_writepoint_stop(c, ca, &c->rebalance_write_point);
        bch2_writepoint_stop(c, ca, &c->btree_write_point);

        mutex_lock(&c->btree_reserve_cache_lock);
        while (c->btree_reserve_cache_nr) {
                struct btree_alloc *a =
                        &c->btree_reserve_cache[--c->btree_reserve_cache_nr];

                bch2_open_buckets_put(c, &a->ob);
        }
        mutex_unlock(&c->btree_reserve_cache_lock);

        while (1) {
                struct open_bucket *ob;

                spin_lock(&c->freelist_lock);
                if (!ca->open_buckets_partial_nr) {
                        spin_unlock(&c->freelist_lock);
                        break;
                }
                ob = c->open_buckets +
                        ca->open_buckets_partial[--ca->open_buckets_partial_nr];
                ob->on_partial_list = false;
                spin_unlock(&c->freelist_lock);

                bch2_open_bucket_put(c, ob);
        }

        bch2_ec_stop_dev(c, ca);

        /*
         * Wake up threads that were blocked on allocation, so they can notice
         * the device can no longer be removed and the capacity has changed:
         */
        closure_wake_up(&c->freelist_wait);

        /*
         * journal_res_get() can block waiting for free space in the journal -
         * it needs to notice there may not be devices to allocate from anymore:
         */
        wake_up(&c->journal.wait);

        /* Now wait for any in flight writes: */

        closure_wait_event(&c->open_buckets_wait,
                           !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
        unsigned i;

        for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
                if (ca->mi.data_allowed & (1 << i))
                        set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
        spin_lock_init(&c->freelist_lock);
        INIT_WORK(&c->discard_work, bch2_do_discards_work);
        INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}